From 5f8de423f190bbb79a62f804151bc24824fa32d8 Mon Sep 17 00:00:00 2001 From: "Matt A. Tobin" Date: Fri, 2 Feb 2018 04:16:08 -0500 Subject: Add m-esr52 at 52.6.0 --- js/src/jit/AliasAnalysis.cpp | 283 + js/src/jit/AliasAnalysis.h | 31 + js/src/jit/AliasAnalysisShared.cpp | 188 + js/src/jit/AliasAnalysisShared.h | 81 + js/src/jit/AlignmentMaskAnalysis.cpp | 94 + js/src/jit/AlignmentMaskAnalysis.h | 32 + js/src/jit/AtomicOp.h | 73 + js/src/jit/AtomicOperations.h | 353 + js/src/jit/BacktrackingAllocator.cpp | 3124 ++++ js/src/jit/BacktrackingAllocator.h | 816 + js/src/jit/Bailouts.cpp | 314 + js/src/jit/Bailouts.h | 219 + js/src/jit/BaselineBailouts.cpp | 1999 +++ js/src/jit/BaselineCacheIR.cpp | 1283 ++ js/src/jit/BaselineCacheIR.h | 67 + js/src/jit/BaselineCompiler.cpp | 4527 ++++++ js/src/jit/BaselineCompiler.h | 357 + js/src/jit/BaselineDebugModeOSR.cpp | 1184 ++ js/src/jit/BaselineDebugModeOSR.h | 146 + js/src/jit/BaselineFrame-inl.h | 107 + js/src/jit/BaselineFrame.cpp | 157 + js/src/jit/BaselineFrame.h | 458 + js/src/jit/BaselineFrameInfo-inl.h | 41 + js/src/jit/BaselineFrameInfo.cpp | 196 + js/src/jit/BaselineFrameInfo.h | 315 + js/src/jit/BaselineIC.cpp | 8719 +++++++++++ js/src/jit/BaselineIC.h | 3384 +++++ js/src/jit/BaselineICList.h | 123 + js/src/jit/BaselineInspector.cpp | 924 ++ js/src/jit/BaselineInspector.h | 148 + js/src/jit/BaselineJIT.cpp | 1251 ++ js/src/jit/BaselineJIT.h | 635 + js/src/jit/BitSet.cpp | 115 + js/src/jit/BitSet.h | 182 + js/src/jit/BytecodeAnalysis.cpp | 227 + js/src/jit/BytecodeAnalysis.h | 78 + js/src/jit/C1Spewer.cpp | 194 + js/src/jit/C1Spewer.h | 51 + js/src/jit/CacheIR.cpp | 473 + js/src/jit/CacheIR.h | 453 + js/src/jit/CodeGenerator.cpp | 12098 +++++++++++++++ js/src/jit/CodeGenerator.h | 593 + js/src/jit/CompactBuffer.h | 206 + js/src/jit/CompileInfo-inl.h | 90 + js/src/jit/CompileInfo.h | 560 + js/src/jit/CompileWrappers.cpp | 310 + js/src/jit/CompileWrappers.h | 158 + js/src/jit/Disassembler.cpp | 64 + js/src/jit/Disassembler.h | 278 + js/src/jit/EagerSimdUnbox.cpp | 128 + js/src/jit/EagerSimdUnbox.h | 25 + js/src/jit/EdgeCaseAnalysis.cpp | 47 + js/src/jit/EdgeCaseAnalysis.h | 31 + js/src/jit/EffectiveAddressAnalysis.cpp | 277 + js/src/jit/EffectiveAddressAnalysis.h | 39 + js/src/jit/ExecutableAllocator.cpp | 390 + js/src/jit/ExecutableAllocator.h | 330 + js/src/jit/FixedList.h | 106 + js/src/jit/FlowAliasAnalysis.cpp | 949 ++ js/src/jit/FlowAliasAnalysis.h | 71 + js/src/jit/FoldLinearArithConstants.cpp | 104 + js/src/jit/FoldLinearArithConstants.h | 22 + js/src/jit/ICStubSpace.h | 82 + js/src/jit/InlinableNatives.h | 166 + js/src/jit/InlineList.h | 671 + js/src/jit/InstructionReordering.cpp | 190 + js/src/jit/InstructionReordering.h | 21 + js/src/jit/Ion.cpp | 3560 +++++ js/src/jit/Ion.h | 221 + js/src/jit/IonAnalysis.cpp | 4760 ++++++ js/src/jit/IonAnalysis.h | 218 + js/src/jit/IonBuilder.cpp | 14696 +++++++++++++++++++ js/src/jit/IonBuilder.h | 1533 ++ js/src/jit/IonCaches.cpp | 5072 +++++++ js/src/jit/IonCaches.h | 848 ++ js/src/jit/IonCode.h | 825 ++ js/src/jit/IonInstrumentation.h | 33 + js/src/jit/IonOptimizationLevels.cpp | 178 + js/src/jit/IonOptimizationLevels.h | 302 + js/src/jit/IonTypes.h | 875 ++ js/src/jit/JSONSpewer.cpp | 410 + js/src/jit/JSONSpewer.h | 72 + js/src/jit/JitAllocPolicy.h | 210 + js/src/jit/JitCommon.h | 52 + js/src/jit/JitCompartment.h | 667 + js/src/jit/JitFrameIterator-inl.h | 51 + js/src/jit/JitFrameIterator.h | 864 ++ js/src/jit/JitFrames-inl.h | 73 + js/src/jit/JitFrames.cpp | 3158 ++++ js/src/jit/JitFrames.h | 
1044 ++ js/src/jit/JitOptions.cpp | 298 + js/src/jit/JitOptions.h | 110 + js/src/jit/JitSpewer.cpp | 679 + js/src/jit/JitSpewer.h | 293 + js/src/jit/JitcodeMap.cpp | 1662 +++ js/src/jit/JitcodeMap.h | 1493 ++ js/src/jit/LICM.cpp | 272 + js/src/jit/LICM.h | 25 + js/src/jit/LIR.cpp | 621 + js/src/jit/LIR.h | 2025 +++ js/src/jit/LOpcodes.h | 32 + js/src/jit/Label.h | 117 + js/src/jit/Linker.cpp | 64 + js/src/jit/Linker.h | 46 + js/src/jit/LoopUnroller.cpp | 408 + js/src/jit/LoopUnroller.h | 21 + js/src/jit/Lowering.cpp | 4930 +++++++ js/src/jit/Lowering.h | 338 + js/src/jit/MCallOptimize.cpp | 4099 ++++++ js/src/jit/MIR.cpp | 6642 +++++++++ js/src/jit/MIR.h | 14267 ++++++++++++++++++ js/src/jit/MIRGenerator.h | 229 + js/src/jit/MIRGraph.cpp | 1750 +++ js/src/jit/MIRGraph.h | 1060 ++ js/src/jit/MOpcodes.h | 349 + js/src/jit/MacroAssembler-inl.h | 819 ++ js/src/jit/MacroAssembler.cpp | 2980 ++++ js/src/jit/MacroAssembler.h | 2233 +++ js/src/jit/MoveEmitter.h | 26 + js/src/jit/MoveResolver.cpp | 321 + js/src/jit/MoveResolver.h | 333 + js/src/jit/OptimizationTracking.cpp | 1305 ++ js/src/jit/OptimizationTracking.h | 575 + js/src/jit/PcScriptCache.h | 81 + js/src/jit/PerfSpewer.cpp | 340 + js/src/jit/PerfSpewer.h | 95 + js/src/jit/ProcessExecutableMemory.cpp | 656 + js/src/jit/ProcessExecutableMemory.h | 48 + js/src/jit/RangeAnalysis.cpp | 3634 +++++ js/src/jit/RangeAnalysis.h | 711 + js/src/jit/Recover.cpp | 1694 +++ js/src/jit/Recover.h | 692 + js/src/jit/RegisterAllocator.cpp | 614 + js/src/jit/RegisterAllocator.h | 375 + js/src/jit/RegisterSets.h | 1333 ++ js/src/jit/Registers.h | 250 + js/src/jit/RematerializedFrame.cpp | 222 + js/src/jit/RematerializedFrame.h | 275 + js/src/jit/Safepoints.cpp | 562 + js/src/jit/Safepoints.h | 131 + js/src/jit/ScalarReplacement.cpp | 1350 ++ js/src/jit/ScalarReplacement.h | 25 + js/src/jit/SharedIC.cpp | 4306 ++++++ js/src/jit/SharedIC.h | 3120 ++++ js/src/jit/SharedICHelpers.h | 32 + js/src/jit/SharedICList.h | 55 + js/src/jit/SharedICRegisters.h | 34 + js/src/jit/Sink.cpp | 232 + js/src/jit/Sink.h | 25 + js/src/jit/Snapshots.cpp | 731 + js/src/jit/Snapshots.h | 579 + js/src/jit/StackSlotAllocator.h | 110 + js/src/jit/StupidAllocator.cpp | 434 + js/src/jit/StupidAllocator.h | 90 + js/src/jit/TypePolicy.cpp | 1330 ++ js/src/jit/TypePolicy.h | 536 + js/src/jit/TypedObjectPrediction.cpp | 308 + js/src/jit/TypedObjectPrediction.h | 201 + js/src/jit/VMFunctions.cpp | 1361 ++ js/src/jit/VMFunctions.h | 808 + js/src/jit/ValueNumbering.cpp | 1306 ++ js/src/jit/ValueNumbering.h | 127 + js/src/jit/WasmBCE.cpp | 94 + js/src/jit/WasmBCE.h | 33 + js/src/jit/arm/Architecture-arm.cpp | 444 + js/src/jit/arm/Architecture-arm.h | 673 + js/src/jit/arm/Assembler-arm.cpp | 3442 +++++ js/src/jit/arm/Assembler-arm.h | 2429 +++ js/src/jit/arm/AtomicOperations-arm.h | 247 + js/src/jit/arm/Bailouts-arm.cpp | 119 + js/src/jit/arm/BaselineCompiler-arm.cpp | 15 + js/src/jit/arm/BaselineCompiler-arm.h | 26 + js/src/jit/arm/BaselineIC-arm.cpp | 74 + js/src/jit/arm/CodeGenerator-arm.cpp | 3720 +++++ js/src/jit/arm/CodeGenerator-arm.h | 336 + js/src/jit/arm/DoubleEntryTable.tbl | 257 + js/src/jit/arm/LIR-arm.h | 710 + js/src/jit/arm/LOpcodes-arm.h | 32 + js/src/jit/arm/Lowering-arm.cpp | 1031 ++ js/src/jit/arm/Lowering-arm.h | 132 + js/src/jit/arm/MacroAssembler-arm-inl.h | 2143 +++ js/src/jit/arm/MacroAssembler-arm.cpp | 5559 +++++++ js/src/jit/arm/MacroAssembler-arm.h | 1554 ++ js/src/jit/arm/MoveEmitter-arm.cpp | 427 + js/src/jit/arm/MoveEmitter-arm.h | 66 + 
js/src/jit/arm/SharedIC-arm.cpp | 217 + js/src/jit/arm/SharedICHelpers-arm.h | 384 + js/src/jit/arm/SharedICRegisters-arm.h | 54 + js/src/jit/arm/Simulator-arm.cpp | 4941 +++++++ js/src/jit/arm/Simulator-arm.h | 519 + js/src/jit/arm/Trampoline-arm.cpp | 1442 ++ js/src/jit/arm/disasm/Constants-arm.cpp | 144 + js/src/jit/arm/disasm/Constants-arm.h | 745 + js/src/jit/arm/disasm/Disasm-arm.cpp | 2173 +++ js/src/jit/arm/disasm/Disasm-arm.h | 143 + js/src/jit/arm/gen-double-encoder-table.py | 32 + .../jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S | 27 + .../jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S | 28 + js/src/jit/arm/llvm-compiler-rt/assembly.h | 70 + js/src/jit/arm64/Architecture-arm64.cpp | 75 + js/src/jit/arm64/Architecture-arm64.h | 462 + js/src/jit/arm64/Assembler-arm64.cpp | 670 + js/src/jit/arm64/Assembler-arm64.h | 557 + js/src/jit/arm64/AtomicOperations-arm64.h | 156 + js/src/jit/arm64/Bailouts-arm64.cpp | 67 + js/src/jit/arm64/BaselineCompiler-arm64.h | 28 + js/src/jit/arm64/BaselineIC-arm64.cpp | 75 + js/src/jit/arm64/CodeGenerator-arm64.cpp | 783 + js/src/jit/arm64/CodeGenerator-arm64.h | 262 + js/src/jit/arm64/LIR-arm64.h | 395 + js/src/jit/arm64/LOpcodes-arm64.h | 20 + js/src/jit/arm64/Lowering-arm64.cpp | 369 + js/src/jit/arm64/Lowering-arm64.h | 132 + js/src/jit/arm64/MacroAssembler-arm64-inl.h | 1793 +++ js/src/jit/arm64/MacroAssembler-arm64.cpp | 838 ++ js/src/jit/arm64/MacroAssembler-arm64.h | 2338 +++ js/src/jit/arm64/MoveEmitter-arm64.cpp | 300 + js/src/jit/arm64/MoveEmitter-arm64.h | 86 + js/src/jit/arm64/SharedIC-arm64.cpp | 219 + js/src/jit/arm64/SharedICHelpers-arm64.h | 337 + js/src/jit/arm64/SharedICRegisters-arm64.h | 58 + js/src/jit/arm64/Trampoline-arm64.cpp | 1229 ++ js/src/jit/arm64/vixl/.clang-format | 4 + js/src/jit/arm64/vixl/Assembler-vixl.cpp | 5088 +++++++ js/src/jit/arm64/vixl/Assembler-vixl.h | 4257 ++++++ js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h | 179 + js/src/jit/arm64/vixl/Constants-vixl.h | 2148 +++ js/src/jit/arm64/vixl/Cpu-vixl.cpp | 170 + js/src/jit/arm64/vixl/Cpu-vixl.h | 83 + js/src/jit/arm64/vixl/Debugger-vixl.cpp | 1535 ++ js/src/jit/arm64/vixl/Debugger-vixl.h | 117 + js/src/jit/arm64/vixl/Decoder-vixl.cpp | 874 ++ js/src/jit/arm64/vixl/Decoder-vixl.h | 274 + js/src/jit/arm64/vixl/Disasm-vixl.cpp | 3488 +++++ js/src/jit/arm64/vixl/Disasm-vixl.h | 177 + js/src/jit/arm64/vixl/Globals-vixl.h | 122 + js/src/jit/arm64/vixl/Instructions-vixl.cpp | 670 + js/src/jit/arm64/vixl/Instructions-vixl.h | 830 ++ js/src/jit/arm64/vixl/Instrument-vixl.cpp | 844 ++ js/src/jit/arm64/vixl/Instrument-vixl.h | 110 + js/src/jit/arm64/vixl/Logic-vixl.cpp | 4878 ++++++ js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp | 2007 +++ js/src/jit/arm64/vixl/MacroAssembler-vixl.h | 2494 ++++ js/src/jit/arm64/vixl/MozAssembler-vixl.cpp | 712 + js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h | 216 + js/src/jit/arm64/vixl/MozInstructions-vixl.cpp | 195 + js/src/jit/arm64/vixl/MozSimulator-vixl.cpp | 708 + js/src/jit/arm64/vixl/Platform-vixl.h | 39 + js/src/jit/arm64/vixl/Simulator-Constants-vixl.h | 141 + js/src/jit/arm64/vixl/Simulator-vixl.cpp | 3949 +++++ js/src/jit/arm64/vixl/Simulator-vixl.h | 2677 ++++ js/src/jit/arm64/vixl/Utils-vixl.cpp | 145 + js/src/jit/arm64/vixl/Utils-vixl.h | 286 + .../jit/mips-shared/Architecture-mips-shared.cpp | 77 + js/src/jit/mips-shared/Architecture-mips-shared.h | 338 + js/src/jit/mips-shared/Assembler-mips-shared.cpp | 1746 +++ js/src/jit/mips-shared/Assembler-mips-shared.h | 1522 ++ .../jit/mips-shared/AtomicOperations-mips-shared.h | 
241 + js/src/jit/mips-shared/Bailouts-mips-shared.cpp | 24 + .../mips-shared/BaselineCompiler-mips-shared.cpp | 16 + .../jit/mips-shared/BaselineCompiler-mips-shared.h | 24 + js/src/jit/mips-shared/BaselineIC-mips-shared.cpp | 39 + .../jit/mips-shared/CodeGenerator-mips-shared.cpp | 2931 ++++ js/src/jit/mips-shared/CodeGenerator-mips-shared.h | 301 + js/src/jit/mips-shared/LIR-mips-shared.h | 408 + js/src/jit/mips-shared/Lowering-mips-shared.cpp | 753 + js/src/jit/mips-shared/Lowering-mips-shared.h | 108 + .../mips-shared/MacroAssembler-mips-shared-inl.h | 1030 ++ .../jit/mips-shared/MacroAssembler-mips-shared.cpp | 1728 +++ .../jit/mips-shared/MacroAssembler-mips-shared.h | 262 + js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp | 223 + js/src/jit/mips-shared/MoveEmitter-mips-shared.h | 76 + .../jit/mips-shared/SharedICHelpers-mips-shared.h | 382 + js/src/jit/mips32/Architecture-mips32.cpp | 102 + js/src/jit/mips32/Architecture-mips32.h | 287 + js/src/jit/mips32/Assembler-mips32.cpp | 545 + js/src/jit/mips32/Assembler-mips32.h | 227 + js/src/jit/mips32/Bailouts-mips32.cpp | 48 + js/src/jit/mips32/Bailouts-mips32.h | 77 + js/src/jit/mips32/BaselineCompiler-mips32.cpp | 16 + js/src/jit/mips32/BaselineCompiler-mips32.h | 26 + js/src/jit/mips32/BaselineIC-mips32.cpp | 45 + js/src/jit/mips32/CodeGenerator-mips32.cpp | 832 ++ js/src/jit/mips32/CodeGenerator-mips32.h | 96 + js/src/jit/mips32/LIR-mips32.h | 169 + js/src/jit/mips32/LOpcodes-mips32.h | 25 + js/src/jit/mips32/Lowering-mips32.cpp | 258 + js/src/jit/mips32/Lowering-mips32.h | 57 + js/src/jit/mips32/MacroAssembler-mips32-inl.h | 1077 ++ js/src/jit/mips32/MacroAssembler-mips32.cpp | 2365 +++ js/src/jit/mips32/MacroAssembler-mips32.h | 1021 ++ js/src/jit/mips32/MoveEmitter-mips32.cpp | 156 + js/src/jit/mips32/MoveEmitter-mips32.h | 34 + js/src/jit/mips32/SharedIC-mips32.cpp | 177 + js/src/jit/mips32/SharedICRegisters-mips32.h | 44 + js/src/jit/mips32/Simulator-mips32.cpp | 3519 +++++ js/src/jit/mips32/Simulator-mips32.h | 424 + js/src/jit/mips32/Trampoline-mips32.cpp | 1418 ++ js/src/jit/mips64/Architecture-mips64.cpp | 93 + js/src/jit/mips64/Architecture-mips64.h | 209 + js/src/jit/mips64/Assembler-mips64.cpp | 529 + js/src/jit/mips64/Assembler-mips64.h | 236 + js/src/jit/mips64/Bailouts-mips64.cpp | 28 + js/src/jit/mips64/Bailouts-mips64.h | 44 + js/src/jit/mips64/BaselineCompiler-mips64.cpp | 16 + js/src/jit/mips64/BaselineCompiler-mips64.h | 26 + js/src/jit/mips64/BaselineIC-mips64.cpp | 47 + js/src/jit/mips64/CodeGenerator-mips64.cpp | 774 + js/src/jit/mips64/CodeGenerator-mips64.h | 102 + js/src/jit/mips64/LIR-mips64.h | 140 + js/src/jit/mips64/LOpcodes-mips64.h | 24 + js/src/jit/mips64/Lowering-mips64.cpp | 184 + js/src/jit/mips64/Lowering-mips64.h | 57 + js/src/jit/mips64/MacroAssembler-mips64-inl.h | 774 + js/src/jit/mips64/MacroAssembler-mips64.cpp | 2485 ++++ js/src/jit/mips64/MacroAssembler-mips64.h | 1041 ++ js/src/jit/mips64/MoveEmitter-mips64.cpp | 155 + js/src/jit/mips64/MoveEmitter-mips64.h | 34 + js/src/jit/mips64/SharedIC-mips64.cpp | 191 + js/src/jit/mips64/SharedICRegisters-mips64.h | 47 + js/src/jit/mips64/Simulator-mips64.cpp | 3874 +++++ js/src/jit/mips64/Simulator-mips64.h | 440 + js/src/jit/mips64/Trampoline-mips64.cpp | 1363 ++ js/src/jit/none/Architecture-none.h | 157 + js/src/jit/none/AtomicOperations-none.h | 134 + js/src/jit/none/AtomicOperations-ppc.h | 242 + js/src/jit/none/AtomicOperations-sparc.h | 251 + js/src/jit/none/BaselineCompiler-none.h | 30 + js/src/jit/none/CodeGenerator-none.h | 62 + 
js/src/jit/none/LIR-none.h | 111 + js/src/jit/none/LOpcodes-none.h | 14 + js/src/jit/none/Lowering-none.h | 118 + js/src/jit/none/MacroAssembler-none.h | 464 + js/src/jit/none/MoveEmitter-none.h | 30 + js/src/jit/none/SharedICHelpers-none.h | 42 + js/src/jit/none/SharedICRegisters-none.h | 35 + js/src/jit/none/Trampoline-none.cpp | 49 + js/src/jit/shared/Assembler-shared.h | 991 ++ js/src/jit/shared/BaselineCompiler-shared.cpp | 146 + js/src/jit/shared/BaselineCompiler-shared.h | 172 + js/src/jit/shared/CodeGenerator-shared-inl.h | 437 + js/src/jit/shared/CodeGenerator-shared.cpp | 1865 +++ js/src/jit/shared/CodeGenerator-shared.h | 850 ++ js/src/jit/shared/IonAssemblerBuffer.h | 417 + .../shared/IonAssemblerBufferWithConstantPools.h | 1145 ++ js/src/jit/shared/LIR-shared.h | 8904 +++++++++++ js/src/jit/shared/LOpcodes-shared.h | 441 + js/src/jit/shared/Lowering-shared-inl.h | 858 ++ js/src/jit/shared/Lowering-shared.cpp | 306 + js/src/jit/shared/Lowering-shared.h | 296 + js/src/jit/x64/Assembler-x64.cpp | 303 + js/src/jit/x64/Assembler-x64.h | 1040 ++ js/src/jit/x64/Bailouts-x64.cpp | 75 + js/src/jit/x64/BaseAssembler-x64.h | 929 ++ js/src/jit/x64/BaselineCompiler-x64.cpp | 15 + js/src/jit/x64/BaselineCompiler-x64.h | 26 + js/src/jit/x64/BaselineIC-x64.cpp | 46 + js/src/jit/x64/CodeGenerator-x64.cpp | 880 ++ js/src/jit/x64/CodeGenerator-x64.h | 89 + js/src/jit/x64/LIR-x64.h | 183 + js/src/jit/x64/LOpcodes-x64.h | 23 + js/src/jit/x64/Lowering-x64.cpp | 495 + js/src/jit/x64/Lowering-x64.h | 80 + js/src/jit/x64/MacroAssembler-x64-inl.h | 897 ++ js/src/jit/x64/MacroAssembler-x64.cpp | 859 ++ js/src/jit/x64/MacroAssembler-x64.h | 966 ++ js/src/jit/x64/SharedIC-x64.cpp | 234 + js/src/jit/x64/SharedICHelpers-x64.h | 352 + js/src/jit/x64/SharedICRegisters-x64.h | 35 + js/src/jit/x64/Trampoline-x64.cpp | 1303 ++ js/src/jit/x86-shared/Architecture-x86-shared.cpp | 97 + js/src/jit/x86-shared/Architecture-x86-shared.h | 463 + js/src/jit/x86-shared/Assembler-x86-shared.cpp | 350 + js/src/jit/x86-shared/Assembler-x86-shared.h | 3652 +++++ .../jit/x86-shared/AssemblerBuffer-x86-shared.cpp | 25 + js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h | 205 + .../jit/x86-shared/AtomicOperations-x86-shared.h | 602 + js/src/jit/x86-shared/BaseAssembler-x86-shared.h | 5393 +++++++ .../jit/x86-shared/BaselineCompiler-x86-shared.cpp | 15 + .../jit/x86-shared/BaselineCompiler-x86-shared.h | 24 + js/src/jit/x86-shared/BaselineIC-x86-shared.cpp | 44 + js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp | 4727 ++++++ js/src/jit/x86-shared/CodeGenerator-x86-shared.h | 357 + js/src/jit/x86-shared/Constants-x86-shared.h | 228 + js/src/jit/x86-shared/Disassembler-x86-shared.cpp | 568 + js/src/jit/x86-shared/Encoding-x86-shared.h | 413 + js/src/jit/x86-shared/LIR-x86-shared.h | 421 + js/src/jit/x86-shared/Lowering-x86-shared.cpp | 1019 ++ js/src/jit/x86-shared/Lowering-x86-shared.h | 81 + .../jit/x86-shared/MacroAssembler-x86-shared-inl.h | 1284 ++ .../jit/x86-shared/MacroAssembler-x86-shared.cpp | 855 ++ js/src/jit/x86-shared/MacroAssembler-x86-shared.h | 1411 ++ js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp | 581 + js/src/jit/x86-shared/MoveEmitter-x86-shared.h | 74 + js/src/jit/x86-shared/Patching-x86-shared.h | 124 + js/src/jit/x86/Assembler-x86.cpp | 106 + js/src/jit/x86/Assembler-x86.h | 991 ++ js/src/jit/x86/Bailouts-x86.cpp | 115 + js/src/jit/x86/BaseAssembler-x86.h | 203 + js/src/jit/x86/BaselineCompiler-x86.cpp | 15 + js/src/jit/x86/BaselineCompiler-x86.h | 26 + js/src/jit/x86/BaselineIC-x86.cpp | 48 + 
js/src/jit/x86/CodeGenerator-x86.cpp | 1298 ++ js/src/jit/x86/CodeGenerator-x86.h | 98 + js/src/jit/x86/LIR-x86.h | 207 + js/src/jit/x86/LOpcodes-x86.h | 24 + js/src/jit/x86/Lowering-x86.cpp | 658 + js/src/jit/x86/Lowering-x86.h | 96 + js/src/jit/x86/MacroAssembler-x86-inl.h | 1075 ++ js/src/jit/x86/MacroAssembler-x86.cpp | 1028 ++ js/src/jit/x86/MacroAssembler-x86.h | 870 ++ js/src/jit/x86/SharedIC-x86.cpp | 242 + js/src/jit/x86/SharedICHelpers-x86.h | 353 + js/src/jit/x86/SharedICRegisters-x86.h | 38 + js/src/jit/x86/Trampoline-x86.cpp | 1336 ++ 415 files changed, 352937 insertions(+) create mode 100644 js/src/jit/AliasAnalysis.cpp create mode 100644 js/src/jit/AliasAnalysis.h create mode 100644 js/src/jit/AliasAnalysisShared.cpp create mode 100644 js/src/jit/AliasAnalysisShared.h create mode 100644 js/src/jit/AlignmentMaskAnalysis.cpp create mode 100644 js/src/jit/AlignmentMaskAnalysis.h create mode 100644 js/src/jit/AtomicOp.h create mode 100644 js/src/jit/AtomicOperations.h create mode 100644 js/src/jit/BacktrackingAllocator.cpp create mode 100644 js/src/jit/BacktrackingAllocator.h create mode 100644 js/src/jit/Bailouts.cpp create mode 100644 js/src/jit/Bailouts.h create mode 100644 js/src/jit/BaselineBailouts.cpp create mode 100644 js/src/jit/BaselineCacheIR.cpp create mode 100644 js/src/jit/BaselineCacheIR.h create mode 100644 js/src/jit/BaselineCompiler.cpp create mode 100644 js/src/jit/BaselineCompiler.h create mode 100644 js/src/jit/BaselineDebugModeOSR.cpp create mode 100644 js/src/jit/BaselineDebugModeOSR.h create mode 100644 js/src/jit/BaselineFrame-inl.h create mode 100644 js/src/jit/BaselineFrame.cpp create mode 100644 js/src/jit/BaselineFrame.h create mode 100644 js/src/jit/BaselineFrameInfo-inl.h create mode 100644 js/src/jit/BaselineFrameInfo.cpp create mode 100644 js/src/jit/BaselineFrameInfo.h create mode 100644 js/src/jit/BaselineIC.cpp create mode 100644 js/src/jit/BaselineIC.h create mode 100644 js/src/jit/BaselineICList.h create mode 100644 js/src/jit/BaselineInspector.cpp create mode 100644 js/src/jit/BaselineInspector.h create mode 100644 js/src/jit/BaselineJIT.cpp create mode 100644 js/src/jit/BaselineJIT.h create mode 100644 js/src/jit/BitSet.cpp create mode 100644 js/src/jit/BitSet.h create mode 100644 js/src/jit/BytecodeAnalysis.cpp create mode 100644 js/src/jit/BytecodeAnalysis.h create mode 100644 js/src/jit/C1Spewer.cpp create mode 100644 js/src/jit/C1Spewer.h create mode 100644 js/src/jit/CacheIR.cpp create mode 100644 js/src/jit/CacheIR.h create mode 100644 js/src/jit/CodeGenerator.cpp create mode 100644 js/src/jit/CodeGenerator.h create mode 100644 js/src/jit/CompactBuffer.h create mode 100644 js/src/jit/CompileInfo-inl.h create mode 100644 js/src/jit/CompileInfo.h create mode 100644 js/src/jit/CompileWrappers.cpp create mode 100644 js/src/jit/CompileWrappers.h create mode 100644 js/src/jit/Disassembler.cpp create mode 100644 js/src/jit/Disassembler.h create mode 100644 js/src/jit/EagerSimdUnbox.cpp create mode 100644 js/src/jit/EagerSimdUnbox.h create mode 100644 js/src/jit/EdgeCaseAnalysis.cpp create mode 100644 js/src/jit/EdgeCaseAnalysis.h create mode 100644 js/src/jit/EffectiveAddressAnalysis.cpp create mode 100644 js/src/jit/EffectiveAddressAnalysis.h create mode 100644 js/src/jit/ExecutableAllocator.cpp create mode 100644 js/src/jit/ExecutableAllocator.h create mode 100644 js/src/jit/FixedList.h create mode 100644 js/src/jit/FlowAliasAnalysis.cpp create mode 100644 js/src/jit/FlowAliasAnalysis.h create mode 100644 
js/src/jit/FoldLinearArithConstants.cpp create mode 100644 js/src/jit/FoldLinearArithConstants.h create mode 100644 js/src/jit/ICStubSpace.h create mode 100644 js/src/jit/InlinableNatives.h create mode 100644 js/src/jit/InlineList.h create mode 100644 js/src/jit/InstructionReordering.cpp create mode 100644 js/src/jit/InstructionReordering.h create mode 100644 js/src/jit/Ion.cpp create mode 100644 js/src/jit/Ion.h create mode 100644 js/src/jit/IonAnalysis.cpp create mode 100644 js/src/jit/IonAnalysis.h create mode 100644 js/src/jit/IonBuilder.cpp create mode 100644 js/src/jit/IonBuilder.h create mode 100644 js/src/jit/IonCaches.cpp create mode 100644 js/src/jit/IonCaches.h create mode 100644 js/src/jit/IonCode.h create mode 100644 js/src/jit/IonInstrumentation.h create mode 100644 js/src/jit/IonOptimizationLevels.cpp create mode 100644 js/src/jit/IonOptimizationLevels.h create mode 100644 js/src/jit/IonTypes.h create mode 100644 js/src/jit/JSONSpewer.cpp create mode 100644 js/src/jit/JSONSpewer.h create mode 100644 js/src/jit/JitAllocPolicy.h create mode 100644 js/src/jit/JitCommon.h create mode 100644 js/src/jit/JitCompartment.h create mode 100644 js/src/jit/JitFrameIterator-inl.h create mode 100644 js/src/jit/JitFrameIterator.h create mode 100644 js/src/jit/JitFrames-inl.h create mode 100644 js/src/jit/JitFrames.cpp create mode 100644 js/src/jit/JitFrames.h create mode 100644 js/src/jit/JitOptions.cpp create mode 100644 js/src/jit/JitOptions.h create mode 100644 js/src/jit/JitSpewer.cpp create mode 100644 js/src/jit/JitSpewer.h create mode 100644 js/src/jit/JitcodeMap.cpp create mode 100644 js/src/jit/JitcodeMap.h create mode 100644 js/src/jit/LICM.cpp create mode 100644 js/src/jit/LICM.h create mode 100644 js/src/jit/LIR.cpp create mode 100644 js/src/jit/LIR.h create mode 100644 js/src/jit/LOpcodes.h create mode 100644 js/src/jit/Label.h create mode 100644 js/src/jit/Linker.cpp create mode 100644 js/src/jit/Linker.h create mode 100644 js/src/jit/LoopUnroller.cpp create mode 100644 js/src/jit/LoopUnroller.h create mode 100644 js/src/jit/Lowering.cpp create mode 100644 js/src/jit/Lowering.h create mode 100644 js/src/jit/MCallOptimize.cpp create mode 100644 js/src/jit/MIR.cpp create mode 100644 js/src/jit/MIR.h create mode 100644 js/src/jit/MIRGenerator.h create mode 100644 js/src/jit/MIRGraph.cpp create mode 100644 js/src/jit/MIRGraph.h create mode 100644 js/src/jit/MOpcodes.h create mode 100644 js/src/jit/MacroAssembler-inl.h create mode 100644 js/src/jit/MacroAssembler.cpp create mode 100644 js/src/jit/MacroAssembler.h create mode 100644 js/src/jit/MoveEmitter.h create mode 100644 js/src/jit/MoveResolver.cpp create mode 100644 js/src/jit/MoveResolver.h create mode 100644 js/src/jit/OptimizationTracking.cpp create mode 100644 js/src/jit/OptimizationTracking.h create mode 100644 js/src/jit/PcScriptCache.h create mode 100644 js/src/jit/PerfSpewer.cpp create mode 100644 js/src/jit/PerfSpewer.h create mode 100644 js/src/jit/ProcessExecutableMemory.cpp create mode 100644 js/src/jit/ProcessExecutableMemory.h create mode 100644 js/src/jit/RangeAnalysis.cpp create mode 100644 js/src/jit/RangeAnalysis.h create mode 100644 js/src/jit/Recover.cpp create mode 100644 js/src/jit/Recover.h create mode 100644 js/src/jit/RegisterAllocator.cpp create mode 100644 js/src/jit/RegisterAllocator.h create mode 100644 js/src/jit/RegisterSets.h create mode 100644 js/src/jit/Registers.h create mode 100644 js/src/jit/RematerializedFrame.cpp create mode 100644 js/src/jit/RematerializedFrame.h create mode 100644 
js/src/jit/Safepoints.cpp create mode 100644 js/src/jit/Safepoints.h create mode 100644 js/src/jit/ScalarReplacement.cpp create mode 100644 js/src/jit/ScalarReplacement.h create mode 100644 js/src/jit/SharedIC.cpp create mode 100644 js/src/jit/SharedIC.h create mode 100644 js/src/jit/SharedICHelpers.h create mode 100644 js/src/jit/SharedICList.h create mode 100644 js/src/jit/SharedICRegisters.h create mode 100644 js/src/jit/Sink.cpp create mode 100644 js/src/jit/Sink.h create mode 100644 js/src/jit/Snapshots.cpp create mode 100644 js/src/jit/Snapshots.h create mode 100644 js/src/jit/StackSlotAllocator.h create mode 100644 js/src/jit/StupidAllocator.cpp create mode 100644 js/src/jit/StupidAllocator.h create mode 100644 js/src/jit/TypePolicy.cpp create mode 100644 js/src/jit/TypePolicy.h create mode 100644 js/src/jit/TypedObjectPrediction.cpp create mode 100644 js/src/jit/TypedObjectPrediction.h create mode 100644 js/src/jit/VMFunctions.cpp create mode 100644 js/src/jit/VMFunctions.h create mode 100644 js/src/jit/ValueNumbering.cpp create mode 100644 js/src/jit/ValueNumbering.h create mode 100644 js/src/jit/WasmBCE.cpp create mode 100644 js/src/jit/WasmBCE.h create mode 100644 js/src/jit/arm/Architecture-arm.cpp create mode 100644 js/src/jit/arm/Architecture-arm.h create mode 100644 js/src/jit/arm/Assembler-arm.cpp create mode 100644 js/src/jit/arm/Assembler-arm.h create mode 100644 js/src/jit/arm/AtomicOperations-arm.h create mode 100644 js/src/jit/arm/Bailouts-arm.cpp create mode 100644 js/src/jit/arm/BaselineCompiler-arm.cpp create mode 100644 js/src/jit/arm/BaselineCompiler-arm.h create mode 100644 js/src/jit/arm/BaselineIC-arm.cpp create mode 100644 js/src/jit/arm/CodeGenerator-arm.cpp create mode 100644 js/src/jit/arm/CodeGenerator-arm.h create mode 100644 js/src/jit/arm/DoubleEntryTable.tbl create mode 100644 js/src/jit/arm/LIR-arm.h create mode 100644 js/src/jit/arm/LOpcodes-arm.h create mode 100644 js/src/jit/arm/Lowering-arm.cpp create mode 100644 js/src/jit/arm/Lowering-arm.h create mode 100644 js/src/jit/arm/MacroAssembler-arm-inl.h create mode 100644 js/src/jit/arm/MacroAssembler-arm.cpp create mode 100644 js/src/jit/arm/MacroAssembler-arm.h create mode 100644 js/src/jit/arm/MoveEmitter-arm.cpp create mode 100644 js/src/jit/arm/MoveEmitter-arm.h create mode 100644 js/src/jit/arm/SharedIC-arm.cpp create mode 100644 js/src/jit/arm/SharedICHelpers-arm.h create mode 100644 js/src/jit/arm/SharedICRegisters-arm.h create mode 100644 js/src/jit/arm/Simulator-arm.cpp create mode 100644 js/src/jit/arm/Simulator-arm.h create mode 100644 js/src/jit/arm/Trampoline-arm.cpp create mode 100644 js/src/jit/arm/disasm/Constants-arm.cpp create mode 100644 js/src/jit/arm/disasm/Constants-arm.h create mode 100644 js/src/jit/arm/disasm/Disasm-arm.cpp create mode 100644 js/src/jit/arm/disasm/Disasm-arm.h create mode 100644 js/src/jit/arm/gen-double-encoder-table.py create mode 100644 js/src/jit/arm/llvm-compiler-rt/arm/aeabi_idivmod.S create mode 100644 js/src/jit/arm/llvm-compiler-rt/arm/aeabi_uidivmod.S create mode 100644 js/src/jit/arm/llvm-compiler-rt/assembly.h create mode 100644 js/src/jit/arm64/Architecture-arm64.cpp create mode 100644 js/src/jit/arm64/Architecture-arm64.h create mode 100644 js/src/jit/arm64/Assembler-arm64.cpp create mode 100644 js/src/jit/arm64/Assembler-arm64.h create mode 100644 js/src/jit/arm64/AtomicOperations-arm64.h create mode 100644 js/src/jit/arm64/Bailouts-arm64.cpp create mode 100644 js/src/jit/arm64/BaselineCompiler-arm64.h create mode 100644 
js/src/jit/arm64/BaselineIC-arm64.cpp create mode 100644 js/src/jit/arm64/CodeGenerator-arm64.cpp create mode 100644 js/src/jit/arm64/CodeGenerator-arm64.h create mode 100644 js/src/jit/arm64/LIR-arm64.h create mode 100644 js/src/jit/arm64/LOpcodes-arm64.h create mode 100644 js/src/jit/arm64/Lowering-arm64.cpp create mode 100644 js/src/jit/arm64/Lowering-arm64.h create mode 100644 js/src/jit/arm64/MacroAssembler-arm64-inl.h create mode 100644 js/src/jit/arm64/MacroAssembler-arm64.cpp create mode 100644 js/src/jit/arm64/MacroAssembler-arm64.h create mode 100644 js/src/jit/arm64/MoveEmitter-arm64.cpp create mode 100644 js/src/jit/arm64/MoveEmitter-arm64.h create mode 100644 js/src/jit/arm64/SharedIC-arm64.cpp create mode 100644 js/src/jit/arm64/SharedICHelpers-arm64.h create mode 100644 js/src/jit/arm64/SharedICRegisters-arm64.h create mode 100644 js/src/jit/arm64/Trampoline-arm64.cpp create mode 100644 js/src/jit/arm64/vixl/.clang-format create mode 100644 js/src/jit/arm64/vixl/Assembler-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Assembler-vixl.h create mode 100644 js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h create mode 100644 js/src/jit/arm64/vixl/Constants-vixl.h create mode 100644 js/src/jit/arm64/vixl/Cpu-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Cpu-vixl.h create mode 100644 js/src/jit/arm64/vixl/Debugger-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Debugger-vixl.h create mode 100644 js/src/jit/arm64/vixl/Decoder-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Decoder-vixl.h create mode 100644 js/src/jit/arm64/vixl/Disasm-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Disasm-vixl.h create mode 100644 js/src/jit/arm64/vixl/Globals-vixl.h create mode 100644 js/src/jit/arm64/vixl/Instructions-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Instructions-vixl.h create mode 100644 js/src/jit/arm64/vixl/Instrument-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Instrument-vixl.h create mode 100644 js/src/jit/arm64/vixl/Logic-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/MacroAssembler-vixl.h create mode 100644 js/src/jit/arm64/vixl/MozAssembler-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h create mode 100644 js/src/jit/arm64/vixl/MozInstructions-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/MozSimulator-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Platform-vixl.h create mode 100644 js/src/jit/arm64/vixl/Simulator-Constants-vixl.h create mode 100644 js/src/jit/arm64/vixl/Simulator-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Simulator-vixl.h create mode 100644 js/src/jit/arm64/vixl/Utils-vixl.cpp create mode 100644 js/src/jit/arm64/vixl/Utils-vixl.h create mode 100644 js/src/jit/mips-shared/Architecture-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/Architecture-mips-shared.h create mode 100644 js/src/jit/mips-shared/Assembler-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/Assembler-mips-shared.h create mode 100644 js/src/jit/mips-shared/AtomicOperations-mips-shared.h create mode 100644 js/src/jit/mips-shared/Bailouts-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/BaselineCompiler-mips-shared.h create mode 100644 js/src/jit/mips-shared/BaselineIC-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/CodeGenerator-mips-shared.h create mode 100644 
js/src/jit/mips-shared/LIR-mips-shared.h create mode 100644 js/src/jit/mips-shared/Lowering-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/Lowering-mips-shared.h create mode 100644 js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h create mode 100644 js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/MacroAssembler-mips-shared.h create mode 100644 js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp create mode 100644 js/src/jit/mips-shared/MoveEmitter-mips-shared.h create mode 100644 js/src/jit/mips-shared/SharedICHelpers-mips-shared.h create mode 100644 js/src/jit/mips32/Architecture-mips32.cpp create mode 100644 js/src/jit/mips32/Architecture-mips32.h create mode 100644 js/src/jit/mips32/Assembler-mips32.cpp create mode 100644 js/src/jit/mips32/Assembler-mips32.h create mode 100644 js/src/jit/mips32/Bailouts-mips32.cpp create mode 100644 js/src/jit/mips32/Bailouts-mips32.h create mode 100644 js/src/jit/mips32/BaselineCompiler-mips32.cpp create mode 100644 js/src/jit/mips32/BaselineCompiler-mips32.h create mode 100644 js/src/jit/mips32/BaselineIC-mips32.cpp create mode 100644 js/src/jit/mips32/CodeGenerator-mips32.cpp create mode 100644 js/src/jit/mips32/CodeGenerator-mips32.h create mode 100644 js/src/jit/mips32/LIR-mips32.h create mode 100644 js/src/jit/mips32/LOpcodes-mips32.h create mode 100644 js/src/jit/mips32/Lowering-mips32.cpp create mode 100644 js/src/jit/mips32/Lowering-mips32.h create mode 100644 js/src/jit/mips32/MacroAssembler-mips32-inl.h create mode 100644 js/src/jit/mips32/MacroAssembler-mips32.cpp create mode 100644 js/src/jit/mips32/MacroAssembler-mips32.h create mode 100644 js/src/jit/mips32/MoveEmitter-mips32.cpp create mode 100644 js/src/jit/mips32/MoveEmitter-mips32.h create mode 100644 js/src/jit/mips32/SharedIC-mips32.cpp create mode 100644 js/src/jit/mips32/SharedICRegisters-mips32.h create mode 100644 js/src/jit/mips32/Simulator-mips32.cpp create mode 100644 js/src/jit/mips32/Simulator-mips32.h create mode 100644 js/src/jit/mips32/Trampoline-mips32.cpp create mode 100644 js/src/jit/mips64/Architecture-mips64.cpp create mode 100644 js/src/jit/mips64/Architecture-mips64.h create mode 100644 js/src/jit/mips64/Assembler-mips64.cpp create mode 100644 js/src/jit/mips64/Assembler-mips64.h create mode 100644 js/src/jit/mips64/Bailouts-mips64.cpp create mode 100644 js/src/jit/mips64/Bailouts-mips64.h create mode 100644 js/src/jit/mips64/BaselineCompiler-mips64.cpp create mode 100644 js/src/jit/mips64/BaselineCompiler-mips64.h create mode 100644 js/src/jit/mips64/BaselineIC-mips64.cpp create mode 100644 js/src/jit/mips64/CodeGenerator-mips64.cpp create mode 100644 js/src/jit/mips64/CodeGenerator-mips64.h create mode 100644 js/src/jit/mips64/LIR-mips64.h create mode 100644 js/src/jit/mips64/LOpcodes-mips64.h create mode 100644 js/src/jit/mips64/Lowering-mips64.cpp create mode 100644 js/src/jit/mips64/Lowering-mips64.h create mode 100644 js/src/jit/mips64/MacroAssembler-mips64-inl.h create mode 100644 js/src/jit/mips64/MacroAssembler-mips64.cpp create mode 100644 js/src/jit/mips64/MacroAssembler-mips64.h create mode 100644 js/src/jit/mips64/MoveEmitter-mips64.cpp create mode 100644 js/src/jit/mips64/MoveEmitter-mips64.h create mode 100644 js/src/jit/mips64/SharedIC-mips64.cpp create mode 100644 js/src/jit/mips64/SharedICRegisters-mips64.h create mode 100644 js/src/jit/mips64/Simulator-mips64.cpp create mode 100644 js/src/jit/mips64/Simulator-mips64.h create mode 100644 js/src/jit/mips64/Trampoline-mips64.cpp create 
mode 100644 js/src/jit/none/Architecture-none.h create mode 100644 js/src/jit/none/AtomicOperations-none.h create mode 100644 js/src/jit/none/AtomicOperations-ppc.h create mode 100644 js/src/jit/none/AtomicOperations-sparc.h create mode 100644 js/src/jit/none/BaselineCompiler-none.h create mode 100644 js/src/jit/none/CodeGenerator-none.h create mode 100644 js/src/jit/none/LIR-none.h create mode 100644 js/src/jit/none/LOpcodes-none.h create mode 100644 js/src/jit/none/Lowering-none.h create mode 100644 js/src/jit/none/MacroAssembler-none.h create mode 100644 js/src/jit/none/MoveEmitter-none.h create mode 100644 js/src/jit/none/SharedICHelpers-none.h create mode 100644 js/src/jit/none/SharedICRegisters-none.h create mode 100644 js/src/jit/none/Trampoline-none.cpp create mode 100644 js/src/jit/shared/Assembler-shared.h create mode 100644 js/src/jit/shared/BaselineCompiler-shared.cpp create mode 100644 js/src/jit/shared/BaselineCompiler-shared.h create mode 100644 js/src/jit/shared/CodeGenerator-shared-inl.h create mode 100644 js/src/jit/shared/CodeGenerator-shared.cpp create mode 100644 js/src/jit/shared/CodeGenerator-shared.h create mode 100644 js/src/jit/shared/IonAssemblerBuffer.h create mode 100644 js/src/jit/shared/IonAssemblerBufferWithConstantPools.h create mode 100644 js/src/jit/shared/LIR-shared.h create mode 100644 js/src/jit/shared/LOpcodes-shared.h create mode 100644 js/src/jit/shared/Lowering-shared-inl.h create mode 100644 js/src/jit/shared/Lowering-shared.cpp create mode 100644 js/src/jit/shared/Lowering-shared.h create mode 100644 js/src/jit/x64/Assembler-x64.cpp create mode 100644 js/src/jit/x64/Assembler-x64.h create mode 100644 js/src/jit/x64/Bailouts-x64.cpp create mode 100644 js/src/jit/x64/BaseAssembler-x64.h create mode 100644 js/src/jit/x64/BaselineCompiler-x64.cpp create mode 100644 js/src/jit/x64/BaselineCompiler-x64.h create mode 100644 js/src/jit/x64/BaselineIC-x64.cpp create mode 100644 js/src/jit/x64/CodeGenerator-x64.cpp create mode 100644 js/src/jit/x64/CodeGenerator-x64.h create mode 100644 js/src/jit/x64/LIR-x64.h create mode 100644 js/src/jit/x64/LOpcodes-x64.h create mode 100644 js/src/jit/x64/Lowering-x64.cpp create mode 100644 js/src/jit/x64/Lowering-x64.h create mode 100644 js/src/jit/x64/MacroAssembler-x64-inl.h create mode 100644 js/src/jit/x64/MacroAssembler-x64.cpp create mode 100644 js/src/jit/x64/MacroAssembler-x64.h create mode 100644 js/src/jit/x64/SharedIC-x64.cpp create mode 100644 js/src/jit/x64/SharedICHelpers-x64.h create mode 100644 js/src/jit/x64/SharedICRegisters-x64.h create mode 100644 js/src/jit/x64/Trampoline-x64.cpp create mode 100644 js/src/jit/x86-shared/Architecture-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/Architecture-x86-shared.h create mode 100644 js/src/jit/x86-shared/Assembler-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/Assembler-x86-shared.h create mode 100644 js/src/jit/x86-shared/AssemblerBuffer-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h create mode 100644 js/src/jit/x86-shared/AtomicOperations-x86-shared.h create mode 100644 js/src/jit/x86-shared/BaseAssembler-x86-shared.h create mode 100644 js/src/jit/x86-shared/BaselineCompiler-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/BaselineCompiler-x86-shared.h create mode 100644 js/src/jit/x86-shared/BaselineIC-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/CodeGenerator-x86-shared.h create mode 100644 
js/src/jit/x86-shared/Constants-x86-shared.h create mode 100644 js/src/jit/x86-shared/Disassembler-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/Encoding-x86-shared.h create mode 100644 js/src/jit/x86-shared/LIR-x86-shared.h create mode 100644 js/src/jit/x86-shared/Lowering-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/Lowering-x86-shared.h create mode 100644 js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h create mode 100644 js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/MacroAssembler-x86-shared.h create mode 100644 js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp create mode 100644 js/src/jit/x86-shared/MoveEmitter-x86-shared.h create mode 100644 js/src/jit/x86-shared/Patching-x86-shared.h create mode 100644 js/src/jit/x86/Assembler-x86.cpp create mode 100644 js/src/jit/x86/Assembler-x86.h create mode 100644 js/src/jit/x86/Bailouts-x86.cpp create mode 100644 js/src/jit/x86/BaseAssembler-x86.h create mode 100644 js/src/jit/x86/BaselineCompiler-x86.cpp create mode 100644 js/src/jit/x86/BaselineCompiler-x86.h create mode 100644 js/src/jit/x86/BaselineIC-x86.cpp create mode 100644 js/src/jit/x86/CodeGenerator-x86.cpp create mode 100644 js/src/jit/x86/CodeGenerator-x86.h create mode 100644 js/src/jit/x86/LIR-x86.h create mode 100644 js/src/jit/x86/LOpcodes-x86.h create mode 100644 js/src/jit/x86/Lowering-x86.cpp create mode 100644 js/src/jit/x86/Lowering-x86.h create mode 100644 js/src/jit/x86/MacroAssembler-x86-inl.h create mode 100644 js/src/jit/x86/MacroAssembler-x86.cpp create mode 100644 js/src/jit/x86/MacroAssembler-x86.h create mode 100644 js/src/jit/x86/SharedIC-x86.cpp create mode 100644 js/src/jit/x86/SharedICHelpers-x86.h create mode 100644 js/src/jit/x86/SharedICRegisters-x86.h create mode 100644 js/src/jit/x86/Trampoline-x86.cpp (limited to 'js/src/jit') diff --git a/js/src/jit/AliasAnalysis.cpp b/js/src/jit/AliasAnalysis.cpp new file mode 100644 index 000000000..ad26a890e --- /dev/null +++ b/js/src/jit/AliasAnalysis.cpp @@ -0,0 +1,283 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/AliasAnalysis.h" + +#include + +#include "jit/AliasAnalysisShared.h" +#include "jit/Ion.h" +#include "jit/IonBuilder.h" +#include "jit/JitSpewer.h" +#include "jit/MIR.h" +#include "jit/MIRGraph.h" + +#include "vm/Printer.h" + +using namespace js; +using namespace js::jit; + +using mozilla::Array; + +namespace js { +namespace jit { + +class LoopAliasInfo : public TempObject +{ + private: + LoopAliasInfo* outer_; + MBasicBlock* loopHeader_; + MInstructionVector invariantLoads_; + + public: + LoopAliasInfo(TempAllocator& alloc, LoopAliasInfo* outer, MBasicBlock* loopHeader) + : outer_(outer), loopHeader_(loopHeader), invariantLoads_(alloc) + { } + + MBasicBlock* loopHeader() const { + return loopHeader_; + } + LoopAliasInfo* outer() const { + return outer_; + } + bool addInvariantLoad(MInstruction* ins) { + return invariantLoads_.append(ins); + } + const MInstructionVector& invariantLoads() const { + return invariantLoads_; + } + MInstruction* firstInstruction() const { + return *loopHeader_->begin(); + } +}; + +} // namespace jit +} // namespace js + +AliasAnalysis::AliasAnalysis(MIRGenerator* mir, MIRGraph& graph) + : AliasAnalysisShared(mir, graph), + loop_(nullptr) +{ +} + +// Whether there might be a path from src to dest, excluding loop backedges. This is +// approximate and really ought to depend on precomputed reachability information. +static inline bool +BlockMightReach(MBasicBlock* src, MBasicBlock* dest) +{ + while (src->id() <= dest->id()) { + if (src == dest) + return true; + switch (src->numSuccessors()) { + case 0: + return false; + case 1: { + MBasicBlock* successor = src->getSuccessor(0); + if (successor->id() <= src->id()) + return true; // Don't iloop. + src = successor; + break; + } + default: + return true; + } + } + return false; +} + +static void +IonSpewDependency(MInstruction* load, MInstruction* store, const char* verb, const char* reason) +{ + if (!JitSpewEnabled(JitSpew_Alias)) + return; + + Fprinter& out = JitSpewPrinter(); + out.printf("Load "); + load->printName(out); + out.printf(" %s on store ", verb); + store->printName(out); + out.printf(" (%s)\n", reason); +} + +static void +IonSpewAliasInfo(const char* pre, MInstruction* ins, const char* post) +{ + if (!JitSpewEnabled(JitSpew_Alias)) + return; + + Fprinter& out = JitSpewPrinter(); + out.printf("%s ", pre); + ins->printName(out); + out.printf(" %s\n", post); +} + +// This pass annotates every load instruction with the last store instruction +// on which it depends. The algorithm is optimistic in that it ignores explicit +// dependencies and only considers loads and stores. +// +// Loads inside loops only have an implicit dependency on a store before the +// loop header if no instruction inside the loop body aliases it. To calculate +// this efficiently, we maintain a list of maybe-invariant loads and the combined +// alias set for all stores inside the loop. When we see the loop's backedge, this +// information is used to mark every load we wrongly assumed to be loop invariant as +// having an implicit dependency on the last instruction of the loop header, so that +// it's never moved before the loop header. +// +// The algorithm depends on the invariant that both control instructions and effectful +// instructions (stores) are never hoisted. +bool +AliasAnalysis::analyze() +{ + Vector stores(alloc()); + + // Initialize to the first instruction. 
+    MInstruction* firstIns = *graph_.entryBlock()->begin();
+    for (unsigned i = 0; i < AliasSet::NumCategories; i++) {
+        MInstructionVector defs(alloc());
+        if (!defs.append(firstIns))
+            return false;
+        if (!stores.append(Move(defs)))
+            return false;
+    }
+
+    // Type analysis may have inserted new instructions. Since this pass depends
+    // on the instruction number ordering, all instructions are renumbered.
+    uint32_t newId = 0;
+
+    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
+        if (mir->shouldCancel("Alias Analysis (main loop)"))
+            return false;
+
+        if (block->isLoopHeader()) {
+            JitSpew(JitSpew_Alias, "Processing loop header %d", block->id());
+            loop_ = new(alloc()) LoopAliasInfo(alloc(), loop_, *block);
+        }
+
+        for (MPhiIterator def(block->phisBegin()), end(block->phisEnd()); def != end; ++def)
+            def->setId(newId++);
+
+        for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
+             def != end;
+             ++def)
+        {
+            def->setId(newId++);
+
+            AliasSet set = def->getAliasSet();
+            if (set.isNone())
+                continue;
+
+            // For the purposes of alias analysis, all recoverable operations
+            // are treated as effect free as the memory represented by these
+            // operations cannot be aliased by others.
+            if (def->canRecoverOnBailout())
+                continue;
+
+            if (set.isStore()) {
+                for (AliasSetIterator iter(set); iter; iter++) {
+                    if (!stores[*iter].append(*def))
+                        return false;
+                }
+
+                if (JitSpewEnabled(JitSpew_Alias)) {
+                    Fprinter& out = JitSpewPrinter();
+                    out.printf("Processing store ");
+                    def->printName(out);
+                    out.printf(" (flags %x)\n", set.flags());
+                }
+            } else {
+                // Find the most recent store on which this instruction depends.
+                MInstruction* lastStore = firstIns;
+
+                for (AliasSetIterator iter(set); iter; iter++) {
+                    MInstructionVector& aliasedStores = stores[*iter];
+                    for (int i = aliasedStores.length() - 1; i >= 0; i--) {
+                        MInstruction* store = aliasedStores[i];
+                        if (genericMightAlias(*def, store) != MDefinition::AliasType::NoAlias &&
+                            def->mightAlias(store) != MDefinition::AliasType::NoAlias &&
+                            BlockMightReach(store->block(), *block))
+                        {
+                            if (lastStore->id() < store->id())
+                                lastStore = store;
+                            break;
+                        }
+                    }
+                }
+
+                def->setDependency(lastStore);
+                IonSpewDependency(*def, lastStore, "depends", "");
+
+                // If the last store was before the current loop, we assume this load
+                // is loop invariant. If a later instruction writes to the same location,
+                // we will fix this at the end of the loop.
+                if (loop_ && lastStore->id() < loop_->firstInstruction()->id()) {
+                    if (!loop_->addInvariantLoad(*def))
+                        return false;
+                }
+            }
+        }
+
+        // Renumber the last instruction, as the analysis depends on this and the order.
+        block->lastIns()->setId(newId++);
+
+        if (block->isLoopBackedge()) {
+            MOZ_ASSERT(loop_->loopHeader() == block->loopHeaderOfBackedge());
+            JitSpew(JitSpew_Alias, "Processing loop backedge %d (header %d)", block->id(),
+                    loop_->loopHeader()->id());
+            LoopAliasInfo* outerLoop = loop_->outer();
+            MInstruction* firstLoopIns = *loop_->loopHeader()->begin();
+
+            const MInstructionVector& invariant = loop_->invariantLoads();
+
+            for (unsigned i = 0; i < invariant.length(); i++) {
+                MInstruction* ins = invariant[i];
+                AliasSet set = ins->getAliasSet();
+                MOZ_ASSERT(set.isLoad());
+
+                bool hasAlias = false;
+                for (AliasSetIterator iter(set); iter; iter++) {
+                    MInstructionVector& aliasedStores = stores[*iter];
+                    for (int i = aliasedStores.length() - 1;; i--) {
+                        MInstruction* store = aliasedStores[i];
+                        if (store->id() < firstLoopIns->id())
+                            break;
+                        if (genericMightAlias(ins, store) != MDefinition::AliasType::NoAlias &&
+                            ins->mightAlias(store) != MDefinition::AliasType::NoAlias)
+                        {
+                            hasAlias = true;
+                            IonSpewDependency(ins, store, "aliases", "store in loop body");
+                            break;
+                        }
+                    }
+                    if (hasAlias)
+                        break;
+                }
+
+                if (hasAlias) {
+                    // This instruction depends on stores inside the loop body. Mark it as having a
+                    // dependency on the last instruction of the loop header. The last instruction is a
+                    // control instruction and these are never hoisted.
+                    MControlInstruction* controlIns = loop_->loopHeader()->lastIns();
+                    IonSpewDependency(ins, controlIns, "depends", "due to stores in loop body");
+                    ins->setDependency(controlIns);
+                } else {
+                    IonSpewAliasInfo("Load", ins, "does not depend on any stores in this loop");
+
+                    if (outerLoop && ins->dependency()->id() < outerLoop->firstInstruction()->id()) {
+                        IonSpewAliasInfo("Load", ins, "may be invariant in outer loop");
+                        if (!outerLoop->addInvariantLoad(ins))
+                            return false;
+                    }
+                }
+            }
+            loop_ = loop_->outer();
+        }
+    }
+
+    spewDependencyList();
+
+    MOZ_ASSERT(loop_ == nullptr);
+    return true;
+}
diff --git a/js/src/jit/AliasAnalysis.h b/js/src/jit/AliasAnalysis.h
new file mode 100644
index 000000000..9d9dabc17
--- /dev/null
+++ b/js/src/jit/AliasAnalysis.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AliasAnalysis_h
+#define jit_AliasAnalysis_h
+
+#include "jit/AliasAnalysisShared.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class LoopAliasInfo;
+
+class AliasAnalysis : public AliasAnalysisShared
+{
+    LoopAliasInfo* loop_;
+
+  public:
+    AliasAnalysis(MIRGenerator* mir, MIRGraph& graph);
+    MOZ_MUST_USE bool analyze() override;
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AliasAnalysis_h */
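The analyze() comment above describes the core bookkeeping of this pass: every store is appended to a per-alias-category list, and every load is annotated with the most recent store in any category it reads. Below is a rough, self-contained C++ sketch of that bookkeeping over a straight-line sequence of operations; the Op struct and the plain category bitmask are hypothetical stand-ins for MIR instructions and AliasSet, and the sketch ignores basic blocks, loops, and the TI-based checks, so it only illustrates the last-store tracking, not the pass itself.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for an MIR instruction: an id, a bitmask of alias
// categories it touches, and whether it is a store (effectful) or a load.
struct Op {
    uint32_t id;
    uint32_t categories;
    bool isStore;
    uint32_t dependency;   // for loads: id of the last aliasing store
};

int main() {
    // Op 0 plays the role of the sentinel "first instruction".
    std::vector<Op> ops = {
        {0, 0x1, true,  0},   // store touching category 0
        {1, 0x2, true,  0},   // store touching category 1
        {2, 0x1, false, 0},   // load from category 0 -> should depend on op 0
        {3, 0x2, false, 0},   // load from category 1 -> should depend on op 1
    };

    // lastStore[c] holds the id of the most recent store touching category c.
    uint32_t lastStore[32] = {0};

    for (Op& op : ops) {
        if (op.isStore) {
            for (unsigned c = 0; c < 32; c++) {
                if (op.categories & (1u << c))
                    lastStore[c] = op.id;
            }
        } else {
            uint32_t dep = 0;
            for (unsigned c = 0; c < 32; c++) {
                if ((op.categories & (1u << c)) && lastStore[c] > dep)
                    dep = lastStore[c];
            }
            op.dependency = dep;
            std::printf("load %u depends on store %u\n", op.id, dep);
        }
    }
    return 0;
}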
*/ + +#include "jit/AliasAnalysisShared.h" + +#include "jit/MIR.h" + +namespace js { +namespace jit { + +void +AliasAnalysisShared::spewDependencyList() +{ +#ifdef JS_JITSPEW + if (JitSpewEnabled(JitSpew_AliasSummaries)) { + Fprinter &print = JitSpewPrinter(); + JitSpewHeader(JitSpew_AliasSummaries); + print.printf("Dependency list for other passes:\n"); + + for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) { + for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns())); + def != end; + ++def) + { + if (!def->dependency()) + continue; + if (!def->getAliasSet().isLoad()) + continue; + + JitSpewHeader(JitSpew_AliasSummaries); + print.printf(" "); + MDefinition::PrintOpcodeName(print, def->op()); + print.printf("%d marked depending on ", def->id()); + MDefinition::PrintOpcodeName(print, def->dependency()->op()); + print.printf("%d\n", def->dependency()->id()); + } + } + } +#endif +} + +// Unwrap any slot or element to its corresponding object. +static inline const MDefinition* +MaybeUnwrap(const MDefinition* object) +{ + + while (object->isSlots() || object->isElements() || object->isConvertElementsToDoubles()) { + MOZ_ASSERT(object->numOperands() == 1); + object = object->getOperand(0); + } + + if (object->isTypedArrayElements()) + return nullptr; + if (object->isTypedObjectElements()) + return nullptr; + if (object->isConstantElements()) + return nullptr; + + return object; +} + +// Get the object of any load/store. Returns nullptr if not tied to +// an object. +static inline const MDefinition* +GetObject(const MDefinition* ins) +{ + if (!ins->getAliasSet().isStore() && !ins->getAliasSet().isLoad()) + return nullptr; + + // Note: only return the object if that objects owns that property. + // I.e. the poperty isn't on the prototype chain. 
+    const MDefinition* object = nullptr;
+    switch (ins->op()) {
+      case MDefinition::Op_InitializedLength:
+      case MDefinition::Op_LoadElement:
+      case MDefinition::Op_LoadUnboxedScalar:
+      case MDefinition::Op_LoadUnboxedObjectOrNull:
+      case MDefinition::Op_LoadUnboxedString:
+      case MDefinition::Op_StoreElement:
+      case MDefinition::Op_StoreUnboxedObjectOrNull:
+      case MDefinition::Op_StoreUnboxedString:
+      case MDefinition::Op_StoreUnboxedScalar:
+      case MDefinition::Op_SetInitializedLength:
+      case MDefinition::Op_ArrayLength:
+      case MDefinition::Op_SetArrayLength:
+      case MDefinition::Op_StoreElementHole:
+      case MDefinition::Op_FallibleStoreElement:
+      case MDefinition::Op_TypedObjectDescr:
+      case MDefinition::Op_Slots:
+      case MDefinition::Op_Elements:
+      case MDefinition::Op_MaybeCopyElementsForWrite:
+      case MDefinition::Op_MaybeToDoubleElement:
+      case MDefinition::Op_UnboxedArrayLength:
+      case MDefinition::Op_UnboxedArrayInitializedLength:
+      case MDefinition::Op_IncrementUnboxedArrayInitializedLength:
+      case MDefinition::Op_SetUnboxedArrayInitializedLength:
+      case MDefinition::Op_TypedArrayLength:
+      case MDefinition::Op_SetTypedObjectOffset:
+      case MDefinition::Op_SetDisjointTypedElements:
+      case MDefinition::Op_ArrayPopShift:
+      case MDefinition::Op_ArrayPush:
+      case MDefinition::Op_ArraySlice:
+      case MDefinition::Op_LoadTypedArrayElementHole:
+      case MDefinition::Op_StoreTypedArrayElementHole:
+      case MDefinition::Op_LoadFixedSlot:
+      case MDefinition::Op_LoadFixedSlotAndUnbox:
+      case MDefinition::Op_StoreFixedSlot:
+      case MDefinition::Op_GetPropertyPolymorphic:
+      case MDefinition::Op_SetPropertyPolymorphic:
+      case MDefinition::Op_GuardShape:
+      case MDefinition::Op_GuardReceiverPolymorphic:
+      case MDefinition::Op_GuardObjectGroup:
+      case MDefinition::Op_GuardObjectIdentity:
+      case MDefinition::Op_GuardClass:
+      case MDefinition::Op_GuardUnboxedExpando:
+      case MDefinition::Op_LoadUnboxedExpando:
+      case MDefinition::Op_LoadSlot:
+      case MDefinition::Op_StoreSlot:
+      case MDefinition::Op_InArray:
+      case MDefinition::Op_LoadElementHole:
+      case MDefinition::Op_TypedArrayElements:
+      case MDefinition::Op_TypedObjectElements:
+        object = ins->getOperand(0);
+        break;
+      case MDefinition::Op_GetPropertyCache:
+      case MDefinition::Op_LoadTypedArrayElementStatic:
+      case MDefinition::Op_StoreTypedArrayElementStatic:
+      case MDefinition::Op_GetDOMProperty:
+      case MDefinition::Op_GetDOMMember:
+      case MDefinition::Op_Call:
+      case MDefinition::Op_Compare:
+      case MDefinition::Op_GetArgumentsObjectArg:
+      case MDefinition::Op_SetArgumentsObjectArg:
+      case MDefinition::Op_GetFrameArgument:
+      case MDefinition::Op_SetFrameArgument:
+      case MDefinition::Op_CompareExchangeTypedArrayElement:
+      case MDefinition::Op_AtomicExchangeTypedArrayElement:
+      case MDefinition::Op_AtomicTypedArrayElementBinop:
+      case MDefinition::Op_AsmJSLoadHeap:
+      case MDefinition::Op_AsmJSStoreHeap:
+      case MDefinition::Op_WasmLoad:
+      case MDefinition::Op_WasmStore:
+      case MDefinition::Op_AsmJSCompareExchangeHeap:
+      case MDefinition::Op_AsmJSAtomicBinopHeap:
+      case MDefinition::Op_WasmLoadGlobalVar:
+      case MDefinition::Op_WasmStoreGlobalVar:
+      case MDefinition::Op_ArrayJoin:
+        return nullptr;
+      default:
+#ifdef DEBUG
+        // Crash when the default aliasSet is overridden but the instruction is not added to the list above.
+        if (!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any)
+            MOZ_CRASH("Overridden getAliasSet without updating AliasAnalysisShared GetObject");
+#endif
+
+        return nullptr;
+    }
+
+    MOZ_ASSERT(!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any);
+    object = MaybeUnwrap(object);
+    MOZ_ASSERT_IF(object, object->type() == MIRType::Object);
+    return object;
+}
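The genericMightAlias() helper that follows declares a load and a store no-alias only when both base objects can be identified and their inferred type sets do not intersect. A rough standalone illustration of that disjointness test is sketched below, with a sorted std::vector standing in for SpiderMonkey's TemporaryTypeSet (the types and names here are hypothetical, not the engine's API):

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

// Hypothetical stand-in for an inferred object type set: a sorted list of
// object-group ids that the value may belong to.
using TypeSet = std::vector<int>;

static bool typeSetsIntersect(const TypeSet& a, const TypeSet& b) {
    std::vector<int> common;
    std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                          std::back_inserter(common));
    return !common.empty();
}

int main() {
    TypeSet loadObjectTypes  = {1, 4, 7};   // groups the load's base object may have
    TypeSet storeObjectTypes = {2, 3, 5};   // groups the store's base object may have

    if (!typeSetsIntersect(loadObjectTypes, storeObjectTypes))
        std::printf("no alias: the load cannot observe the store\n");
    else
        std::printf("may alias: keep the dependency\n");
    return 0;
}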
+
+// Generic comparing if a load aliases a store using TI information.
+MDefinition::AliasType
+AliasAnalysisShared::genericMightAlias(const MDefinition* load, const MDefinition* store)
+{
+    const MDefinition* loadObject = GetObject(load);
+    const MDefinition* storeObject = GetObject(store);
+    if (!loadObject || !storeObject)
+        return MDefinition::AliasType::MayAlias;
+
+    if (!loadObject->resultTypeSet() || !storeObject->resultTypeSet())
+        return MDefinition::AliasType::MayAlias;
+
+    if (loadObject->resultTypeSet()->objectsIntersect(storeObject->resultTypeSet()))
+        return MDefinition::AliasType::MayAlias;
+
+    return MDefinition::AliasType::NoAlias;
+}
+
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/AliasAnalysisShared.h b/js/src/jit/AliasAnalysisShared.h
new file mode 100644
index 000000000..dc19bdb16
--- /dev/null
+++ b/js/src/jit/AliasAnalysisShared.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AliasAnalysisShared_h
+#define jit_AliasAnalysisShared_h
+
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+
+namespace js {
+namespace jit {
+
+class MIRGraph;
+
+class AliasAnalysisShared
+{
+  protected:
+    MIRGenerator* mir;
+    MIRGraph& graph_;
+
+  public:
+    AliasAnalysisShared(MIRGenerator* mir, MIRGraph& graph)
+      : mir(mir),
+        graph_(graph)
+    {}
+
+    virtual MOZ_MUST_USE bool analyze() {
+        return true;
+    }
+
+    static MDefinition::AliasType genericMightAlias(const MDefinition* load,
+                                                    const MDefinition* store);
+
+
+  protected:
+    void spewDependencyList();
+
+    TempAllocator& alloc() const {
+        return graph_.alloc();
+    }
+};
+
+// Iterates over the flags in an AliasSet.
+class AliasSetIterator
+{
+  private:
+    uint32_t flags;
+    unsigned pos;
+
+  public:
+    explicit AliasSetIterator(AliasSet set)
+      : flags(set.flags()), pos(0)
+    {
+        while (flags && (flags & 1) == 0) {
+            flags >>= 1;
+            pos++;
+        }
+    }
+    AliasSetIterator& operator ++(int) {
+        do {
+            flags >>= 1;
+            pos++;
+        } while (flags && (flags & 1) == 0);
+        return *this;
+    }
+    explicit operator bool() const {
+        return !!flags;
+    }
+    unsigned operator*() const {
+        MOZ_ASSERT(pos < AliasSet::NumCategories);
+        return pos;
+    }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AliasAnalysisShared_h */
diff --git a/js/src/jit/AlignmentMaskAnalysis.cpp b/js/src/jit/AlignmentMaskAnalysis.cpp
new file mode 100644
index 000000000..d4fefec07
--- /dev/null
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/AlignmentMaskAnalysis.h" +#include "jit/MIR.h" +#include "jit/MIRGraph.h" + +using namespace js; +using namespace jit; + +static bool +IsAlignmentMask(uint32_t m) +{ + // Test whether m is just leading ones and trailing zeros. + return (-m & ~m) == 0; +} + +static void +AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph) +{ + // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result, + // since the users of the BitAnd include heap accesses. This will expose + // the redundancy for GVN when expressions like this: + // a&m + // (a+1)&m, + // (a+2)&m, + // are transformed into this: + // a&m + // (a&m)+1 + // (a&m)+2 + // and it will allow the constants to be folded by the + // EffectiveAddressAnalysis pass. + // + // Putting the add on the outside might seem like it exposes other users of + // the expression to the possibility of i32 overflow, if we aren't in wasm + // and they aren't naturally truncating. However, since we use MAdd::New + // with MIRType::Int32, we make sure that the value is truncated, just as it + // would be by the MBitAnd. + + MOZ_ASSERT(IsCompilingWasm()); + + if (!ptr->isBitAnd()) + return; + + MDefinition* lhs = ptr->toBitAnd()->getOperand(0); + MDefinition* rhs = ptr->toBitAnd()->getOperand(1); + if (lhs->isConstant()) + mozilla::Swap(lhs, rhs); + if (!lhs->isAdd() || !rhs->isConstant()) + return; + + MDefinition* op0 = lhs->toAdd()->getOperand(0); + MDefinition* op1 = lhs->toAdd()->getOperand(1); + if (op0->isConstant()) + mozilla::Swap(op0, op1); + if (!op1->isConstant()) + return; + + uint32_t i = op1->toConstant()->toInt32(); + uint32_t m = rhs->toConstant()->toInt32(); + if (!IsAlignmentMask(m) || (i & m) != i) + return; + + // The pattern was matched! Produce the replacement expression. + MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32); + ptr->block()->insertBefore(ptr->toBitAnd(), and_); + MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32); + ptr->block()->insertBefore(ptr->toBitAnd(), add); + ptr->replaceAllUsesWith(add); + ptr->block()->discard(ptr->toBitAnd()); +} + +bool +AlignmentMaskAnalysis::analyze() +{ + for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) { + for (MInstructionIterator i = block->begin(); i != block->end(); i++) { + if (!graph_.alloc().ensureBallast()) + return false; + + // Note that we don't check for MAsmJSCompareExchangeHeap + // or MAsmJSAtomicBinopHeap, because the backend and the OOB + // mechanism don't support non-zero offsets for them yet. + if (i->isAsmJSLoadHeap()) + AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_); + else if (i->isAsmJSStoreHeap()) + AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_); + } + } + return true; +} diff --git a/js/src/jit/AlignmentMaskAnalysis.h b/js/src/jit/AlignmentMaskAnalysis.h new file mode 100644 index 000000000..a455f29a2 --- /dev/null +++ b/js/src/jit/AlignmentMaskAnalysis.h @@ -0,0 +1,32 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_AlignmentMaskAnalysis_h +#define jit_AlignmentMaskAnalysis_h + +#include "mozilla/Attributes.h" + +namespace js { +namespace jit { + +class MIRGraph; + +class AlignmentMaskAnalysis +{ + MIRGraph& graph_; + + public: + explicit AlignmentMaskAnalysis(MIRGraph& graph) + : graph_(graph) + {} + + MOZ_MUST_USE bool analyze(); +}; + +} /* namespace jit */ +} /* namespace js */ + +#endif /* jit_AlignmentMaskAnalysis_h */ diff --git a/js/src/jit/AtomicOp.h b/js/src/jit/AtomicOp.h new file mode 100644 index 000000000..9a686cdd7 --- /dev/null +++ b/js/src/jit/AtomicOp.h @@ -0,0 +1,73 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_AtomicOp_h +#define jit_AtomicOp_h + +namespace js { +namespace jit { + +// Types of atomic operation, shared by MIR and LIR. + +enum AtomicOp { + AtomicFetchAddOp, + AtomicFetchSubOp, + AtomicFetchAndOp, + AtomicFetchOrOp, + AtomicFetchXorOp +}; + +// Memory barrier types, shared by MIR and LIR. +// +// MembarSynchronizing is here because some platforms can make the +// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS) +// but there's been no reason to use it yet. + +enum MemoryBarrierBits { + MembarLoadLoad = 1, + MembarLoadStore = 2, + MembarStoreStore = 4, + MembarStoreLoad = 8, + + MembarSynchronizing = 16, + + // For validity testing + MembarNobits = 0, + MembarAllbits = 31, +}; + +static inline constexpr MemoryBarrierBits +operator|(MemoryBarrierBits a, MemoryBarrierBits b) +{ + return MemoryBarrierBits(int(a) | int(b)); +} + +static inline constexpr MemoryBarrierBits +operator&(MemoryBarrierBits a, MemoryBarrierBits b) +{ + return MemoryBarrierBits(int(a) & int(b)); +} + +static inline constexpr MemoryBarrierBits +operator~(MemoryBarrierBits a) +{ + return MemoryBarrierBits(~int(a)); +} + +// Standard barrier bits for a full barrier. +static constexpr MemoryBarrierBits MembarFull = MembarLoadLoad|MembarLoadStore|MembarStoreLoad|MembarStoreStore; + +// Standard sets of barrier bits for atomic loads and stores. +// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more. +static constexpr MemoryBarrierBits MembarBeforeLoad = MembarNobits; +static constexpr MemoryBarrierBits MembarAfterLoad = MembarLoadLoad|MembarLoadStore; +static constexpr MemoryBarrierBits MembarBeforeStore = MembarStoreStore; +static constexpr MemoryBarrierBits MembarAfterStore = MembarStoreLoad; + +} // namespace jit +} // namespace js + +#endif /* jit_AtomicOp_h */ diff --git a/js/src/jit/AtomicOperations.h b/js/src/jit/AtomicOperations.h new file mode 100644 index 000000000..42aee72eb --- /dev/null +++ b/js/src/jit/AtomicOperations.h @@ -0,0 +1,353 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_AtomicOperations_h +#define jit_AtomicOperations_h + +#include "mozilla/Types.h" + +#include "vm/SharedMem.h" + +namespace js { +namespace jit { + +class RegionLock; + +/* + * The atomic operations layer defines types and functions for + * JIT-compatible atomic operation. 
+ * + * The fundamental constraints on the functions are: + * + * - That their realization here MUST be compatible with code the JIT + * generates for its Atomics operations, so that an atomic access + * from the interpreter or runtime - from any C++ code - really is + * atomic relative to a concurrent, compatible atomic access from + * jitted code. That is, these primitives expose JIT-compatible + * atomicity functionality to C++. + * + * - That accesses may race without creating C++ undefined behavior: + * atomic accesses (marked "SeqCst") may race with non-atomic + * accesses (marked "SafeWhenRacy"); overlapping but non-matching, + * and hence incompatible, atomic accesses may race; and non-atomic + * accesses may race. The effects of races need not be predictable, + * so garbage can be produced by a read or written by a write, but + * the effects must be benign: the program must continue to run, and + * only the memory in the union of addresses named in the racing + * accesses may be affected. + * + * The compatibility constraint means that if the JIT makes dynamic + * decisions about how to implement atomic operations then + * corresponding dynamic decisions MUST be made in the implementations + * of the functions below. + * + * The safe-for-races constraint means that by and large, it is hard + * to implement these primitives in C++. See "Implementation notes" + * below. + * + * The "SeqCst" suffix on operations means "sequentially consistent" + * and means such a function's operation must have "sequentially + * consistent" memory ordering. See mfbt/Atomics.h for an explanation + * of this memory ordering. + * + * Note that a "SafeWhenRacy" access does not provide the atomicity of + * a "relaxed atomic" access: it can read or write garbage if there's + * a race. + * + * + * Implementation notes. + * + * It's not a requirement that these functions be inlined; performance + * is not a great concern. On some platforms these functions may call + * out to code that's generated at run time. + * + * In principle these functions will not be written in C++, thus + * making races defined behavior if all racy accesses from C++ go via + * these functions. (Jitted code will always be safe for races and + * provides the same guarantees as these functions.) + * + * The appropriate implementations will be platform-specific and + * there are some obvious implementation strategies to choose + * from, sometimes a combination is appropriate: + * + * - generating the code at run-time with the JIT; + * - hand-written assembler (maybe inline); or + * - using special compiler intrinsics or directives. + * + * Trusting the compiler not to generate code that blows up on a + * race definitely won't work in the presence of TSan, or even of + * optimizing compilers in seemingly-"innocuous" conditions. (See + * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf + * for details.) + */ +class AtomicOperations +{ + friend class RegionLock; + + private: + // The following functions are defined for T = int8_t, uint8_t, + // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t. + + // Atomically read *addr. + template + static inline T loadSeqCst(T* addr); + + // Atomically store val in *addr. + template + static inline void storeSeqCst(T* addr, T val); + + // Atomically store val in *addr and return the old value of *addr. 
+    template<typename T>
+    static inline T exchangeSeqCst(T* addr, T val);
+
+    // Atomically check that *addr contains oldval and if so replace it
+    // with newval, in any case returning the old contents of *addr.
+    template<typename T>
+    static inline T compareExchangeSeqCst(T* addr, T oldval, T newval);
+
+    // The following functions are defined for T = int8_t, uint8_t,
+    // int16_t, uint16_t, int32_t, uint32_t only.
+
+    // Atomically add, subtract, bitwise-AND, bitwise-OR, or bitwise-XOR
+    // val into *addr and return the old value of *addr.
+    template<typename T>
+    static inline T fetchAddSeqCst(T* addr, T val);
+
+    template<typename T>
+    static inline T fetchSubSeqCst(T* addr, T val);
+
+    template<typename T>
+    static inline T fetchAndSeqCst(T* addr, T val);
+
+    template<typename T>
+    static inline T fetchOrSeqCst(T* addr, T val);
+
+    template<typename T>
+    static inline T fetchXorSeqCst(T* addr, T val);
+
+    // The SafeWhenRacy functions are to be used when C++ code has to access
+    // memory without synchronization and can't guarantee that there
+    // won't be a race on the access.
+
+    // Defined for all the integral types as well as for float32 and float64.
+    template<typename T>
+    static inline T loadSafeWhenRacy(T* addr);
+
+    // Defined for all the integral types as well as for float32 and float64.
+    template<typename T>
+    static inline void storeSafeWhenRacy(T* addr, T val);
+
+    // Replacement for memcpy().
+    static inline void memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes);
+
+    // Replacement for memmove().
+    static inline void memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes);
+
+  public:
+    // Test lock-freedom for any int32 value. This implements the
+    // Atomics::isLockFree() operation in the Shared Memory and
+    // Atomics specification, as follows:
+    //
+    // 1, 2, and 4 bytes are always lock free (in SpiderMonkey).
+    //
+    // Lock-freedom for 8 bytes is determined by the platform's
+    // isLockfree8(). However, the spec stipulates that isLockFree(8)
+    // is true only if there is an integer array that admits atomic
+    // operations whose BYTES_PER_ELEMENT=8; at the moment (February
+    // 2016) there are no such arrays.
+    //
+    // There is no lock-freedom for any other values on any platform.
+    static inline bool isLockfree(int32_t n);
+
+    // If the return value is true then a call to the 64-bit (8-byte)
+    // routines below will work, otherwise those functions will assert in
+    // debug builds and may crash in release builds. (See the code in
+    // ../arm for an example.) The value of this call does not change
+    // during execution.
+    static inline bool isLockfree8();
+
+    // Execute a full memory barrier (LoadLoad+LoadStore+StoreLoad+StoreStore).
+    static inline void fenceSeqCst();
+
+    // All clients should use the APIs that take SharedMem pointers.
+    // See above for semantics and acceptable types.
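+    //
+    // Illustrative sketch only (not an exhaustive list of call sites): code
+    // holding a SharedMem<int32_t*> view into shared memory, say |cell|,
+    // would typically go through these wrappers like so:
+    //
+    //   int32_t old = AtomicOperations::fetchAddSeqCst(cell, 1);
+    //   AtomicOperations::storeSeqCst(cell, old + 1);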
+
+    template<typename T>
+    static T loadSeqCst(SharedMem<T*> addr) {
+        return loadSeqCst(addr.unwrap());
+    }
+
+    template<typename T>
+    static void storeSeqCst(SharedMem<T*> addr, T val) {
+        return storeSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T exchangeSeqCst(SharedMem<T*> addr, T val) {
+        return exchangeSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T compareExchangeSeqCst(SharedMem<T*> addr, T oldval, T newval) {
+        return compareExchangeSeqCst(addr.unwrap(), oldval, newval);
+    }
+
+    template<typename T>
+    static T fetchAddSeqCst(SharedMem<T*> addr, T val) {
+        return fetchAddSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T fetchSubSeqCst(SharedMem<T*> addr, T val) {
+        return fetchSubSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T fetchAndSeqCst(SharedMem<T*> addr, T val) {
+        return fetchAndSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T fetchOrSeqCst(SharedMem<T*> addr, T val) {
+        return fetchOrSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T fetchXorSeqCst(SharedMem<T*> addr, T val) {
+        return fetchXorSeqCst(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static T loadSafeWhenRacy(SharedMem<T*> addr) {
+        return loadSafeWhenRacy(addr.unwrap());
+    }
+
+    template<typename T>
+    static void storeSafeWhenRacy(SharedMem<T*> addr, T val) {
+        return storeSafeWhenRacy(addr.unwrap(), val);
+    }
+
+    template<typename T>
+    static void memcpySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
+        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
+                           src.template cast<void*>().unwrap(), nbytes);
+    }
+
+    template<typename T>
+    static void memcpySafeWhenRacy(SharedMem<T*> dest, T* src, size_t nbytes) {
+        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(), static_cast<void*>(src), nbytes);
+    }
+
+    template<typename T>
+    static void memcpySafeWhenRacy(T* dest, SharedMem<T*> src, size_t nbytes) {
+        memcpySafeWhenRacy(static_cast<void*>(dest), src.template cast<void*>().unwrap(), nbytes);
+    }
+
+    template<typename T>
+    static void memmoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
+        memmoveSafeWhenRacy(dest.template cast<void*>().unwrap(),
+                            src.template cast<void*>().unwrap(), nbytes);
+    }
+
+    template<typename T>
+    static void podCopySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+        memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
+    }
+
+    template<typename T>
+    static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+        memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
+    }
+};
+
+/* A data type representing a lock on some region of a
+ * SharedArrayRawBuffer's memory, to be used only when the hardware
+ * does not provide necessary atomicity (eg, float64 access on ARMv6
+ * and some ARMv7 systems).
+ */
+class RegionLock
+{
+  public:
+    RegionLock() : spinlock(0) {}
+
+    /* Addr is the address to be locked, nbytes the number of bytes we
+     * need to lock. The lock that is taken may cover a larger range
+     * of bytes.
+     */
+    template<size_t nbytes>
+    void acquire(void* addr);
+
+    /* Addr is the address to be unlocked, nbytes the number of bytes
+     * we need to unlock. The lock must be held by the calling thread,
+     * at the given address and for the number of bytes.
+     */
+    template<size_t nbytes>
+    void release(void* addr);
+
+  private:
+    /* For now, a simple spinlock that covers the entire buffer. */
+    uint32_t spinlock;
+};
+
+inline bool
+AtomicOperations::isLockfree(int32_t size)
+{
+    // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.
+
+    switch (size) {
+      case 1:
+        return true;
+      case 2:
+        return true;
+      case 4:
+        // The spec requires Atomics.isLockFree(4) to return true.
+ return true; + case 8: + // The spec requires Atomics.isLockFree(n) to return false + // unless n is the BYTES_PER_ELEMENT value of some integer + // TypedArray that admits atomic operations. At the time of + // writing (February 2016) there is no such array with n=8. + // return AtomicOperations::isLockfree8(); + return false; + default: + return false; + } +} + +} // namespace jit +} // namespace js + +#if defined(JS_CODEGEN_ARM) +# include "jit/arm/AtomicOperations-arm.h" +#elif defined(JS_CODEGEN_ARM64) +# include "jit/arm64/AtomicOperations-arm64.h" +#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) +# include "jit/mips-shared/AtomicOperations-mips-shared.h" +#elif defined(__ppc__) || defined(__PPC__) +# include "jit/none/AtomicOperations-ppc.h" +#elif defined(__sparc__) +# include "jit/none/AtomicOperations-sparc.h" +#elif defined(JS_CODEGEN_NONE) + // You can disable the JIT with --disable-ion but you must still + // provide the atomic operations that will be used by the JS engine. + // When the JIT is disabled the operations are simply safe-for-races + // C++ realizations of atomics. These operations cannot be written + // in portable C++, hence the default here is to crash. See the + // top of the file for more guidance. +# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__) +# include "jit/none/AtomicOperations-ppc.h" +# elif defined(__aarch64__) +# include "jit/arm64/AtomicOperations-arm64.h" +# else +# include "jit/none/AtomicOperations-none.h" // These MOZ_CRASH() always +# endif +#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) +# include "jit/x86-shared/AtomicOperations-x86-shared.h" +#else +# error "Atomic operations must be defined for this platform" +#endif + +#endif // jit_AtomicOperations_h diff --git a/js/src/jit/BacktrackingAllocator.cpp b/js/src/jit/BacktrackingAllocator.cpp new file mode 100644 index 000000000..94ef25785 --- /dev/null +++ b/js/src/jit/BacktrackingAllocator.cpp @@ -0,0 +1,3124 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/BacktrackingAllocator.h" + +#include "jsprf.h" + +#include "jit/BitSet.h" + +using namespace js; +using namespace js::jit; + +using mozilla::DebugOnly; + +///////////////////////////////////////////////////////////////////// +// Utility +///////////////////////////////////////////////////////////////////// + +static inline bool +SortBefore(UsePosition* a, UsePosition* b) +{ + return a->pos <= b->pos; +} + +static inline bool +SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b) +{ + LiveRange* rangea = LiveRange::get(a); + LiveRange* rangeb = LiveRange::get(b); + MOZ_ASSERT(!rangea->intersects(rangeb)); + return rangea->from() < rangeb->from(); +} + +static inline bool +SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b) +{ + return LiveRange::get(a)->from() <= LiveRange::get(b)->from(); +} + +template +static inline void +InsertSortedList(InlineForwardList &list, T* value) +{ + if (list.empty()) { + list.pushFront(value); + return; + } + + if (SortBefore(list.back(), value)) { + list.pushBack(value); + return; + } + + T* prev = nullptr; + for (InlineForwardListIterator iter = list.begin(); iter; iter++) { + if (SortBefore(value, *iter)) + break; + prev = *iter; + } + + if (prev) + list.insertAfter(prev, value); + else + list.pushFront(value); +} + +///////////////////////////////////////////////////////////////////// +// LiveRange +///////////////////////////////////////////////////////////////////// + +void +LiveRange::addUse(UsePosition* use) +{ + MOZ_ASSERT(covers(use->pos)); + InsertSortedList(uses_, use); +} + +void +LiveRange::distributeUses(LiveRange* other) +{ + MOZ_ASSERT(other->vreg() == vreg()); + MOZ_ASSERT(this != other); + + // Move over all uses which fit in |other|'s boundaries. + for (UsePositionIterator iter = usesBegin(); iter; ) { + UsePosition* use = *iter; + if (other->covers(use->pos)) { + uses_.removeAndIncrement(iter); + other->addUse(use); + } else { + iter++; + } + } + + // Distribute the definition to |other| as well, if possible. 
+ if (hasDefinition() && from() == other->from()) + other->setHasDefinition(); +} + +bool +LiveRange::contains(LiveRange* other) const +{ + return from() <= other->from() && to() >= other->to(); +} + +void +LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const +{ + MOZ_ASSERT(pre->empty() && inside->empty() && post->empty()); + + CodePosition innerFrom = from(); + if (from() < other->from()) { + if (to() < other->from()) { + *pre = range_; + return; + } + *pre = Range(from(), other->from()); + innerFrom = other->from(); + } + + CodePosition innerTo = to(); + if (to() > other->to()) { + if (from() >= other->to()) { + *post = range_; + return; + } + *post = Range(other->to(), to()); + innerTo = other->to(); + } + + if (innerFrom != innerTo) + *inside = Range(innerFrom, innerTo); +} + +bool +LiveRange::intersects(LiveRange* other) const +{ + Range pre, inside, post; + intersect(other, &pre, &inside, &post); + return !inside.empty(); +} + +///////////////////////////////////////////////////////////////////// +// SpillSet +///////////////////////////////////////////////////////////////////// + +void +SpillSet::setAllocation(LAllocation alloc) +{ + for (size_t i = 0; i < numSpilledBundles(); i++) + spilledBundle(i)->setAllocation(alloc); +} + +///////////////////////////////////////////////////////////////////// +// LiveBundle +///////////////////////////////////////////////////////////////////// + +#ifdef DEBUG +size_t +LiveBundle::numRanges() const +{ + size_t count = 0; + for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) + count++; + return count; +} +#endif // DEBUG + +LiveRange* +LiveBundle::rangeFor(CodePosition pos) const +{ + for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (range->covers(pos)) + return range; + } + return nullptr; +} + +void +LiveBundle::addRange(LiveRange* range) +{ + MOZ_ASSERT(!range->bundle()); + range->setBundle(this); + InsertSortedList(ranges_, &range->bundleLink); +} + +bool +LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to) +{ + LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to); + if (!range) + return false; + addRange(range); + return true; +} + +bool +LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange, + CodePosition from, CodePosition to) +{ + LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to); + if (!range) + return false; + addRange(range); + oldRange->distributeUses(range); + return true; +} + +LiveRange* +LiveBundle::popFirstRange() +{ + LiveRange::BundleLinkIterator iter = rangesBegin(); + if (!iter) + return nullptr; + + LiveRange* range = LiveRange::get(*iter); + ranges_.removeAt(iter); + + range->setBundle(nullptr); + return range; +} + +void +LiveBundle::removeRange(LiveRange* range) +{ + for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) { + LiveRange* existing = LiveRange::get(*iter); + if (existing == range) { + ranges_.removeAt(iter); + return; + } + } + MOZ_CRASH(); +} + +///////////////////////////////////////////////////////////////////// +// VirtualRegister +///////////////////////////////////////////////////////////////////// + +bool +VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to) +{ + MOZ_ASSERT(from < to); + + // Mark [from,to) as a live range for this register during the initial + // liveness analysis, coalescing with any existing 
overlapping ranges. + + LiveRange* prev = nullptr; + LiveRange* merged = nullptr; + for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) { + LiveRange* existing = LiveRange::get(*iter); + + if (from > existing->to()) { + // The new range should go after this one. + prev = existing; + iter++; + continue; + } + + if (to.next() < existing->from()) { + // The new range should go before this one. + break; + } + + if (!merged) { + // This is the first old range we've found that overlaps the new + // range. Extend this one to cover its union with the new range. + merged = existing; + + if (from < existing->from()) + existing->setFrom(from); + if (to > existing->to()) + existing->setTo(to); + + // Continue searching to see if any other old ranges can be + // coalesced with the new merged range. + iter++; + continue; + } + + // Coalesce this range into the previous range we merged into. + MOZ_ASSERT(existing->from() >= merged->from()); + if (existing->to() > merged->to()) + merged->setTo(existing->to()); + + MOZ_ASSERT(!existing->hasDefinition()); + existing->distributeUses(merged); + MOZ_ASSERT(!existing->hasUses()); + + ranges_.removeAndIncrement(iter); + } + + if (!merged) { + // The new range does not overlap any existing range for the vreg. + LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to); + if (!range) + return false; + + if (prev) + ranges_.insertAfter(&prev->registerLink, &range->registerLink); + else + ranges_.pushFront(&range->registerLink); + } + + return true; +} + +void +VirtualRegister::addInitialUse(UsePosition* use) +{ + LiveRange::get(*rangesBegin())->addUse(use); +} + +void +VirtualRegister::setInitialDefinition(CodePosition from) +{ + LiveRange* first = LiveRange::get(*rangesBegin()); + MOZ_ASSERT(from >= first->from()); + first->setFrom(from); + first->setHasDefinition(); +} + +LiveRange* +VirtualRegister::rangeFor(CodePosition pos, bool preferRegister /* = false */) const +{ + LiveRange* found = nullptr; + for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (range->covers(pos)) { + if (!preferRegister || range->bundle()->allocation().isRegister()) + return range; + if (!found) + found = range; + } + } + return found; +} + +void +VirtualRegister::addRange(LiveRange* range) +{ + InsertSortedList(ranges_, &range->registerLink); +} + +void +VirtualRegister::removeRange(LiveRange* range) +{ + for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) { + LiveRange* existing = LiveRange::get(*iter); + if (existing == range) { + ranges_.removeAt(iter); + return; + } + } + MOZ_CRASH(); +} + +///////////////////////////////////////////////////////////////////// +// BacktrackingAllocator +///////////////////////////////////////////////////////////////////// + +// This function pre-allocates and initializes as much global state as possible +// to avoid littering the algorithms with memory management cruft. +bool +BacktrackingAllocator::init() +{ + if (!RegisterAllocator::init()) + return false; + + liveIn = mir->allocate(graph.numBlockIds()); + if (!liveIn) + return false; + + size_t numVregs = graph.numVirtualRegisters(); + if (!vregs.init(mir->alloc(), numVregs)) + return false; + memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs); + for (uint32_t i = 0; i < numVregs; i++) + new(&vregs[i]) VirtualRegister(); + + // Build virtual register objects. 
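+    // (Every definition, temp and phi output handled below gets its
+    //  VirtualRegister entry initialized here; temps are flagged so later
+    //  phases can tell them apart from ordinary definitions.)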
+ for (size_t i = 0; i < graph.numBlocks(); i++) { + if (mir->shouldCancel("Create data structures (main loop)")) + return false; + + LBlock* block = graph.getBlock(i); + for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) { + if (mir->shouldCancel("Create data structures (inner loop 1)")) + return false; + + for (size_t j = 0; j < ins->numDefs(); j++) { + LDefinition* def = ins->getDef(j); + if (def->isBogusTemp()) + continue; + vreg(def).init(*ins, def, /* isTemp = */ false); + } + + for (size_t j = 0; j < ins->numTemps(); j++) { + LDefinition* def = ins->getTemp(j); + if (def->isBogusTemp()) + continue; + vreg(def).init(*ins, def, /* isTemp = */ true); + } + } + for (size_t j = 0; j < block->numPhis(); j++) { + LPhi* phi = block->getPhi(j); + LDefinition* def = phi->getDef(0); + vreg(def).init(phi, def, /* isTemp = */ false); + } + } + + LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet()); + while (!remainingRegisters.emptyGeneral()) { + AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral()); + registers[reg.code()].allocatable = true; + } + while (!remainingRegisters.emptyFloat()) { + AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat()); + registers[reg.code()].allocatable = true; + } + + LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc(); + for (size_t i = 0; i < AnyRegister::Total; i++) { + registers[i].reg = AnyRegister::FromCode(i); + registers[i].allocations.setAllocator(lifoAlloc); + } + + hotcode.setAllocator(lifoAlloc); + callRanges.setAllocator(lifoAlloc); + + // Partition the graph into hot and cold sections, for helping to make + // splitting decisions. Since we don't have any profiling data this is a + // crapshoot, so just mark the bodies of inner loops as hot and everything + // else as cold. + + LBlock* backedge = nullptr; + for (size_t i = 0; i < graph.numBlocks(); i++) { + LBlock* block = graph.getBlock(i); + + // If we see a loop header, mark the backedge so we know when we have + // hit the end of the loop. Don't process the loop immediately, so that + // if there is an inner loop we will ignore the outer backedge. + if (block->mir()->isLoopHeader()) + backedge = block->mir()->backedge()->lir(); + + if (block == backedge) { + LBlock* header = block->mir()->loopHeaderOfBackedge()->lir(); + LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header), + exitOf(block).next()); + if (!range || !hotcode.insert(range)) + return false; + } + } + + return true; +} + +bool +BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to) +{ + LiveRange* range = LiveRange::FallibleNew(alloc(), 0, from, to); + return range && registers[reg.code()].allocations.insert(range); +} + +#ifdef DEBUG +// Returns true iff ins has a def/temp reusing the input allocation. +static bool +IsInputReused(LInstruction* ins, LUse* use) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) + { + return true; + } + } + + for (size_t i = 0; i < ins->numTemps(); i++) { + if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) + { + return true; + } + } + + return false; +} +#endif + +/* + * This function builds up liveness ranges for all virtual registers + * defined in the function. + * + * The algorithm is based on the one published in: + * + * Wimmer, Christian, and Michael Franz. 
"Linear Scan Register Allocation on + * SSA Form." Proceedings of the International Symposium on Code Generation + * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF. + * + * The algorithm operates on blocks ordered such that dominators of a block + * are before the block itself, and such that all blocks of a loop are + * contiguous. It proceeds backwards over the instructions in this order, + * marking registers live at their uses, ending their live ranges at + * definitions, and recording which registers are live at the top of every + * block. To deal with loop backedges, registers live at the beginning of + * a loop gain a range covering the entire loop. + */ +bool +BacktrackingAllocator::buildLivenessInfo() +{ + JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis"); + + Vector loopWorkList; + BitSet loopDone(graph.numBlockIds()); + if (!loopDone.init(alloc())) + return false; + + for (size_t i = graph.numBlocks(); i > 0; i--) { + if (mir->shouldCancel("Build Liveness Info (main loop)")) + return false; + + LBlock* block = graph.getBlock(i - 1); + MBasicBlock* mblock = block->mir(); + + BitSet& live = liveIn[mblock->id()]; + new (&live) BitSet(graph.numVirtualRegisters()); + if (!live.init(alloc())) + return false; + + // Propagate liveIn from our successors to us. + for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) { + MBasicBlock* successor = mblock->lastIns()->getSuccessor(i); + // Skip backedges, as we fix them up at the loop header. + if (mblock->id() < successor->id()) + live.insertAll(liveIn[successor->id()]); + } + + // Add successor phis. + if (mblock->successorWithPhis()) { + LBlock* phiSuccessor = mblock->successorWithPhis()->lir(); + for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) { + LPhi* phi = phiSuccessor->getPhi(j); + LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor()); + uint32_t reg = use->toUse()->virtualRegister(); + live.insert(reg); + vreg(use).setUsedByPhi(); + } + } + + // Registers are assumed alive for the entire block, a define shortens + // the range to the point of definition. + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { + if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next())) + return false; + } + + // Shorten the front end of ranges for live variables to their point of + // definition, if found. + for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) { + // Calls may clobber registers, so force a spill and reload around the callsite. + if (ins->isCall()) { + for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); ++iter) { + bool found = false; + for (size_t i = 0; i < ins->numDefs(); i++) { + if (ins->getDef(i)->isFixed() && + ins->getDef(i)->output()->aliases(LAllocation(*iter))) { + found = true; + break; + } + } + // If this register doesn't have an explicit def above, mark + // it as clobbered by the call unless it is actually + // call-preserved. 
+ if (!found && !ins->isCallPreserved(*iter)) { + if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next())) + return false; + } + } + + CallRange* callRange = + new(alloc().fallible()) CallRange(outputOf(*ins), outputOf(*ins).next()); + if (!callRange) + return false; + + callRangesList.pushFront(callRange); + if (!callRanges.insert(callRange)) + return false; + } + DebugOnly hasDoubleDef = false; + DebugOnly hasFloat32Def = false; + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->isBogusTemp()) + continue; +#ifdef DEBUG + if (def->type() == LDefinition::DOUBLE) + hasDoubleDef = true; + if (def->type() == LDefinition::FLOAT32) + hasFloat32Def = true; +#endif + CodePosition from = outputOf(*ins); + + if (def->policy() == LDefinition::MUST_REUSE_INPUT) { + // MUST_REUSE_INPUT is implemented by allocating an output + // register and moving the input to it. Register hints are + // used to avoid unnecessary moves. We give the input an + // LUse::ANY policy to avoid allocating a register for the + // input. + LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse(); + MOZ_ASSERT(inputUse->policy() == LUse::REGISTER); + MOZ_ASSERT(inputUse->usedAtStart()); + *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true); + } + + if (!vreg(def).addInitialRange(alloc(), from, from.next())) + return false; + vreg(def).setInitialDefinition(from); + live.remove(def->virtualRegister()); + } + + for (size_t i = 0; i < ins->numTemps(); i++) { + LDefinition* temp = ins->getTemp(i); + if (temp->isBogusTemp()) + continue; + + // Normally temps are considered to cover both the input + // and output of the associated instruction. In some cases + // though we want to use a fixed register as both an input + // and clobbered register in the instruction, so watch for + // this and shorten the temp to cover only the output. + CodePosition from = inputOf(*ins); + if (temp->policy() == LDefinition::FIXED) { + AnyRegister reg = temp->output()->toRegister(); + for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) { + if (alloc->isUse()) { + LUse* use = alloc->toUse(); + if (use->isFixedRegister()) { + if (GetFixedRegister(vreg(use).def(), use) == reg) + from = outputOf(*ins); + } + } + } + } + + CodePosition to = + ins->isCall() ? outputOf(*ins) : outputOf(*ins).next(); + + if (!vreg(temp).addInitialRange(alloc(), from, to)) + return false; + vreg(temp).setInitialDefinition(from); + } + + DebugOnly hasUseRegister = false; + DebugOnly hasUseRegisterAtStart = false; + + for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) { + if (inputAlloc->isUse()) { + LUse* use = inputAlloc->toUse(); + + // Call uses should always be at-start, since calls use all + // registers. + MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(), + use->usedAtStart()); + +#ifdef DEBUG + // Don't allow at-start call uses if there are temps of the same kind, + // so that we don't assign the same register. Only allow this when the + // use and temp are fixed registers, as they can't alias. + if (ins->isCall() && use->usedAtStart()) { + for (size_t i = 0; i < ins->numTemps(); i++) { + MOZ_ASSERT(vreg(ins->getTemp(i)).type() != vreg(use).type() || + (use->isFixedRegister() && ins->getTemp(i)->isFixed())); + } + } + + // If there are both useRegisterAtStart(x) and useRegister(y) + // uses, we may assign the same register to both operands + // (bug 772830). Don't allow this for now. 
+ if (use->policy() == LUse::REGISTER) { + if (use->usedAtStart()) { + if (!IsInputReused(*ins, use)) + hasUseRegisterAtStart = true; + } else { + hasUseRegister = true; + } + } + MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart)); +#endif + + // Don't treat RECOVERED_INPUT uses as keeping the vreg alive. + if (use->policy() == LUse::RECOVERED_INPUT) + continue; + + CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins); + if (use->isFixedRegister()) { + LAllocation reg(AnyRegister::FromCode(use->registerCode())); + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->policy() == LDefinition::FIXED && *def->output() == reg) + to = inputOf(*ins); + } + } + + if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next())) + return false; + UsePosition* usePosition = new(alloc().fallible()) UsePosition(use, to); + if (!usePosition) + return false; + vreg(use).addInitialUse(usePosition); + live.insert(use->virtualRegister()); + } + } + } + + // Phis have simultaneous assignment semantics at block begin, so at + // the beginning of the block we can be sure that liveIn does not + // contain any phi outputs. + for (unsigned int i = 0; i < block->numPhis(); i++) { + LDefinition* def = block->getPhi(i)->getDef(0); + if (live.contains(def->virtualRegister())) { + live.remove(def->virtualRegister()); + } else { + // This is a dead phi, so add a dummy range over all phis. This + // can go away if we have an earlier dead code elimination pass. + CodePosition entryPos = entryOf(block); + if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next())) + return false; + } + } + + if (mblock->isLoopHeader()) { + // A divergence from the published algorithm is required here, as + // our block order does not guarantee that blocks of a loop are + // contiguous. As a result, a single live range spanning the + // loop is not possible. Additionally, we require liveIn in a later + // pass for resolution, so that must also be fixed up here. + MBasicBlock* loopBlock = mblock->backedge(); + while (true) { + // Blocks must already have been visited to have a liveIn set. + MOZ_ASSERT(loopBlock->id() >= mblock->id()); + + // Add a range for this entire loop block + CodePosition from = entryOf(loopBlock->lir()); + CodePosition to = exitOf(loopBlock->lir()).next(); + + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { + if (!vregs[*liveRegId].addInitialRange(alloc(), from, to)) + return false; + } + + // Fix up the liveIn set. + liveIn[loopBlock->id()].insertAll(live); + + // Make sure we don't visit this node again + loopDone.insert(loopBlock->id()); + + // If this is the loop header, any predecessors are either the + // backedge or out of the loop, so skip any predecessors of + // this block + if (loopBlock != mblock) { + for (size_t i = 0; i < loopBlock->numPredecessors(); i++) { + MBasicBlock* pred = loopBlock->getPredecessor(i); + if (loopDone.contains(pred->id())) + continue; + if (!loopWorkList.append(pred)) + return false; + } + } + + // Terminate loop if out of work. + if (loopWorkList.empty()) + break; + + // Grab the next block off the work list, skipping any OSR block. + MBasicBlock* osrBlock = graph.mir().osrBlock(); + while (!loopWorkList.empty()) { + loopBlock = loopWorkList.popCopy(); + if (loopBlock != osrBlock) + break; + } + + // If end is reached without finding a non-OSR block, then no more work items were found. 
+ if (loopBlock == osrBlock) { + MOZ_ASSERT(loopWorkList.empty()); + break; + } + } + + // Clear the done set for other loops + loopDone.clear(); + } + + MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty()); + } + + JitSpew(JitSpew_RegAlloc, "Liveness analysis complete"); + + if (JitSpewEnabled(JitSpew_RegAlloc)) + dumpInstructions(); + + return true; +} + +bool +BacktrackingAllocator::go() +{ + JitSpew(JitSpew_RegAlloc, "Beginning register allocation"); + + if (!init()) + return false; + + if (!buildLivenessInfo()) + return false; + + if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2)) + return false; + + JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers"); + if (!mergeAndQueueRegisters()) + return false; + + if (JitSpewEnabled(JitSpew_RegAlloc)) + dumpVregs(); + + JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop"); + + // Allocate, spill and split bundles until finished. + while (!allocationQueue.empty()) { + if (mir->shouldCancel("Backtracking Allocation")) + return false; + + QueueItem item = allocationQueue.removeHighest(); + if (!processBundle(mir, item.bundle)) + return false; + } + JitSpew(JitSpew_RegAlloc, "Main allocation loop complete"); + + if (!pickStackSlots()) + return false; + + if (JitSpewEnabled(JitSpew_RegAlloc)) + dumpAllocations(); + + if (!resolveControlFlow()) + return false; + + if (!reifyAllocations()) + return false; + + if (!populateSafepoints()) + return false; + + if (!annotateMoveGroups()) + return false; + + return true; +} + +static bool +IsArgumentSlotDefinition(LDefinition* def) +{ + return def->policy() == LDefinition::FIXED && def->output()->isArgument(); +} + +static bool +IsThisSlotDefinition(LDefinition* def) +{ + return IsArgumentSlotDefinition(def) && + def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value); +} + +bool +BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1) +{ + // See if bundle0 and bundle1 can be merged together. + if (bundle0 == bundle1) + return true; + + // Get a representative virtual register from each bundle. + VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()]; + VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()]; + + if (!reg0.isCompatible(reg1)) + return true; + + // Registers which might spill to the frame's |this| slot can only be + // grouped with other such registers. The frame's |this| slot must always + // hold the |this| value, as required by JitFrame tracing and by the Ion + // constructor calling convention. + if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) { + if (*reg0.def()->output() != *reg1.def()->output()) + return true; + } + + // Registers which might spill to the frame's argument slots can only be + // grouped with other such registers if the frame might access those + // arguments through a lazy arguments object or rest parameter. + if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) { + if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) { + if (*reg0.def()->output() != *reg1.def()->output()) + return true; + } + } + + // Limit the number of times we compare ranges if there are many ranges in + // one of the bundles, to avoid quadratic behavior. + static const size_t MAX_RANGES = 200; + + // Make sure that ranges in the bundles do not overlap. 
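+    // (Both range lists are kept sorted by start position, so a single
+    //  linear walk with two iterators suffices below: advance whichever
+    //  range ends first, and give up on merging as soon as two ranges
+    //  overlap.)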
+ LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin(); + size_t count = 0; + while (iter0 && iter1) { + if (++count >= MAX_RANGES) + return true; + + LiveRange* range0 = LiveRange::get(*iter0); + LiveRange* range1 = LiveRange::get(*iter1); + + if (range0->from() >= range1->to()) + iter1++; + else if (range1->from() >= range0->to()) + iter0++; + else + return true; + } + + // Move all ranges from bundle1 into bundle0. + while (LiveRange* range = bundle1->popFirstRange()) + bundle0->addRange(range); + + return true; +} + +static inline LDefinition* +FindReusingDefOrTemp(LNode* ins, LAllocation* alloc) +{ + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(def->getReusedInput()) == alloc) + return def; + } + for (size_t i = 0; i < ins->numTemps(); i++) { + LDefinition* def = ins->getTemp(i); + if (def->policy() == LDefinition::MUST_REUSE_INPUT && + ins->getOperand(def->getReusedInput()) == alloc) + return def; + } + return nullptr; +} + +static inline size_t +NumReusingDefs(LNode* ins) +{ + size_t num = 0; + for (size_t i = 0; i < ins->numDefs(); i++) { + LDefinition* def = ins->getDef(i); + if (def->policy() == LDefinition::MUST_REUSE_INPUT) + num++; + } + return num; +} + +bool +BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input) +{ + // def is a vreg which reuses input for its output physical register. Try + // to merge ranges for def with those of input if possible, as avoiding + // copies before def's instruction is crucial for generated code quality + // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64). + + if (def.rangeFor(inputOf(def.ins()))) { + MOZ_ASSERT(def.isTemp()); + def.setMustCopyInput(); + return true; + } + + LiveRange* inputRange = input.rangeFor(outputOf(def.ins())); + if (!inputRange) { + // The input is not live after the instruction, either in a safepoint + // for the instruction or in subsequent code. The input and output + // can thus be in the same group. + return tryMergeBundles(def.firstBundle(), input.firstBundle()); + } + + // The input is live afterwards, either in future instructions or in a + // safepoint for the reusing instruction. This is impossible to satisfy + // without copying the input. + // + // It may or may not be better to split the input into two bundles at the + // point of the definition, which may permit merging. One case where it is + // definitely better to split is if the input never has any register uses + // after the instruction. Handle this splitting eagerly. + + LBlock* block = def.ins()->block(); + + // The input's lifetime must end within the same block as the definition, + // otherwise it could live on in phis elsewhere. + if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) { + def.setMustCopyInput(); + return true; + } + + // If we already split the input for some other register, don't make a + // third bundle. + if (inputRange->bundle() != input.firstRange()->bundle()) { + def.setMustCopyInput(); + return true; + } + + // If the input will start out in memory then adding a separate bundle for + // memory uses after the def won't help. + if (input.def()->isFixed() && !input.def()->output()->isRegister()) { + def.setMustCopyInput(); + return true; + } + + // The input cannot have register or reused uses after the definition. 
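+    // (Such a use would need the value in a register after the reusing
+    //  instruction, but the post-definition range created below exists
+    //  precisely so it can be spilled, so in that case we fall back to
+    //  copying the input.)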
+    for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
+        if (iter->pos <= inputOf(def.ins()))
+            continue;
+
+        LUse* use = iter->use();
+        if (FindReusingDefOrTemp(insData[iter->pos], use)) {
+            def.setMustCopyInput();
+            return true;
+        }
+        if (iter->usePolicy() != LUse::ANY && iter->usePolicy() != LUse::KEEPALIVE) {
+            def.setMustCopyInput();
+            return true;
+        }
+    }
+
+    LiveRange* preRange = LiveRange::FallibleNew(alloc(), input.vreg(),
+                                                 inputRange->from(), outputOf(def.ins()));
+    if (!preRange)
+        return false;
+
+    // The new range starts at reg's input position, which means it overlaps
+    // with the old range at one position. This is what we want, because we
+    // need to copy the input before the instruction.
+    LiveRange* postRange = LiveRange::FallibleNew(alloc(), input.vreg(),
+                                                  inputOf(def.ins()), inputRange->to());
+    if (!postRange)
+        return false;
+
+    inputRange->distributeUses(preRange);
+    inputRange->distributeUses(postRange);
+    MOZ_ASSERT(!inputRange->hasUses());
+
+    JitSpew(JitSpew_RegAlloc, " splitting reused input at %u to try to help grouping",
+            inputOf(def.ins()).bits());
+
+    LiveBundle* firstBundle = inputRange->bundle();
+    input.removeRange(inputRange);
+    input.addRange(preRange);
+    input.addRange(postRange);
+
+    firstBundle->removeRange(inputRange);
+    firstBundle->addRange(preRange);
+
+    // The new range goes in a separate bundle, where it will be spilled during
+    // allocation.
+    LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+    if (!secondBundle)
+        return false;
+    secondBundle->addRange(postRange);
+
+    return tryMergeBundles(def.firstBundle(), input.firstBundle());
+}
+
+bool
+BacktrackingAllocator::mergeAndQueueRegisters()
+{
+    MOZ_ASSERT(!vregs[0u].hasRanges());
+
+    // Create a bundle for each register containing all its ranges.
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+        if (!reg.hasRanges())
+            continue;
+
+        LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
+        if (!bundle)
+            return false;
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            bundle->addRange(range);
+        }
+    }
+
+    // If there is an OSR block, merge parameters in that block with the
+    // corresponding parameters in the initial block.
+    if (MBasicBlock* osr = graph.mir().osrBlock()) {
+        size_t original = 1;
+        for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
+            if (iter->isParameter()) {
+                for (size_t i = 0; i < iter->numDefs(); i++) {
+                    DebugOnly<bool> found = false;
+                    VirtualRegister &paramVreg = vreg(iter->getDef(i));
+                    for (; original < paramVreg.vreg(); original++) {
+                        VirtualRegister &originalVreg = vregs[original];
+                        if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
+                            MOZ_ASSERT(originalVreg.ins()->isParameter());
+                            if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
+                                return false;
+                            found = true;
+                            break;
+                        }
+                    }
+                    MOZ_ASSERT(found);
+                }
+            }
+        }
+    }
+
+    // Try to merge registers with their reused inputs.
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+        if (!reg.hasRanges())
+            continue;
+
+        if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
+            LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
+            if (!tryMergeReusedRegister(reg, vreg(use)))
+                return false;
+        }
+    }
+
+    // Try to merge phis with their inputs.
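+    // (For a phi such as v3 = phi(v1, v2), putting v1, v2 and v3 in one
+    //  bundle means no moves are needed on the incoming edges if the merge
+    //  succeeds; tryMergeBundles simply declines when the ranges overlap or
+    //  the registers are incompatible.)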
+ for (size_t i = 0; i < graph.numBlocks(); i++) { + LBlock* block = graph.getBlock(i); + for (size_t j = 0; j < block->numPhis(); j++) { + LPhi* phi = block->getPhi(j); + VirtualRegister &outputVreg = vreg(phi->getDef(0)); + for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) { + VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse()); + if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle())) + return false; + } + } + } + + // Add all bundles to the allocation queue, and create spill sets for them. + for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + LiveBundle* bundle = range->bundle(); + if (range == bundle->firstRange()) { + if (!alloc().ensureBallast()) + return false; + + SpillSet* spill = SpillSet::New(alloc()); + if (!spill) + return false; + bundle->setSpillSet(spill); + + size_t priority = computePriority(bundle); + if (!allocationQueue.insert(QueueItem(bundle, priority))) + return false; + } + } + } + + return true; +} + +static const size_t MAX_ATTEMPTS = 2; + +bool +BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement, + bool* success, bool* pfixed, + LiveBundleVector& conflicting) +{ + // Spill bundles which are required to be in a certain stack slot. + if (!requirement.allocation().isRegister()) { + JitSpew(JitSpew_RegAlloc, " stack allocation requirement"); + bundle->setAllocation(requirement.allocation()); + *success = true; + return true; + } + + AnyRegister reg = requirement.allocation().toRegister(); + return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting); +} + +bool +BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle, + Requirement requirement, Requirement hint, + bool* success, bool* pfixed, + LiveBundleVector& conflicting) +{ + // If we want, but do not require a bundle to be in a specific register, + // only look at that register for allocating and evict or spill if it is + // not available. Picking a separate register may be even worse than + // spilling, as it will still necessitate moves and will tie up more + // registers than if we spilled. + if (hint.kind() == Requirement::FIXED) { + AnyRegister reg = hint.allocation().toRegister(); + if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting)) + return false; + if (*success) + return true; + } + + // Spill bundles which have no hint or register requirement. + if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) { + if (!spill(bundle)) + return false; + *success = true; + return true; + } + + if (conflicting.empty() || minimalBundle(bundle)) { + // Search for any available register which the bundle can be + // allocated to. + for (size_t i = 0; i < AnyRegister::Total; i++) { + if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting)) + return false; + if (*success) + return true; + } + } + + // Spill bundles which have no register requirement if they didn't get + // allocated. + if (requirement.kind() == Requirement::NONE) { + if (!spill(bundle)) + return false; + *success = true; + return true; + } + + // We failed to allocate this bundle. 
+ MOZ_ASSERT(!*success); + return true; +} + +bool +BacktrackingAllocator::processBundle(MIRGenerator* mir, LiveBundle* bundle) +{ + if (JitSpewEnabled(JitSpew_RegAlloc)) { + JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]", + bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle)); + } + + // A bundle can be processed by doing any of the following: + // + // - Assigning the bundle a register. The bundle cannot overlap any other + // bundle allocated for that physical register. + // + // - Spilling the bundle, provided it has no register uses. + // + // - Splitting the bundle into two or more bundles which cover the original + // one. The new bundles are placed back onto the priority queue for later + // processing. + // + // - Evicting one or more existing allocated bundles, and then doing one + // of the above operations. Evicted bundles are placed back on the + // priority queue. Any evicted bundles must have a lower spill weight + // than the bundle being processed. + // + // As long as this structure is followed, termination is guaranteed. + // In general, we want to minimize the amount of bundle splitting (which + // generally necessitates spills), so allocate longer lived, lower weight + // bundles first and evict and split them later if they prevent allocation + // for higher weight bundles. + + Requirement requirement, hint; + bool canAllocate = computeRequirement(bundle, &requirement, &hint); + + bool fixed; + LiveBundleVector conflicting; + for (size_t attempt = 0;; attempt++) { + if (mir->shouldCancel("Backtracking Allocation (processBundle loop)")) + return false; + + if (canAllocate) { + bool success = false; + fixed = false; + conflicting.clear(); + + // Ok, let's try allocating for this bundle. + if (requirement.kind() == Requirement::FIXED) { + if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting)) + return false; + } else { + if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting)) + return false; + } + + // If that worked, we're done! + if (success) + return true; + + // If that didn't work, but we have one or more non-fixed bundles + // known to be conflicting, maybe we can evict them and try again. + if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) && + !fixed && + !conflicting.empty() && + maximumSpillWeight(conflicting) < computeSpillWeight(bundle)) + { + for (size_t i = 0; i < conflicting.length(); i++) { + if (!evictBundle(conflicting[i])) + return false; + } + continue; + } + } + + // A minimal bundle cannot be split any further. If we try to split it + // it at this point we will just end up with the same bundle and will + // enter an infinite loop. Weights and the initial live ranges must + // be constructed so that any minimal bundle is allocatable. + MOZ_ASSERT(!minimalBundle(bundle)); + + LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0]; + return chooseBundleSplit(bundle, canAllocate && fixed, conflict); + } +} + +bool +BacktrackingAllocator::computeRequirement(LiveBundle* bundle, + Requirement *requirement, Requirement *hint) +{ + // Set any requirement or hint on bundle according to its definition and + // uses. Return false if there are conflicting requirements which will + // require the bundle to be split. 
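+    // (For example, a bundle whose uses are fixed to two different physical
+    //  registers cannot satisfy both at once: the requirement merge below
+    //  fails, and the caller responds by splitting the bundle.)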
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister &reg = vregs[range->vreg()];
+
+        if (range->hasDefinition()) {
+            // Deal with any definition constraints/hints.
+            LDefinition::Policy policy = reg.def()->policy();
+            if (policy == LDefinition::FIXED) {
+                // Fixed policies get a FIXED requirement.
+                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
+                        reg.def()->output()->toString().get());
+                if (!requirement->merge(Requirement(*reg.def()->output())))
+                    return false;
+            } else if (reg.ins()->isPhi()) {
+                // Phis don't have any requirements, but they should prefer their
+                // input allocations. This is captured by the group hints above.
+            } else {
+                // Non-phis get a REGISTER requirement.
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            }
+        }
+
+        // Search uses for requirements.
+        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+            LUse::Policy policy = iter->usePolicy();
+            if (policy == LUse::FIXED) {
+                AnyRegister required = GetFixedRegister(reg.def(), iter->use());
+
+                JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
+                        required.name(), iter->pos.bits());
+
+                // If there are multiple fixed registers which the bundle is
+                // required to use, fail. The bundle will need to be split before
+                // it can be allocated.
+                if (!requirement->merge(Requirement(LAllocation(required))))
+                    return false;
+            } else if (policy == LUse::REGISTER) {
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            } else if (policy == LUse::ANY) {
+                // ANY differs from KEEPALIVE by actively preferring a register.
+                if (!hint->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+                                           bool* success, bool* pfixed, LiveBundleVector& conflicting)
+{
+    *success = false;
+
+    if (!r.allocatable)
+        return true;
+
+    LiveBundleVector aliasedConflicting;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister &reg = vregs[range->vreg()];
+
+        if (!reg.isCompatible(r.reg))
+            return true;
+
+        for (size_t a = 0; a < r.reg.numAliased(); a++) {
+            PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
+            LiveRange* existing;
+            if (!rAlias.allocations.contains(range, &existing))
+                continue;
+            if (existing->hasVreg()) {
+                MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
+                bool duplicate = false;
+                for (size_t i = 0; i < aliasedConflicting.length(); i++) {
+                    if (aliasedConflicting[i] == existing->bundle()) {
+                        duplicate = true;
+                        break;
+                    }
+                }
+                if (!duplicate && !aliasedConflicting.append(existing->bundle()))
+                    return false;
+            } else {
+                JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
+                        rAlias.reg.name(), existing->toString().get());
+                *pfixed = true;
+                return true;
+            }
+        }
+    }
+
+    if (!aliasedConflicting.empty()) {
+        // One or more aliased registers is allocated to another bundle
+        // overlapping this one. Keep track of the conflicting set, and in the
+        // case of multiple conflicting sets keep track of the set with the
+        // lowest maximum spill weight.
+
+        // The #ifdef guards against "unused variable 'existing'" bustage.
+#ifdef JS_JITSPEW + if (JitSpewEnabled(JitSpew_RegAlloc)) { + if (aliasedConflicting.length() == 1) { + LiveBundle* existing = aliasedConflicting[0]; + JitSpew(JitSpew_RegAlloc, " %s collides with %s [weight %" PRIuSIZE "]", + r.reg.name(), existing->toString().get(), computeSpillWeight(existing)); + } else { + JitSpew(JitSpew_RegAlloc, " %s collides with the following", r.reg.name()); + for (size_t i = 0; i < aliasedConflicting.length(); i++) { + LiveBundle* existing = aliasedConflicting[i]; + JitSpew(JitSpew_RegAlloc, " %s [weight %" PRIuSIZE "]", + existing->toString().get(), computeSpillWeight(existing)); + } + } + } +#endif + + if (conflicting.empty()) { + if (!conflicting.appendAll(aliasedConflicting)) + return false; + } else { + if (maximumSpillWeight(aliasedConflicting) < maximumSpillWeight(conflicting)) { + conflicting.clear(); + if (!conflicting.appendAll(aliasedConflicting)) + return false; + } + } + return true; + } + + JitSpew(JitSpew_RegAlloc, " allocated to %s", r.reg.name()); + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (!alloc().ensureBallast()) + return false; + if (!r.allocations.insert(range)) + return false; + } + + bundle->setAllocation(LAllocation(r.reg)); + *success = true; + return true; +} + +bool +BacktrackingAllocator::evictBundle(LiveBundle* bundle) +{ + if (JitSpewEnabled(JitSpew_RegAlloc)) { + JitSpew(JitSpew_RegAlloc, " Evicting %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]", + bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle)); + } + + AnyRegister reg(bundle->allocation().toRegister()); + PhysicalRegister& physical = registers[reg.code()]; + MOZ_ASSERT(physical.reg == reg && physical.allocatable); + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + physical.allocations.remove(range); + } + + bundle->setAllocation(LAllocation()); + + size_t priority = computePriority(bundle); + return allocationQueue.insert(QueueItem(bundle, priority)); +} + +bool +BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle, + const LiveBundleVector& newBundles) +{ + if (JitSpewEnabled(JitSpew_RegAlloc)) { + JitSpew(JitSpew_RegAlloc, " splitting bundle %s into:", bundle->toString().get()); + for (size_t i = 0; i < newBundles.length(); i++) + JitSpew(JitSpew_RegAlloc, " %s", newBundles[i]->toString().get()); + } + + // Remove all ranges in the old bundle from their register's list. + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + vregs[range->vreg()].removeRange(range); + } + + // Add all ranges in the new bundles to their register's list. + for (size_t i = 0; i < newBundles.length(); i++) { + LiveBundle* newBundle = newBundles[i]; + for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + vregs[range->vreg()].addRange(range); + } + } + + // Queue the new bundles for register assignment. 
+ for (size_t i = 0; i < newBundles.length(); i++) { + LiveBundle* newBundle = newBundles[i]; + size_t priority = computePriority(newBundle); + if (!allocationQueue.insert(QueueItem(newBundle, priority))) + return false; + } + + return true; +} + +bool +BacktrackingAllocator::spill(LiveBundle* bundle) +{ + JitSpew(JitSpew_RegAlloc, " Spilling bundle"); + MOZ_ASSERT(bundle->allocation().isBogus()); + + if (LiveBundle* spillParent = bundle->spillParent()) { + JitSpew(JitSpew_RegAlloc, " Using existing spill bundle"); + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + LiveRange* parentRange = spillParent->rangeFor(range->from()); + MOZ_ASSERT(parentRange->contains(range)); + MOZ_ASSERT(range->vreg() == parentRange->vreg()); + range->distributeUses(parentRange); + MOZ_ASSERT(!range->hasUses()); + vregs[range->vreg()].removeRange(range); + } + return true; + } + + return bundle->spillSet()->addSpilledBundle(bundle); +} + +bool +BacktrackingAllocator::pickStackSlots() +{ + for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + + if (mir->shouldCancel("Backtracking Pick Stack Slots")) + return false; + + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + LiveBundle* bundle = range->bundle(); + + if (bundle->allocation().isBogus()) { + if (!pickStackSlot(bundle->spillSet())) + return false; + MOZ_ASSERT(!bundle->allocation().isBogus()); + } + } + } + + return true; +} + +bool +BacktrackingAllocator::pickStackSlot(SpillSet* spillSet) +{ + // Look through all ranges that have been spilled in this set for a + // register definition which is fixed to a stack or argument slot. If we + // find one, use it for all bundles that have been spilled. tryMergeBundles + // makes sure this reuse is possible when an initial bundle contains ranges + // from multiple virtual registers. + for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) { + LiveBundle* bundle = spillSet->spilledBundle(i); + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (range->hasDefinition()) { + LDefinition* def = vregs[range->vreg()].def(); + if (def->policy() == LDefinition::FIXED) { + MOZ_ASSERT(!def->output()->isRegister()); + MOZ_ASSERT(!def->output()->isStackSlot()); + spillSet->setAllocation(*def->output()); + return true; + } + } + } + } + + LDefinition::Type type = vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type(); + + SpillSlotList* slotList; + switch (StackSlotAllocator::width(type)) { + case 4: slotList = &normalSlots; break; + case 8: slotList = &doubleSlots; break; + case 16: slotList = &quadSlots; break; + default: + MOZ_CRASH("Bad width"); + } + + // Maximum number of existing spill slots we will look at before giving up + // and allocating a new slot. + static const size_t MAX_SEARCH_COUNT = 10; + + size_t searches = 0; + SpillSlot* stop = nullptr; + while (!slotList->empty()) { + SpillSlot* spillSlot = *slotList->begin(); + if (!stop) { + stop = spillSlot; + } else if (stop == spillSlot) { + // We looked through every slot in the list. 
+ break; + } + + bool success = true; + for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) { + LiveBundle* bundle = spillSet->spilledBundle(i); + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + LiveRange* existing; + if (spillSlot->allocated.contains(range, &existing)) { + success = false; + break; + } + } + if (!success) + break; + } + if (success) { + // We can reuse this physical stack slot for the new bundles. + // Update the allocated ranges for the slot. + for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) { + LiveBundle* bundle = spillSet->spilledBundle(i); + if (!insertAllRanges(spillSlot->allocated, bundle)) + return false; + } + spillSet->setAllocation(spillSlot->alloc); + return true; + } + + // On a miss, move the spill to the end of the list. This will cause us + // to make fewer attempts to allocate from slots with a large and + // highly contended range. + slotList->popFront(); + slotList->pushBack(spillSlot); + + if (++searches == MAX_SEARCH_COUNT) + break; + } + + // We need a new physical stack slot. + uint32_t stackSlot = stackSlotAllocator.allocateSlot(type); + + SpillSlot* spillSlot = new(alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc()); + if (!spillSlot) + return false; + + for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) { + LiveBundle* bundle = spillSet->spilledBundle(i); + if (!insertAllRanges(spillSlot->allocated, bundle)) + return false; + } + + spillSet->setAllocation(spillSlot->alloc); + + slotList->pushFront(spillSlot); + return true; +} + +bool +BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle) +{ + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (!alloc().ensureBallast()) + return false; + if (!set.insert(range)) + return false; + } + return true; +} + +bool +BacktrackingAllocator::deadRange(LiveRange* range) +{ + // Check for direct uses of this range. + if (range->hasUses() || range->hasDefinition()) + return false; + + CodePosition start = range->from(); + LNode* ins = insData[start]; + if (start == entryOf(ins->block())) + return false; + + VirtualRegister& reg = vregs[range->vreg()]; + + // Check if there are later ranges for this vreg. + LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range); + for (iter++; iter; iter++) { + LiveRange* laterRange = LiveRange::get(*iter); + if (laterRange->from() > range->from()) + return false; + } + + // Check if this range ends at a loop backedge. + LNode* last = insData[range->to().previous()]; + if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id()) + return false; + + // Check if there are phis which this vreg flows to. + if (reg.usedByPhi()) + return false; + + return true; +} + +bool +BacktrackingAllocator::resolveControlFlow() +{ + // Add moves to handle changing assignments for vregs over their lifetime. + JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)"); + + // Look for places where a register's assignment changes in the middle of a + // basic block. 
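+    //
+    // For example, if one range of a vreg was allocated a register and the
+    // following range of the same vreg starts in the middle of a block and
+    // was spilled, a move from the register to the stack slot is inserted at
+    // the position where the spilled range begins.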
+ MOZ_ASSERT(!vregs[0u].hasRanges()); + for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + + if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)")) + return false; + + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; ) { + LiveRange* range = LiveRange::get(*iter); + + if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)")) + return false; + + // Remove ranges which will never be used. + if (deadRange(range)) { + reg.removeRangeAndIncrement(iter); + continue; + } + + // The range which defines the register does not have a predecessor + // to add moves from. + if (range->hasDefinition()) { + iter++; + continue; + } + + // Ignore ranges that start at block boundaries. We will handle + // these in the next phase. + CodePosition start = range->from(); + LNode* ins = insData[start]; + if (start == entryOf(ins->block())) { + iter++; + continue; + } + + // If we already saw a range which covers the start of this range + // and has the same allocation, we don't need an explicit move at + // the start of this range. + bool skip = false; + for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin(); + prevIter != iter; + prevIter++) + { + LiveRange* prevRange = LiveRange::get(*prevIter); + if (prevRange->covers(start) && + prevRange->bundle()->allocation() == range->bundle()->allocation()) + { + skip = true; + break; + } + } + if (skip) { + iter++; + continue; + } + + if (!alloc().ensureBallast()) + return false; + + LiveRange* predecessorRange = reg.rangeFor(start.previous(), /* preferRegister = */ true); + if (start.subpos() == CodePosition::INPUT) { + if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type())) + return false; + } else { + if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type())) + return false; + } + + iter++; + } + } + + JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)"); + + for (size_t i = 0; i < graph.numBlocks(); i++) { + if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)")) + return false; + + LBlock* successor = graph.getBlock(i); + MBasicBlock* mSuccessor = successor->mir(); + if (mSuccessor->numPredecessors() < 1) + continue; + + // Resolve phis to moves. + for (size_t j = 0; j < successor->numPhis(); j++) { + LPhi* phi = successor->getPhi(j); + MOZ_ASSERT(phi->numDefs() == 1); + LDefinition* def = phi->getDef(0); + VirtualRegister& reg = vreg(def); + LiveRange* to = reg.rangeFor(entryOf(successor)); + MOZ_ASSERT(to); + + for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) { + LBlock* predecessor = mSuccessor->getPredecessor(k)->lir(); + MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1); + + LAllocation* input = phi->getOperand(k); + LiveRange* from = vreg(input).rangeFor(exitOf(predecessor), /* preferRegister = */ true); + MOZ_ASSERT(from); + + if (!alloc().ensureBallast()) + return false; + if (!moveAtExit(predecessor, from, to, def->type())) + return false; + } + } + } + + // Add moves to resolve graph edges with different allocations at their + // source and target. 
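+    //
+    // For example, if the allocation live out of a predecessor differs from
+    // the allocation live into its successor, a move is inserted at the
+    // predecessor's exit when the successor has several predecessors, and at
+    // the successor's entry otherwise.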
+ for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { + LiveRange* targetRange = LiveRange::get(*iter); + + size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id(); + if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId)))) + firstBlockId++; + for (size_t id = firstBlockId; id < graph.numBlocks(); id++) { + LBlock* successor = graph.getBlock(id); + if (!targetRange->covers(entryOf(successor))) + break; + + BitSet& live = liveIn[id]; + if (!live.contains(i)) + continue; + + for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) { + LBlock* predecessor = successor->mir()->getPredecessor(j)->lir(); + if (targetRange->covers(exitOf(predecessor))) + continue; + + if (!alloc().ensureBallast()) + return false; + LiveRange* from = reg.rangeFor(exitOf(predecessor), true); + if (successor->mir()->numPredecessors() > 1) { + MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1); + if (!moveAtExit(predecessor, from, targetRange, reg.type())) + return false; + } else { + if (!moveAtEntry(successor, from, targetRange, reg.type())) + return false; + } + } + } + } + } + + return true; +} + +bool +BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins, bool considerCopy) +{ + if (LDefinition* def = FindReusingDefOrTemp(ins, use)) + return considerCopy || !vregs[def->virtualRegister()].mustCopyInput(); + return false; +} + +bool +BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy) +{ + switch (use->usePolicy()) { + case LUse::ANY: + return isReusedInput(use->use(), ins, considerCopy); + + case LUse::REGISTER: + case LUse::FIXED: + return true; + + default: + return false; + } +} + +bool +BacktrackingAllocator::isRegisterDefinition(LiveRange* range) +{ + if (!range->hasDefinition()) + return false; + + VirtualRegister& reg = vregs[range->vreg()]; + if (reg.ins()->isPhi()) + return false; + + if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister()) + return false; + + return true; +} + +bool +BacktrackingAllocator::reifyAllocations() +{ + JitSpew(JitSpew_RegAlloc, "Reifying Allocations"); + + MOZ_ASSERT(!vregs[0u].hasRanges()); + for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + + if (mir->shouldCancel("Backtracking Reify Allocations (main loop)")) + return false; + + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + if (range->hasDefinition()) { + reg.def()->setOutput(range->bundle()->allocation()); + if (reg.ins()->recoversInput()) { + LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot(); + for (size_t i = 0; i < snapshot->numEntries(); i++) { + LAllocation* entry = snapshot->getEntry(i); + if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT) + *entry = *reg.def()->output(); + } + } + } + + for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { + LAllocation* alloc = iter->use(); + *alloc = range->bundle()->allocation(); + + // For any uses which feed into MUST_REUSE_INPUT definitions, + // add copies if the use and def have different allocations. 
+ LNode* ins = insData[iter->pos]; + if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) { + LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins)); + LAllocation res = outputRange->bundle()->allocation(); + LAllocation sourceAlloc = range->bundle()->allocation(); + + if (res != *alloc) { + if (!this->alloc().ensureBallast()) + return false; + if (NumReusingDefs(ins) <= 1) { + LMoveGroup* group = getInputMoveGroup(ins->toInstruction()); + if (!group->addAfter(sourceAlloc, res, reg.type())) + return false; + } else { + LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction()); + if (!group->add(sourceAlloc, res, reg.type())) + return false; + } + *alloc = res; + } + } + } + + addLiveRegistersForRange(reg, range); + } + } + + graph.setLocalSlotCount(stackSlotAllocator.stackHeight()); + return true; +} + +size_t +BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from) +{ + size_t i = 0; + for (; i < graph.numNonCallSafepoints(); i++) { + const LInstruction* ins = graph.getNonCallSafepoint(i); + if (from <= inputOf(ins)) + break; + } + return i; +} + +void +BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range) +{ + // Fill in the live register sets for all non-call safepoints. + LAllocation a = range->bundle()->allocation(); + if (!a.isRegister()) + return; + + // Don't add output registers to the safepoint. + CodePosition start = range->from(); + if (range->hasDefinition() && !reg.isTemp()) { +#ifdef CHECK_OSIPOINT_REGISTERS + // We don't add the output register to the safepoint, + // but it still might get added as one of the inputs. + // So eagerly add this reg to the safepoint clobbered registers. + if (reg.ins()->isInstruction()) { + if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) + safepoint->addClobberedRegister(a.toRegister()); + } +#endif + start = start.next(); + } + + size_t i = findFirstNonCallSafepoint(start); + for (; i < graph.numNonCallSafepoints(); i++) { + LInstruction* ins = graph.getNonCallSafepoint(i); + CodePosition pos = inputOf(ins); + + // Safepoints are sorted, so we can shortcut out of this loop + // if we go out of range. 
+        if (range->to() <= pos)
+            break;
+
+        MOZ_ASSERT(range->covers(pos));
+
+        LSafepoint* safepoint = ins->safepoint();
+        safepoint->addLiveRegister(a.toRegister());
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+        if (reg.isTemp())
+            safepoint->addClobberedRegister(a.toRegister());
+#endif
+    }
+}
+
+static inline bool
+IsNunbox(VirtualRegister& reg)
+{
+#ifdef JS_NUNBOX32
+    return reg.type() == LDefinition::TYPE ||
+           reg.type() == LDefinition::PAYLOAD;
+#else
+    return false;
+#endif
+}
+
+static inline bool
+IsSlotsOrElements(VirtualRegister& reg)
+{
+    return reg.type() == LDefinition::SLOTS;
+}
+
+static inline bool
+IsTraceable(VirtualRegister& reg)
+{
+    if (reg.type() == LDefinition::OBJECT)
+        return true;
+#ifdef JS_PUNBOX64
+    if (reg.type() == LDefinition::BOX)
+        return true;
+#endif
+    return false;
+}
+
+size_t
+BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
+{
+    size_t i = startFrom;
+    for (; i < graph.numSafepoints(); i++) {
+        LInstruction* ins = graph.getSafepoint(i);
+        if (pos <= inputOf(ins))
+            break;
+    }
+    return i;
+}
+
+bool
+BacktrackingAllocator::populateSafepoints()
+{
+    JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
+
+    size_t firstSafepoint = 0;
+
+    MOZ_ASSERT(!vregs[0u].def());
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+
+        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+            continue;
+
+        firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
+        if (firstSafepoint >= graph.numSafepoints())
+            break;
+
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+                LInstruction* ins = graph.getSafepoint(j);
+
+                if (!range->covers(inputOf(ins))) {
+                    if (inputOf(ins) >= range->to())
+                        break;
+                    continue;
+                }
+
+                // Include temps but not instruction outputs. Also make sure
+                // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
+                // we would have to add the input reg to this safepoint.
+                if (ins == reg.ins() && !reg.isTemp()) {
+                    DebugOnly<LDefinition*> def = reg.def();
+                    MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+                                  def->type() == LDefinition::GENERAL ||
+                                  def->type() == LDefinition::INT32 ||
+                                  def->type() == LDefinition::FLOAT32 ||
+                                  def->type() == LDefinition::DOUBLE);
+                    continue;
+                }
+
+                LSafepoint* safepoint = ins->safepoint();
+
+                LAllocation a = range->bundle()->allocation();
+                if (a.isGeneralReg() && ins->isCall())
+                    continue;
+
+                switch (reg.type()) {
+                  case LDefinition::OBJECT:
+                    if (!safepoint->addGcPointer(a))
+                        return false;
+                    break;
+                  case LDefinition::SLOTS:
+                    if (!safepoint->addSlotsOrElementsPointer(a))
+                        return false;
+                    break;
+#ifdef JS_NUNBOX32
+                  case LDefinition::TYPE:
+                    if (!safepoint->addNunboxType(i, a))
+                        return false;
+                    break;
+                  case LDefinition::PAYLOAD:
+                    if (!safepoint->addNunboxPayload(i, a))
+                        return false;
+                    break;
+#else
+                  case LDefinition::BOX:
+                    if (!safepoint->addBoxedValue(a))
+                        return false;
+                    break;
+#endif
+                  default:
+                    MOZ_CRASH("Bad register type");
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::annotateMoveGroups()
+{
+    // Annotate move groups in the LIR graph with any register that is not
+    // allocated at that point and can be used as a scratch register. This is
+    // only required for x86, as other platforms always have scratch registers
+    // available for use.
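+    //
+    // For example, a move group on x86 may have to move a value between two
+    // stack slots, which requires going through a register. A general purpose
+    // register that is not live across the group and is not used by the group
+    // itself (or by an adjacent group) can be recorded as its scratch
+    // register.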
+#ifdef JS_CODEGEN_X86 + LiveRange* range = LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next()); + if (!range) + return false; + + for (size_t i = 0; i < graph.numBlocks(); i++) { + if (mir->shouldCancel("Backtracking Annotate Move Groups")) + return false; + + LBlock* block = graph.getBlock(i); + LInstruction* last = nullptr; + for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) { + if (iter->isMoveGroup()) { + CodePosition from = last ? outputOf(last) : entryOf(block); + range->setTo(from.next()); + range->setFrom(from); + + for (size_t i = 0; i < AnyRegister::Total; i++) { + PhysicalRegister& reg = registers[i]; + if (reg.reg.isFloat() || !reg.allocatable) + continue; + + // This register is unavailable for use if (a) it is in use + // by some live range immediately before the move group, + // or (b) it is an operand in one of the group's moves. The + // latter case handles live ranges which end immediately + // before the move group or start immediately after. + // For (b) we need to consider move groups immediately + // preceding or following this one. + + if (iter->toMoveGroup()->uses(reg.reg.gpr())) + continue; + bool found = false; + LInstructionIterator niter(iter); + for (niter++; niter != block->end(); niter++) { + if (niter->isMoveGroup()) { + if (niter->toMoveGroup()->uses(reg.reg.gpr())) { + found = true; + break; + } + } else { + break; + } + } + if (iter != block->begin()) { + LInstructionIterator riter(iter); + do { + riter--; + if (riter->isMoveGroup()) { + if (riter->toMoveGroup()->uses(reg.reg.gpr())) { + found = true; + break; + } + } else { + break; + } + } while (riter != block->begin()); + } + + LiveRange* existing; + if (found || reg.allocations.contains(range, &existing)) + continue; + + iter->toMoveGroup()->setScratchRegister(reg.reg.gpr()); + break; + } + } else { + last = *iter; + } + } + } +#endif + + return true; +} + +///////////////////////////////////////////////////////////////////// +// Debugging methods +///////////////////////////////////////////////////////////////////// + +#ifdef JS_JITSPEW + +UniqueChars +LiveRange::toString() const +{ + AutoEnterOOMUnsafeRegion oomUnsafe; + + char* buf = JS_smprintf("v%u [%u,%u)", hasVreg() ? vreg() : 0, from().bits(), to().bits()); + + if (buf && bundle() && !bundle()->allocation().isBogus()) + buf = JS_sprintf_append(buf, " %s", bundle()->allocation().toString().get()); + + if (buf && hasDefinition()) + buf = JS_sprintf_append(buf, " (def)"); + + for (UsePositionIterator iter = usesBegin(); buf && iter; iter++) + buf = JS_sprintf_append(buf, " %s@%u", iter->use()->toString().get(), iter->pos.bits()); + + if (!buf) + oomUnsafe.crash("LiveRange::toString()"); + + return UniqueChars(buf); +} + +UniqueChars +LiveBundle::toString() const +{ + AutoEnterOOMUnsafeRegion oomUnsafe; + + // Suppress -Wformat warning. + char *buf = JS_smprintf("%s", ""); + + for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter; iter++) { + buf = JS_sprintf_append(buf, "%s %s", + (iter == rangesBegin()) ? 
"" : " ##", + LiveRange::get(*iter)->toString().get()); + } + + if (!buf) + oomUnsafe.crash("LiveBundle::toString()"); + + return UniqueChars(buf); +} + +#endif // JS_JITSPEW + +void +BacktrackingAllocator::dumpVregs() +{ +#ifdef JS_JITSPEW + MOZ_ASSERT(!vregs[0u].hasRanges()); + + fprintf(stderr, "Live ranges by virtual register:\n"); + + for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) { + fprintf(stderr, " "); + VirtualRegister& reg = vregs[i]; + for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) { + if (iter != reg.rangesBegin()) + fprintf(stderr, " ## "); + fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get()); + } + fprintf(stderr, "\n"); + } + + fprintf(stderr, "\nLive ranges by bundle:\n"); + + for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) { + VirtualRegister& reg = vregs[i]; + for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter; baseIter++) { + LiveRange* range = LiveRange::get(*baseIter); + LiveBundle* bundle = range->bundle(); + if (range == bundle->firstRange()) { + fprintf(stderr, " "); + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + if (iter != bundle->rangesBegin()) + fprintf(stderr, " ## "); + fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get()); + } + fprintf(stderr, "\n"); + } + } + } +#endif +} + +#ifdef JS_JITSPEW +struct BacktrackingAllocator::PrintLiveRange +{ + bool& first_; + + explicit PrintLiveRange(bool& first) : first_(first) {} + + void operator()(const LiveRange* range) + { + if (first_) + first_ = false; + else + fprintf(stderr, " /"); + fprintf(stderr, " %s", range->toString().get()); + } +}; +#endif + +void +BacktrackingAllocator::dumpAllocations() +{ +#ifdef JS_JITSPEW + fprintf(stderr, "Allocations:\n"); + + dumpVregs(); + + fprintf(stderr, "Allocations by physical register:\n"); + + for (size_t i = 0; i < AnyRegister::Total; i++) { + if (registers[i].allocatable && !registers[i].allocations.empty()) { + fprintf(stderr, " %s:", AnyRegister::FromCode(i).name()); + bool first = true; + registers[i].allocations.forEach(PrintLiveRange(first)); + fprintf(stderr, "\n"); + } + } + + fprintf(stderr, "\n"); +#endif // JS_JITSPEW +} + +/////////////////////////////////////////////////////////////////////////////// +// Heuristic Methods +/////////////////////////////////////////////////////////////////////////////// + +size_t +BacktrackingAllocator::computePriority(LiveBundle* bundle) +{ + // The priority of a bundle is its total length, so that longer lived + // bundles will be processed before shorter ones (even if the longer ones + // have a low spill weight). See processBundle(). + size_t lifetimeTotal = 0; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + lifetimeTotal += range->to() - range->from(); + } + + return lifetimeTotal; +} + +bool +BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) +{ + // Whether this is a minimal range capturing a definition at ins. + return (range->to() <= minimalDefEnd(ins).next()) && + ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins)); +} + +bool +BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use) +{ + // Whether this is a minimal range capturing |use|. + LNode* ins = insData[use->pos]; + return (range->from() == inputOf(ins)) && + (range->to() == (use->use()->usedAtStart() ? 
outputOf(ins) : outputOf(ins).next())); +} + +bool +BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) +{ + LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); + LiveRange* range = LiveRange::get(*iter); + + if (!range->hasVreg()) { + *pfixed = true; + return true; + } + + // If a bundle contains multiple ranges, splitAtAllRegisterUses will split + // each range into a separate bundle. + if (++iter) + return false; + + if (range->hasDefinition()) { + VirtualRegister& reg = vregs[range->vreg()]; + if (pfixed) + *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister(); + return minimalDef(range, reg.ins()); + } + + bool fixed = false, minimal = false, multiple = false; + + for (UsePositionIterator iter = range->usesBegin(); iter; iter++) { + if (iter != range->usesBegin()) + multiple = true; + + switch (iter->usePolicy()) { + case LUse::FIXED: + if (fixed) + return false; + fixed = true; + if (minimalUse(range, *iter)) + minimal = true; + break; + + case LUse::REGISTER: + if (minimalUse(range, *iter)) + minimal = true; + break; + + default: + break; + } + } + + // If a range contains a fixed use and at least one other use, + // splitAtAllRegisterUses will split each use into a different bundle. + if (multiple && fixed) + minimal = false; + + if (pfixed) + *pfixed = fixed; + return minimal; +} + +size_t +BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) +{ + // Minimal bundles have an extremely high spill weight, to ensure they + // can evict any other bundles and be allocated to a register. + bool fixed; + if (minimalBundle(bundle, &fixed)) + return fixed ? 2000000 : 1000000; + + size_t usesTotal = 0; + fixed = false; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + if (range->hasDefinition()) { + VirtualRegister& reg = vregs[range->vreg()]; + if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister()) { + usesTotal += 2000; + fixed = true; + } else if (!reg.ins()->isPhi()) { + usesTotal += 2000; + } + } + + for (UsePositionIterator iter = range->usesBegin(); iter; iter++) { + switch (iter->usePolicy()) { + case LUse::ANY: + usesTotal += 1000; + break; + + case LUse::FIXED: + fixed = true; + MOZ_FALLTHROUGH; + case LUse::REGISTER: + usesTotal += 2000; + break; + + case LUse::KEEPALIVE: + break; + + default: + // Note: RECOVERED_INPUT will not appear in UsePositionIterator. + MOZ_CRASH("Bad use"); + } + } + } + + // Bundles with fixed uses are given a higher spill weight, since they must + // be allocated to a specific register. + if (testbed && fixed) + usesTotal *= 2; + + // Compute spill weight as a use density, lowering the weight for long + // lived bundles with relatively few uses. + size_t lifetimeTotal = computePriority(bundle); + return lifetimeTotal ? usesTotal / lifetimeTotal : 0; +} + +size_t +BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles) +{ + size_t maxWeight = 0; + for (size_t i = 0; i < bundles.length(); i++) + maxWeight = Max(maxWeight, computeSpillWeight(bundles[i])); + return maxWeight; +} + +bool +BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success) +{ + // If this bundle has portions that are hot and portions that are cold, + // split it at the boundaries between hot and cold code. 
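+    //
+    // For example, a bundle that is live through a hot loop body as well as
+    // in the cold code before and after the loop is split into a bundle
+    // covering the loop and separate bundles for the cold code, so that the
+    // hot portion can compete for a register on its own.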
+ + LiveRange* hotRange = nullptr; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (hotcode.contains(range, &hotRange)) + break; + } + + // Don't split if there is no hot code in the bundle. + if (!hotRange) { + JitSpew(JitSpew_RegAlloc, " bundle does not contain hot code"); + return true; + } + + // Don't split if there is no cold code in the bundle. + bool coldCode = false; + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (!hotRange->contains(range)) { + coldCode = true; + break; + } + } + if (!coldCode) { + JitSpew(JitSpew_RegAlloc, " bundle does not contain cold code"); + return true; + } + + JitSpew(JitSpew_RegAlloc, " split across hot range %s", hotRange->toString().get()); + + // Tweak the splitting method when compiling wasm code to look at actual + // uses within the hot/cold code. This heuristic is in place as the below + // mechanism regresses several asm.js tests. Hopefully this will be fixed + // soon and this special case removed. See bug 948838. + if (compilingWasm()) { + SplitPositionVector splitPositions; + if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to())) + return false; + *success = true; + return splitAt(bundle, splitPositions); + } + + LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), + bundle->spillParent()); + if (!hotBundle) + return false; + LiveBundle* preBundle = nullptr; + LiveBundle* postBundle = nullptr; + LiveBundle* coldBundle = nullptr; + + if (testbed) { + coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), bundle->spillParent()); + if (!coldBundle) + return false; + } + + // Accumulate the ranges of hot and cold code in the bundle. Note that + // we are only comparing with the single hot range found, so the cold code + // may contain separate hot ranges. 
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + LiveRange::Range hot, coldPre, coldPost; + range->intersect(hotRange, &coldPre, &hot, &coldPost); + + if (!hot.empty()) { + if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to)) + return false; + } + + if (!coldPre.empty()) { + if (testbed) { + if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to)) + return false; + } else { + if (!preBundle) { + preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), + bundle->spillParent()); + if (!preBundle) + return false; + } + if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to)) + return false; + } + } + + if (!coldPost.empty()) { + if (testbed) { + if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to)) + return false; + } else { + if (!postBundle) { + postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), + bundle->spillParent()); + if (!postBundle) + return false; + } + if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to)) + return false; + } + } + } + + MOZ_ASSERT(hotBundle->numRanges() != 0); + + LiveBundleVector newBundles; + if (!newBundles.append(hotBundle)) + return false; + + if (testbed) { + MOZ_ASSERT(coldBundle->numRanges() != 0); + if (!newBundles.append(coldBundle)) + return false; + } else { + MOZ_ASSERT(preBundle || postBundle); + if (preBundle && !newBundles.append(preBundle)) + return false; + if (postBundle && !newBundles.append(postBundle)) + return false; + } + + *success = true; + return splitAndRequeueBundles(bundle, newBundles); +} + +bool +BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, + bool* success) +{ + // If this bundle's later uses do not require it to be in a register, + // split it after the last use which does require a register. If conflict + // is specified, only consider register uses before the conflict starts. + + CodePosition lastRegisterFrom, lastRegisterTo, lastUse; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + // If the range defines a register, consider that a register use for + // our purposes here. + if (isRegisterDefinition(range)) { + CodePosition spillStart = minimalDefEnd(insData[range->from()]).next(); + if (!conflict || spillStart < conflict->firstRange()->from()) { + lastUse = lastRegisterFrom = range->from(); + lastRegisterTo = spillStart; + } + } + + for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { + LNode* ins = insData[iter->pos]; + + // Uses in the bundle should be sorted. + MOZ_ASSERT(iter->pos >= lastUse); + lastUse = inputOf(ins); + + if (!conflict || outputOf(ins) < conflict->firstRange()->from()) { + if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) { + lastRegisterFrom = inputOf(ins); + lastRegisterTo = iter->pos.next(); + } + } + } + } + + // Can't trim non-register uses off the end by splitting. 
+ if (!lastRegisterFrom.bits()) { + JitSpew(JitSpew_RegAlloc, " bundle has no register uses"); + return true; + } + if (lastUse < lastRegisterTo) { + JitSpew(JitSpew_RegAlloc, " bundle's last use is a register use"); + return true; + } + + JitSpew(JitSpew_RegAlloc, " split after last register use at %u", + lastRegisterTo.bits()); + + SplitPositionVector splitPositions; + if (!splitPositions.append(lastRegisterTo)) + return false; + *success = true; + return splitAt(bundle, splitPositions); +} + +bool +BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success) +{ + // If this bundle's earlier uses do not require it to be in a register, + // split it before the first use which does require a register. If conflict + // is specified, only consider register uses after the conflict ends. + + if (isRegisterDefinition(bundle->firstRange())) { + JitSpew(JitSpew_RegAlloc, " bundle is defined by a register"); + return true; + } + if (!bundle->firstRange()->hasDefinition()) { + JitSpew(JitSpew_RegAlloc, " bundle does not have definition"); + return true; + } + + CodePosition firstRegisterFrom; + + CodePosition conflictEnd; + if (conflict) { + for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + if (range->to() > conflictEnd) + conflictEnd = range->to(); + } + } + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + if (!conflict || range->from() > conflictEnd) { + if (range->hasDefinition() && isRegisterDefinition(range)) { + firstRegisterFrom = range->from(); + break; + } + } + + for (UsePositionIterator iter(range->usesBegin()); iter; iter++) { + LNode* ins = insData[iter->pos]; + + if (!conflict || outputOf(ins) >= conflictEnd) { + if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) { + firstRegisterFrom = inputOf(ins); + break; + } + } + } + if (firstRegisterFrom.bits()) + break; + } + + if (!firstRegisterFrom.bits()) { + // Can't trim non-register uses off the beginning by splitting. + JitSpew(JitSpew_RegAlloc, " bundle has no register uses"); + return true; + } + + JitSpew(JitSpew_RegAlloc, " split before first register use at %u", + firstRegisterFrom.bits()); + + SplitPositionVector splitPositions; + if (!splitPositions.append(firstRegisterFrom)) + return false; + *success = true; + return splitAt(bundle, splitPositions); +} + +// When splitting a bundle according to a list of split positions, return +// whether a use or range at |pos| should use a different bundle than the last +// position this was called for. +static bool +UseNewBundle(const SplitPositionVector& splitPositions, CodePosition pos, + size_t* activeSplitPosition) +{ + if (splitPositions.empty()) { + // When the split positions are empty we are splitting at all uses. + return true; + } + + if (*activeSplitPosition == splitPositions.length()) { + // We've advanced past all split positions. + return false; + } + + if (splitPositions[*activeSplitPosition] > pos) { + // We haven't gotten to the next split position yet. + return false; + } + + // We've advanced past the next split position, find the next one which we + // should split at. 
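+    //
+    // For example, with split positions {10, 20}: a use at position 5 stays
+    // in the current bundle, a use at 12 starts a new bundle and advances
+    // past position 10, and a later use at 25 starts another new bundle and
+    // advances past position 20.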
+ while (*activeSplitPosition < splitPositions.length() && + splitPositions[*activeSplitPosition] <= pos) + { + (*activeSplitPosition)++; + } + return true; +} + +static bool +HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) +{ + MOZ_ASSERT(range->bundle() == bundle); + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* prevRange = LiveRange::get(*iter); + if (prevRange == range) + return false; + if (prevRange->vreg() == range->vreg()) + return true; + } + + MOZ_CRASH(); +} + +static bool +HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) +{ + MOZ_ASSERT(range->bundle() == bundle); + + bool foundRange = false; + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* prevRange = LiveRange::get(*iter); + if (foundRange && prevRange->vreg() == range->vreg()) + return true; + if (prevRange == range) + foundRange = true; + } + + MOZ_ASSERT(foundRange); + return false; +} + +bool +BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions) +{ + // Split the bundle at the given split points. Register uses which have no + // intervening split points are consolidated into the same bundle. If the + // list of split points is empty, then all register uses are placed in + // minimal bundles. + + // splitPositions should be sorted. + for (size_t i = 1; i < splitPositions.length(); ++i) + MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]); + + // We don't need to create a new spill bundle if there already is one. + bool spillBundleIsNew = false; + LiveBundle* spillBundle = bundle->spillParent(); + if (!spillBundle) { + spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr); + if (!spillBundle) + return false; + spillBundleIsNew = true; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + CodePosition from = range->from(); + if (isRegisterDefinition(range)) + from = minimalDefEnd(insData[from]).next(); + + if (from < range->to()) { + if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to())) + return false; + + if (range->hasDefinition() && !isRegisterDefinition(range)) + spillBundle->lastRange()->setHasDefinition(); + } + } + } + + LiveBundleVector newBundles; + + // The bundle which ranges are currently being added to. + LiveBundle* activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle); + if (!activeBundle || !newBundles.append(activeBundle)) + return false; + + // State for use by UseNewBundle. + size_t activeSplitPosition = 0; + + // Make new bundles according to the split positions, and distribute ranges + // and uses to them. 
+ for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + + if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) { + activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle); + if (!activeBundle || !newBundles.append(activeBundle)) + return false; + } + + LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(), + range->from(), range->to()); + if (!activeRange) + return false; + activeBundle->addRange(activeRange); + + if (isRegisterDefinition(range)) + activeRange->setHasDefinition(); + + while (range->hasUses()) { + UsePosition* use = range->popUse(); + LNode* ins = insData[use->pos]; + + // Any uses of a register that appear before its definition has + // finished must be associated with the range for that definition. + if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) { + activeRange->addUse(use); + } else if (isRegisterUse(use, ins)) { + // Place this register use into a different bundle from the + // last one if there are any split points between the two uses. + // UseNewBundle always returns true if we are splitting at all + // register uses, but we can still reuse the last range and + // bundle if they have uses at the same position, except when + // either use is fixed (the two uses might require incompatible + // registers.) + if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) && + (!activeRange->hasUses() || + activeRange->usesBegin()->pos != use->pos || + activeRange->usesBegin()->usePolicy() == LUse::FIXED || + use->usePolicy() == LUse::FIXED)) + { + activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), + spillBundle); + if (!activeBundle || !newBundles.append(activeBundle)) + return false; + activeRange = LiveRange::FallibleNew(alloc(), range->vreg(), + range->from(), range->to()); + if (!activeRange) + return false; + activeBundle->addRange(activeRange); + } + + activeRange->addUse(use); + } else { + MOZ_ASSERT(spillBundleIsNew); + spillBundle->rangeFor(use->pos)->addUse(use); + } + } + } + + LiveBundleVector filteredBundles; + + // Trim the ends of ranges in each new bundle when there are no other + // earlier or later ranges in the same bundle with the same vreg. 
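+    //
+    // For example, if the last range for a vreg in a new bundle extends past
+    // that vreg's final use in the bundle, the range is trimmed to end just
+    // after that use; a range left with no uses and no definition is removed
+    // from the bundle entirely.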
+ for (size_t i = 0; i < newBundles.length(); i++) { + LiveBundle* bundle = newBundles[i]; + + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) { + LiveRange* range = LiveRange::get(*iter); + + if (!range->hasDefinition()) { + if (!HasPrecedingRangeSharingVreg(bundle, range)) { + if (range->hasUses()) { + UsePosition* use = *range->usesBegin(); + range->setFrom(inputOf(insData[use->pos])); + } else { + bundle->removeRangeAndIncrementIterator(iter); + continue; + } + } + } + + if (!HasFollowingRangeSharingVreg(bundle, range)) { + if (range->hasUses()) { + UsePosition* use = range->lastUse(); + range->setTo(use->pos.next()); + } else if (range->hasDefinition()) { + range->setTo(minimalDefEnd(insData[range->from()]).next()); + } else { + bundle->removeRangeAndIncrementIterator(iter); + continue; + } + } + + iter++; + } + + if (bundle->hasRanges() && !filteredBundles.append(bundle)) + return false; + } + + if (spillBundleIsNew && !filteredBundles.append(spillBundle)) + return false; + + return splitAndRequeueBundles(bundle, filteredBundles); +} + +bool +BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) +{ + // Split the bundle to separate register uses and non-register uses and + // allow the vreg to be spilled across its range. + + // Find the locations of all calls in the bundle's range. + SplitPositionVector callPositions; + for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) { + LiveRange* range = LiveRange::get(*iter); + CallRange searchRange(range->from(), range->to()); + CallRange* callRange; + if (!callRanges.contains(&searchRange, &callRange)) { + // There are no calls inside this range. + continue; + } + MOZ_ASSERT(range->covers(callRange->range.from)); + + // The search above returns an arbitrary call within the range. Walk + // backwards to find the first call in the range. + for (CallRangeList::reverse_iterator riter = callRangesList.rbegin(callRange); + riter != callRangesList.rend(); + ++riter) + { + CodePosition pos = riter->range.from; + if (range->covers(pos)) + callRange = *riter; + else + break; + } + + // Add all call positions within the range, by walking forwards. + for (CallRangeList::iterator iter = callRangesList.begin(callRange); + iter != callRangesList.end(); + ++iter) + { + CodePosition pos = iter->range.from; + if (!range->covers(pos)) + break; + + // Calls at the beginning of the range are ignored; there is no splitting to do. + if (range->covers(pos.previous())) { + MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back()); + if (!callPositions.append(pos)) + return false; + } + } + } + MOZ_ASSERT(callPositions.length()); + +#ifdef JS_JITSPEW + JitSpewStart(JitSpew_RegAlloc, " split across calls at "); + for (size_t i = 0; i < callPositions.length(); ++i) + JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits()); + JitSpewFin(JitSpew_RegAlloc); +#endif + + return splitAt(bundle, callPositions); +} + +bool +BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict) +{ + bool success = false; + + if (!trySplitAcrossHotcode(bundle, &success)) + return false; + if (success) + return true; + + if (fixed) + return splitAcrossCalls(bundle); + + if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) + return false; + if (success) + return true; + + if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) + return false; + if (success) + return true; + + // Split at all register uses. 
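+    // Passing an empty list of split positions makes splitAt() place each
+    // register use in its own minimal bundle; this is the most aggressive
+    // form of splitting and is only reached once the other heuristics have
+    // failed.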
+    SplitPositionVector emptyPositions;
+    return splitAt(bundle, emptyPositions);
+}
diff --git a/js/src/jit/BacktrackingAllocator.h b/js/src/jit/BacktrackingAllocator.h
new file mode 100644
index 000000000..6d14ffacd
--- /dev/null
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -0,0 +1,816 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BacktrackingAllocator_h
+#define jit_BacktrackingAllocator_h
+
+#include "mozilla/Array.h"
+
+#include "ds/PriorityQueue.h"
+#include "ds/SplayTree.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/StackSlotAllocator.h"
+
+// Backtracking priority queue based register allocator based on that described
+// in the following blog post:
+//
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+
+namespace js {
+namespace jit {
+
+class Requirement
+{
+  public:
+    enum Kind {
+        NONE,
+        REGISTER,
+        FIXED,
+        MUST_REUSE_INPUT
+    };
+
+    Requirement()
+      : kind_(NONE)
+    { }
+
+    explicit Requirement(Kind kind)
+      : kind_(kind)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
+
+    Requirement(Kind kind, CodePosition at)
+      : kind_(kind),
+        position_(at)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
+
+    explicit Requirement(LAllocation fixed)
+      : kind_(FIXED),
+        allocation_(fixed)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    // Only useful as a hint, encodes where the fixed requirement is used to
+    // avoid allocating a fixed register too early.
+    Requirement(LAllocation fixed, CodePosition at)
+      : kind_(FIXED),
+        allocation_(fixed),
+        position_(at)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    Requirement(uint32_t vreg, CodePosition at)
+      : kind_(MUST_REUSE_INPUT),
+        allocation_(LUse(vreg, LUse::ANY)),
+        position_(at)
+    { }
+
+    Kind kind() const {
+        return kind_;
+    }
+
+    LAllocation allocation() const {
+        MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
+        return allocation_;
+    }
+
+    uint32_t virtualRegister() const {
+        MOZ_ASSERT(allocation_.isUse());
+        MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
+        return allocation_.toUse()->virtualRegister();
+    }
+
+    CodePosition pos() const {
+        return position_;
+    }
+
+    int priority() const;
+
+    MOZ_MUST_USE bool merge(const Requirement& newRequirement) {
+        // Merge newRequirement with any existing requirement, returning false
+        // if the new and old requirements conflict.
+        MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
+
+        if (newRequirement.kind() == Requirement::FIXED) {
+            if (kind() == Requirement::FIXED)
+                return newRequirement.allocation() == allocation();
+            *this = newRequirement;
+            return true;
+        }
+
+        MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+        if (kind() == Requirement::FIXED)
+            return allocation().isRegister();
+
+        *this = newRequirement;
+        return true;
+    }
+
+    void dump() const;
+
+  private:
+    Kind kind_;
+    LAllocation allocation_;
+    CodePosition position_;
+};
+
+struct UsePosition : public TempObject,
+                     public InlineForwardListNode<UsePosition>
+{
+  private:
+    // Packed LUse* with a copy of the LUse::Policy value, in order to avoid
+    // making cache misses while reaching out to the policy value.
+    uintptr_t use_;
+
+    void setUse(LUse* use) {
+        // Assert that we can safely pack the LUse policy in the last 2 bits of
+        // the LUse pointer.
+        static_assert((LUse::ANY | LUse::REGISTER | LUse::FIXED | LUse::KEEPALIVE) <= 0x3,
+                      "Cannot pack the LUse::Policy value on 32 bits architectures.");
+
+        // RECOVERED_INPUT is used by snapshots and ignored when building the
+        // liveness information. Thus we can safely assume that no such value
+        // would be seen.
+        MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);
+        use_ = uintptr_t(use) | (use->policy() & 0x3);
+    }
+
+  public:
+    CodePosition pos;
+
+    LUse* use() const {
+        return reinterpret_cast<LUse*>(use_ & ~0x3);
+    }
+
+    LUse::Policy usePolicy() const {
+        LUse::Policy policy = LUse::Policy(use_ & 0x3);
+        MOZ_ASSERT(use()->policy() == policy);
+        return policy;
+    }
+
+    UsePosition(LUse* use, CodePosition pos) :
+        pos(pos)
+    {
+        // Verify that the usedAtStart() flag is consistent with the
+        // subposition. For now ignore fixed registers, because they
+        // are handled specially around calls.
+        MOZ_ASSERT_IF(!use->isFixedRegister(),
+                      pos.subpos() == (use->usedAtStart()
+                                       ? CodePosition::INPUT
+                                       : CodePosition::OUTPUT));
+        setUse(use);
+    }
+};
+
+typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
+
+// Backtracking allocator data structures overview.
+//
+// LiveRange: A continuous range of positions where a virtual register is live.
+// LiveBundle: A set of LiveRanges which do not overlap.
+// VirtualRegister: A set of all LiveRanges used for some LDefinition.
+//
+// The allocator first performs a liveness analysis on the LIR graph which
+// constructs LiveRanges for each VirtualRegister, determining where the
+// registers are live.
+//
+// The ranges are then bundled together according to heuristics, and placed on
+// the allocation queue.
+//
+// As bundles are removed from the allocation queue, we attempt to find a
+// physical register or stack slot allocation for all ranges in the removed
+// bundle, possibly evicting already-allocated bundles. See processBundle()
+// for details.
+//
+// If we are not able to allocate a bundle, it is split according to heuristics
+// into two or more smaller bundles which cover all the ranges of the original.
+// These smaller bundles are then allocated independently.
+
+class LiveBundle;
+
+class LiveRange : public TempObject
+{
+  public:
+    // Linked lists are used to keep track of the ranges in each LiveBundle and
+    // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
+    // these auxiliary classes to keep things straight.
+    class BundleLink : public InlineForwardListNode<BundleLink> {};
+    class RegisterLink : public InlineForwardListNode<RegisterLink> {};
+
+    typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
+    typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
+
+    // Links in the lists in LiveBundle and VirtualRegister.
+    BundleLink bundleLink;
+    RegisterLink registerLink;
+
+    static LiveRange* get(BundleLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, bundleLink));
+    }
+    static LiveRange* get(RegisterLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, registerLink));
+    }
+
+    struct Range
+    {
+        // The beginning of this range, inclusive.
+        CodePosition from;
+
+        // The end of this range, exclusive.
+ CodePosition to; + + Range() {} + + Range(CodePosition from, CodePosition to) + : from(from), to(to) + { + MOZ_ASSERT(!empty()); + } + + bool empty() { + MOZ_ASSERT(from <= to); + return from == to; + } + }; + + private: + // The virtual register this range is for, or zero if this does not have a + // virtual register (for example, it is in the callRanges bundle). + uint32_t vreg_; + + // The bundle containing this range, null if liveness information is being + // constructed and we haven't started allocating bundles yet. + LiveBundle* bundle_; + + // The code positions in this range. + Range range_; + + // All uses of the virtual register in this range, ordered by location. + InlineForwardList uses_; + + // Whether this range contains the virtual register's definition. + bool hasDefinition_; + + LiveRange(uint32_t vreg, Range range) + : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false) + { + MOZ_ASSERT(!range.empty()); + } + + public: + static LiveRange* FallibleNew(TempAllocator& alloc, uint32_t vreg, + CodePosition from, CodePosition to) + { + return new(alloc.fallible()) LiveRange(vreg, Range(from, to)); + } + + uint32_t vreg() const { + MOZ_ASSERT(hasVreg()); + return vreg_; + } + bool hasVreg() const { + return vreg_ != 0; + } + + LiveBundle* bundle() const { + return bundle_; + } + + CodePosition from() const { + return range_.from; + } + CodePosition to() const { + return range_.to; + } + bool covers(CodePosition pos) const { + return pos >= from() && pos < to(); + } + + // Whether this range wholly contains other. + bool contains(LiveRange* other) const; + + // Intersect this range with other, returning the subranges of this + // that are before, inside, or after other. + void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const; + + // Whether this range has any intersection with other. + bool intersects(LiveRange* other) const; + + UsePositionIterator usesBegin() const { + return uses_.begin(); + } + UsePosition* lastUse() const { + return uses_.back(); + } + bool hasUses() const { + return !!usesBegin(); + } + UsePosition* popUse() { + return uses_.popFront(); + } + + bool hasDefinition() const { + return hasDefinition_; + } + + void setFrom(CodePosition from) { + range_.from = from; + MOZ_ASSERT(!range_.empty()); + } + void setTo(CodePosition to) { + range_.to = to; + MOZ_ASSERT(!range_.empty()); + } + + void setBundle(LiveBundle* bundle) { + bundle_ = bundle; + } + + void addUse(UsePosition* use); + void distributeUses(LiveRange* other); + + void setHasDefinition() { + MOZ_ASSERT(!hasDefinition_); + hasDefinition_ = true; + } + +#ifdef JS_JITSPEW + // Return a string describing this range. + UniqueChars toString() const; +#endif + + // Comparator for use in range splay trees. + static int compare(LiveRange* v0, LiveRange* v1) { + // LiveRange includes 'from' but excludes 'to'. + if (v0->to() <= v1->from()) + return -1; + if (v0->from() >= v1->to()) + return 1; + return 0; + } +}; + +// Tracks information about bundles that should all be spilled to the same +// physical location. At the beginning of allocation, each bundle has its own +// spill set. As bundles are split, the new smaller bundles continue to use the +// same spill set. +class SpillSet : public TempObject +{ + // All bundles with this spill set which have been spilled. All bundles in + // this list will be given the same physical slot. 
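+    //
+    // For example (bundle names hypothetical): if a bundle B is split into
+    // B1 and B2 and both later spill, they keep B's SpillSet and are both
+    // appended here; setAllocation() then hands the single chosen stack
+    // slot to every bundle in the list, so no moves are needed between B1
+    // and B2 across the split point.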
+ Vector list_; + + explicit SpillSet(TempAllocator& alloc) + : list_(alloc) + { } + + public: + static SpillSet* New(TempAllocator& alloc) { + return new(alloc) SpillSet(alloc); + } + + MOZ_MUST_USE bool addSpilledBundle(LiveBundle* bundle) { + return list_.append(bundle); + } + size_t numSpilledBundles() const { + return list_.length(); + } + LiveBundle* spilledBundle(size_t i) const { + return list_[i]; + } + + void setAllocation(LAllocation alloc); +}; + +// A set of live ranges which are all pairwise disjoint. The register allocator +// attempts to find allocations for an entire bundle, and if it fails the +// bundle will be broken into smaller ones which are allocated independently. +class LiveBundle : public TempObject +{ + // Set to use if this bundle or one it is split into is spilled. + SpillSet* spill_; + + // All the ranges in this set, ordered by location. + InlineForwardList ranges_; + + // Allocation to use for ranges in this set, bogus if unallocated or spilled + // and not yet given a physical stack slot. + LAllocation alloc_; + + // Bundle which entirely contains this one and has no register uses. This + // may or may not be spilled by the allocator, but it can be spilled and + // will not be split. + LiveBundle* spillParent_; + + LiveBundle(SpillSet* spill, LiveBundle* spillParent) + : spill_(spill), spillParent_(spillParent) + { } + + public: + static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent) + { + return new(alloc.fallible()) LiveBundle(spill, spillParent); + } + + SpillSet* spillSet() const { + return spill_; + } + void setSpillSet(SpillSet* spill) { + spill_ = spill; + } + + LiveRange::BundleLinkIterator rangesBegin() const { + return ranges_.begin(); + } + bool hasRanges() const { + return !!rangesBegin(); + } + LiveRange* firstRange() const { + return LiveRange::get(*rangesBegin()); + } + LiveRange* lastRange() const { + return LiveRange::get(ranges_.back()); + } + LiveRange* rangeFor(CodePosition pos) const; + void removeRange(LiveRange* range); + void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) { + ranges_.removeAndIncrement(iter); + } + void addRange(LiveRange* range); + MOZ_MUST_USE bool addRange(TempAllocator& alloc, uint32_t vreg, + CodePosition from, CodePosition to); + MOZ_MUST_USE bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange, + CodePosition from, CodePosition to); + LiveRange* popFirstRange(); +#ifdef DEBUG + size_t numRanges() const; +#endif + + LAllocation allocation() const { + return alloc_; + } + void setAllocation(LAllocation alloc) { + alloc_ = alloc; + } + + LiveBundle* spillParent() const { + return spillParent_; + } + +#ifdef JS_JITSPEW + // Return a string describing this bundle. + UniqueChars toString() const; +#endif +}; + +// Information about the allocation for a virtual register. +class VirtualRegister +{ + // Instruction which defines this register. + LNode* ins_; + + // Definition in the instruction for this register. + LDefinition* def_; + + // All live ranges for this register. These may overlap each other, and are + // ordered by their start position. + InlineForwardList ranges_; + + // Whether def_ is a temp or an output. + bool isTemp_; + + // Whether this vreg is an input for some phi. This use is not reflected in + // any range on the vreg. + bool usedByPhi_; + + // If this register's definition is MUST_REUSE_INPUT, whether a copy must + // be introduced before the definition that relaxes the policy. 
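+    //
+    // Illustrative case (instruction is hypothetical): a two-address add on
+    // x86 defines its output in the same register as its first input. If
+    // that input is still live after the instruction, the register cannot
+    // simply be reused, so this flag is set and a copy of the input is
+    // emitted before the definition instead.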
+ bool mustCopyInput_; + + void operator=(const VirtualRegister&) = delete; + VirtualRegister(const VirtualRegister&) = delete; + + public: + explicit VirtualRegister() + { + // Note: This class is zeroed before it is constructed. + } + + void init(LNode* ins, LDefinition* def, bool isTemp) { + MOZ_ASSERT(!ins_); + ins_ = ins; + def_ = def; + isTemp_ = isTemp; + } + + LNode* ins() const { + return ins_; + } + LDefinition* def() const { + return def_; + } + LDefinition::Type type() const { + return def()->type(); + } + uint32_t vreg() const { + return def()->virtualRegister(); + } + bool isCompatible(const AnyRegister& r) const { + return def_->isCompatibleReg(r); + } + bool isCompatible(const VirtualRegister& vr) const { + return def_->isCompatibleDef(*vr.def_); + } + bool isTemp() const { + return isTemp_; + } + + void setUsedByPhi() { + usedByPhi_ = true; + } + bool usedByPhi() { + return usedByPhi_; + } + + void setMustCopyInput() { + mustCopyInput_ = true; + } + bool mustCopyInput() { + return mustCopyInput_; + } + + LiveRange::RegisterLinkIterator rangesBegin() const { + return ranges_.begin(); + } + LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const { + return ranges_.begin(&range->registerLink); + } + bool hasRanges() const { + return !!rangesBegin(); + } + LiveRange* firstRange() const { + return LiveRange::get(*rangesBegin()); + } + LiveRange* lastRange() const { + return LiveRange::get(ranges_.back()); + } + LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const; + void removeRange(LiveRange* range); + void addRange(LiveRange* range); + + void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) { + ranges_.removeAndIncrement(iter); + } + + LiveBundle* firstBundle() const { + return firstRange()->bundle(); + } + + MOZ_MUST_USE bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to); + void addInitialUse(UsePosition* use); + void setInitialDefinition(CodePosition from); +}; + +// A sequence of code positions, for tellings BacktrackingAllocator::splitAt +// where to split. +typedef js::Vector SplitPositionVector; + +class BacktrackingAllocator : protected RegisterAllocator +{ + friend class C1Spewer; + friend class JSONSpewer; + + // This flag is set when testing new allocator modifications. + bool testbed; + + BitSet* liveIn; + FixedList vregs; + + // Allocation state. + StackSlotAllocator stackSlotAllocator; + + // Priority queue element: a bundle and the associated priority. + struct QueueItem + { + LiveBundle* bundle; + + QueueItem(LiveBundle* bundle, size_t priority) + : bundle(bundle), priority_(priority) + {} + + static size_t priority(const QueueItem& v) { + return v.priority_; + } + + private: + size_t priority_; + }; + + PriorityQueue allocationQueue; + + typedef SplayTree LiveRangeSet; + + // Each physical register is associated with the set of ranges over which + // that register is currently allocated. + struct PhysicalRegister { + bool allocatable; + AnyRegister reg; + LiveRangeSet allocations; + + PhysicalRegister() : allocatable(false) {} + }; + mozilla::Array registers; + + // Ranges of code which are considered to be hot, for which good allocation + // should be prioritized. + LiveRangeSet hotcode; + + struct CallRange : public TempObject, public InlineListNode { + LiveRange::Range range; + + CallRange(CodePosition from, CodePosition to) + : range(from, to) + {} + + // Comparator for use in splay tree. 
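+        //
+        // As with LiveRange::compare above, ranges are half-open and any two
+        // ranges that overlap compare as equal, so a splay-tree lookup with
+        // a bundle's range finds a call range intersecting it, if one exists.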
+ static int compare(CallRange* v0, CallRange* v1) { + if (v0->range.to <= v1->range.from) + return -1; + if (v0->range.from >= v1->range.to) + return 1; + return 0; + } + }; + + // Ranges where all registers must be spilled due to call instructions. + typedef InlineList CallRangeList; + CallRangeList callRangesList; + SplayTree callRanges; + + // Information about an allocated stack slot. + struct SpillSlot : public TempObject, public InlineForwardListNode { + LStackSlot alloc; + LiveRangeSet allocated; + + SpillSlot(uint32_t slot, LifoAlloc* alloc) + : alloc(slot), allocated(alloc) + {} + }; + typedef InlineForwardList SpillSlotList; + + // All allocated slots of each width. + SpillSlotList normalSlots, doubleSlots, quadSlots; + + public: + BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph, bool testbed) + : RegisterAllocator(mir, lir, graph), + testbed(testbed), + liveIn(nullptr), + callRanges(nullptr) + { } + + MOZ_MUST_USE bool go(); + + private: + + typedef Vector LiveRangeVector; + typedef Vector LiveBundleVector; + + // Liveness methods. + MOZ_MUST_USE bool init(); + MOZ_MUST_USE bool buildLivenessInfo(); + + MOZ_MUST_USE bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to); + + VirtualRegister& vreg(const LDefinition* def) { + return vregs[def->virtualRegister()]; + } + VirtualRegister& vreg(const LAllocation* alloc) { + MOZ_ASSERT(alloc->isUse()); + return vregs[alloc->toUse()->virtualRegister()]; + } + + // Allocation methods. + MOZ_MUST_USE bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1); + MOZ_MUST_USE bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input); + MOZ_MUST_USE bool mergeAndQueueRegisters(); + MOZ_MUST_USE bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement, + bool* success, bool* pfixed, LiveBundleVector& conflicting); + MOZ_MUST_USE bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, + Requirement hint, bool* success, bool* pfixed, + LiveBundleVector& conflicting); + MOZ_MUST_USE bool processBundle(MIRGenerator* mir, LiveBundle* bundle); + MOZ_MUST_USE bool computeRequirement(LiveBundle* bundle, Requirement *prequirement, + Requirement *phint); + MOZ_MUST_USE bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle, bool* success, + bool* pfixed, LiveBundleVector& conflicting); + MOZ_MUST_USE bool evictBundle(LiveBundle* bundle); + MOZ_MUST_USE bool splitAndRequeueBundles(LiveBundle* bundle, + const LiveBundleVector& newBundles); + MOZ_MUST_USE bool spill(LiveBundle* bundle); + + bool isReusedInput(LUse* use, LNode* ins, bool considerCopy); + bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false); + bool isRegisterDefinition(LiveRange* range); + MOZ_MUST_USE bool pickStackSlot(SpillSet* spill); + MOZ_MUST_USE bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle); + + // Reification methods. 
+ MOZ_MUST_USE bool pickStackSlots(); + MOZ_MUST_USE bool resolveControlFlow(); + MOZ_MUST_USE bool reifyAllocations(); + MOZ_MUST_USE bool populateSafepoints(); + MOZ_MUST_USE bool annotateMoveGroups(); + MOZ_MUST_USE bool deadRange(LiveRange* range); + size_t findFirstNonCallSafepoint(CodePosition from); + size_t findFirstSafepoint(CodePosition pos, size_t startFrom); + void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range); + + MOZ_MUST_USE bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to, + LDefinition::Type type) { + LAllocation fromAlloc = from->bundle()->allocation(); + LAllocation toAlloc = to->bundle()->allocation(); + MOZ_ASSERT(fromAlloc != toAlloc); + return moves->add(fromAlloc, toAlloc, type); + } + + MOZ_MUST_USE bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to, + LDefinition::Type type) { + if (from->bundle()->allocation() == to->bundle()->allocation()) + return true; + LMoveGroup* moves = getInputMoveGroup(ins); + return addMove(moves, from, to, type); + } + + MOZ_MUST_USE bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to, + LDefinition::Type type) { + if (from->bundle()->allocation() == to->bundle()->allocation()) + return true; + LMoveGroup* moves = getMoveGroupAfter(ins); + return addMove(moves, from, to, type); + } + + MOZ_MUST_USE bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to, + LDefinition::Type type) { + if (from->bundle()->allocation() == to->bundle()->allocation()) + return true; + LMoveGroup* moves = block->getExitMoveGroup(alloc()); + return addMove(moves, from, to, type); + } + + MOZ_MUST_USE bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to, + LDefinition::Type type) { + if (from->bundle()->allocation() == to->bundle()->allocation()) + return true; + LMoveGroup* moves = block->getEntryMoveGroup(alloc()); + return addMove(moves, from, to, type); + } + + // Debugging methods. + void dumpAllocations(); + + struct PrintLiveRange; + + bool minimalDef(LiveRange* range, LNode* ins); + bool minimalUse(LiveRange* range, UsePosition* use); + bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr); + + // Heuristic methods. + + size_t computePriority(LiveBundle* bundle); + size_t computeSpillWeight(LiveBundle* bundle); + + size_t maximumSpillWeight(const LiveBundleVector& bundles); + + MOZ_MUST_USE bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict); + + MOZ_MUST_USE bool splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions); + MOZ_MUST_USE bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success); + MOZ_MUST_USE bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, + bool* success); + MOZ_MUST_USE bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, + bool* success); + MOZ_MUST_USE bool splitAcrossCalls(LiveBundle* bundle); + + bool compilingWasm() { + return mir->info().compilingWasm(); + } + + void dumpVregs(); +}; + +} // namespace jit +} // namespace js + +#endif /* jit_BacktrackingAllocator_h */ diff --git a/js/src/jit/Bailouts.cpp b/js/src/jit/Bailouts.cpp new file mode 100644 index 000000000..d5172c6a3 --- /dev/null +++ b/js/src/jit/Bailouts.cpp @@ -0,0 +1,314 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/Bailouts.h" + +#include "mozilla/ScopeExit.h" + +#include "jscntxt.h" + +#include "jit/BaselineJIT.h" +#include "jit/Ion.h" +#include "jit/JitCompartment.h" +#include "jit/JitSpewer.h" +#include "jit/Snapshots.h" +#include "vm/TraceLogging.h" + +#include "jit/JitFrameIterator-inl.h" +#include "vm/Probes-inl.h" +#include "vm/Stack-inl.h" + +using namespace js; +using namespace js::jit; + +using mozilla::IsInRange; + +uint32_t +jit::Bailout(BailoutStack* sp, BaselineBailoutInfo** bailoutInfo) +{ + JSContext* cx = GetJSContextFromMainThread(); + MOZ_ASSERT(bailoutInfo); + + // We don't have an exit frame. + MOZ_ASSERT(IsInRange(FAKE_JIT_TOP_FOR_BAILOUT, 0, 0x1000) && + IsInRange(FAKE_JIT_TOP_FOR_BAILOUT + sizeof(CommonFrameLayout), 0, 0x1000), + "Fake jitTop pointer should be within the first page."); + cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT; + + JitActivationIterator jitActivations(cx->runtime()); + BailoutFrameInfo bailoutData(jitActivations, sp); + JitFrameIterator iter(jitActivations); + MOZ_ASSERT(!iter.ionScript()->invalidated()); + CommonFrameLayout* currentFramePtr = iter.current(); + + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + TraceLogTimestamp(logger, TraceLogger_Bailout); + + JitSpew(JitSpew_IonBailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset()); + + MOZ_ASSERT(IsBaselineEnabled(cx)); + + *bailoutInfo = nullptr; + uint32_t retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, false, bailoutInfo, + /* excInfo = */ nullptr); + MOZ_ASSERT(retval == BAILOUT_RETURN_OK || + retval == BAILOUT_RETURN_FATAL_ERROR || + retval == BAILOUT_RETURN_OVERRECURSED); + MOZ_ASSERT_IF(retval == BAILOUT_RETURN_OK, *bailoutInfo != nullptr); + + if (retval != BAILOUT_RETURN_OK) { + JSScript* script = iter.script(); + probes::ExitScript(cx, script, script->functionNonDelazifying(), + /* popSPSFrame = */ false); + } + + // This condition was wrong when we entered this bailout function, but it + // might be true now. A GC might have reclaimed all the Jit code and + // invalidated all frames which are currently on the stack. As we are + // already in a bailout, we could not switch to an invalidation + // bailout. When the code of an IonScript which is on the stack is + // invalidated (see InvalidateActivation), we remove references to it and + // increment the reference counter for each activation that appear on the + // stack. As the bailed frame is one of them, we have to decrement it now. + if (iter.ionScript()->invalidated()) + iter.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp()); + + // NB: Commentary on how |lastProfilingFrame| is set from bailouts. + // + // Once we return to jitcode, any following frames might get clobbered, + // but the current frame will not (as it will be clobbered "in-place" + // with a baseline frame that will share the same frame prefix). + // However, there may be multiple baseline frames unpacked from this + // single Ion frame, which means we will need to once again reset + // |lastProfilingFrame| to point to the correct unpacked last frame + // in |FinishBailoutToBaseline|. + // + // In the case of error, the jitcode will jump immediately to an + // exception handler, which will unwind the frames and properly set + // the |lastProfilingFrame| to point to the frame being resumed into + // (see |AutoResetLastProfilerFrameOnReturnFromException|). 
+ // + // In both cases, we want to temporarily set the |lastProfilingFrame| + // to the current frame being bailed out, and then fix it up later. + if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) + cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr); + + return retval; +} + +uint32_t +jit::InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut, + BaselineBailoutInfo** bailoutInfo) +{ + sp->checkInvariants(); + + JSContext* cx = GetJSContextFromMainThread(); + + // We don't have an exit frame. + cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT; + + JitActivationIterator jitActivations(cx->runtime()); + BailoutFrameInfo bailoutData(jitActivations, sp); + JitFrameIterator iter(jitActivations); + CommonFrameLayout* currentFramePtr = iter.current(); + + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + TraceLogTimestamp(logger, TraceLogger_Invalidation); + + JitSpew(JitSpew_IonBailouts, "Took invalidation bailout! Snapshot offset: %d", iter.snapshotOffset()); + + // Note: the frame size must be computed before we return from this function. + *frameSizeOut = iter.frameSize(); + + MOZ_ASSERT(IsBaselineEnabled(cx)); + + *bailoutInfo = nullptr; + uint32_t retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, true, bailoutInfo, + /* excInfo = */ nullptr); + MOZ_ASSERT(retval == BAILOUT_RETURN_OK || + retval == BAILOUT_RETURN_FATAL_ERROR || + retval == BAILOUT_RETURN_OVERRECURSED); + MOZ_ASSERT_IF(retval == BAILOUT_RETURN_OK, *bailoutInfo != nullptr); + + if (retval != BAILOUT_RETURN_OK) { + // If the bailout failed, then bailout trampoline will pop the + // current frame and jump straight to exception handling code when + // this function returns. Any SPS entry pushed for this frame will + // be silently forgotten. + // + // We call ExitScript here to ensure that if the ionScript had SPS + // instrumentation, then the SPS entry for it is popped. + // + // However, if the bailout was during argument check, then a + // pseudostack frame would not have been pushed in the first + // place, so don't pop anything in that case. + JSScript* script = iter.script(); + probes::ExitScript(cx, script, script->functionNonDelazifying(), + /* popSPSFrame = */ false); + +#ifdef JS_JITSPEW + JitFrameLayout* frame = iter.jsFrame(); + JitSpew(JitSpew_IonInvalidate, "Bailout failed (%s)", + (retval == BAILOUT_RETURN_FATAL_ERROR) ? "Fatal Error" : "Over Recursion"); + JitSpew(JitSpew_IonInvalidate, " calleeToken %p", (void*) frame->calleeToken()); + JitSpew(JitSpew_IonInvalidate, " frameSize %u", unsigned(frame->prevFrameLocalSize())); + JitSpew(JitSpew_IonInvalidate, " ra %p", (void*) frame->returnAddress()); +#endif + } + + iter.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp()); + + // Make the frame being bailed out the top profiled frame. 
+ if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) + cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr); + + return retval; +} + +BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, + const JitFrameIterator& frame) + : machine_(frame.machineState()) +{ + framePointer_ = (uint8_t*) frame.fp(); + topFrameSize_ = frame.frameSize(); + topIonScript_ = frame.ionScript(); + attachOnJitActivation(activations); + + const OsiIndex* osiIndex = frame.osiIndex(); + snapshotOffset_ = osiIndex->snapshotOffset(); +} + +uint32_t +jit::ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame, + ResumeFromException* rfe, + const ExceptionBailoutInfo& excInfo, + bool* overrecursed) +{ + // We can be propagating debug mode exceptions without there being an + // actual exception pending. For instance, when we return false from an + // operation callback like a timeout handler. + MOZ_ASSERT_IF(!excInfo.propagatingIonExceptionForDebugMode(), cx->isExceptionPending()); + + uint8_t* prevJitTop = cx->runtime()->jitTop; + auto restoreJitTop = mozilla::MakeScopeExit([&]() { cx->runtime()->jitTop = prevJitTop; }); + cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT; + + gc::AutoSuppressGC suppress(cx); + + JitActivationIterator jitActivations(cx->runtime()); + BailoutFrameInfo bailoutData(jitActivations, frame.frame()); + JitFrameIterator iter(jitActivations); + CommonFrameLayout* currentFramePtr = iter.current(); + + BaselineBailoutInfo* bailoutInfo = nullptr; + uint32_t retval; + + { + // Currently we do not tolerate OOM here so as not to complicate the + // exception handling code further. + AutoEnterOOMUnsafeRegion oomUnsafe; + + retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, true, + &bailoutInfo, &excInfo); + if (retval == BAILOUT_RETURN_FATAL_ERROR && cx->isThrowingOutOfMemory()) + oomUnsafe.crash("ExceptionHandlerBailout"); + } + + if (retval == BAILOUT_RETURN_OK) { + MOZ_ASSERT(bailoutInfo); + + // Overwrite the kind so HandleException after the bailout returns + // false, jumping directly to the exception tail. + if (excInfo.propagatingIonExceptionForDebugMode()) + bailoutInfo->bailoutKind = Bailout_IonExceptionDebugMode; + + rfe->kind = ResumeFromException::RESUME_BAILOUT; + rfe->target = cx->runtime()->jitRuntime()->getBailoutTail()->raw(); + rfe->bailoutInfo = bailoutInfo; + } else { + // Bailout failed. If the overrecursion check failed, clear the + // exception to turn this into an uncatchable error, continue popping + // all inline frames and have the caller report the error. + MOZ_ASSERT(!bailoutInfo); + + if (retval == BAILOUT_RETURN_OVERRECURSED) { + *overrecursed = true; + if (!excInfo.propagatingIonExceptionForDebugMode()) + cx->clearPendingException(); + } else { + MOZ_ASSERT(retval == BAILOUT_RETURN_FATAL_ERROR); + + // Crash for now so as not to complicate the exception handling code + // further. + MOZ_CRASH(); + } + } + + // Make the frame being bailed out the top profiled frame. + if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) + cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr); + + return retval; +} + +// Initialize the decl env Object, call object, and any arguments obj of the +// current frame. +bool +jit::EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp) +{ + // Ion does not compile eval scripts. 
+ MOZ_ASSERT(!fp.isEvalFrame()); + + if (fp.isFunctionFrame()) { + // Ion does not handle extra var environments due to parameter + // expressions yet. + MOZ_ASSERT(!fp.callee()->needsExtraBodyVarEnvironment()); + + if (!fp.hasInitialEnvironment() && fp.callee()->needsFunctionEnvironmentObjects()) { + if (!fp.initFunctionEnvironmentObjects(cx)) + return false; + } + } + + return true; +} + +void +jit::CheckFrequentBailouts(JSContext* cx, JSScript* script, BailoutKind bailoutKind) +{ + if (script->hasIonScript()) { + // Invalidate if this script keeps bailing out without invalidation. Next time + // we compile this script LICM will be disabled. + IonScript* ionScript = script->ionScript(); + + if (ionScript->bailoutExpected()) { + // If we bailout because of the first execution of a basic block, + // then we should record which basic block we are returning in, + // which should prevent this from happening again. Also note that + // the first execution bailout can be related to an inlined script, + // so there is no need to penalize the caller. + if (bailoutKind != Bailout_FirstExecution && !script->hadFrequentBailouts()) + script->setHadFrequentBailouts(); + + JitSpew(JitSpew_IonInvalidate, "Invalidating due to too many bailouts"); + + Invalidate(cx, script); + } + } +} + +void +BailoutFrameInfo::attachOnJitActivation(const JitActivationIterator& jitActivations) +{ + MOZ_ASSERT(jitActivations.jitTop() == FAKE_JIT_TOP_FOR_BAILOUT); + activation_ = jitActivations->asJit(); + activation_->setBailoutData(this); +} + +BailoutFrameInfo::~BailoutFrameInfo() +{ + activation_->cleanBailoutData(); +} diff --git a/js/src/jit/Bailouts.h b/js/src/jit/Bailouts.h new file mode 100644 index 000000000..747f59b7d --- /dev/null +++ b/js/src/jit/Bailouts.h @@ -0,0 +1,219 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_Bailouts_h +#define jit_Bailouts_h + +#include "jstypes.h" + +#include "jit/JitFrameIterator.h" +#include "jit/JitFrames.h" +#include "vm/Stack.h" + +namespace js { +namespace jit { + +// A "bailout" is a condition in which we need to recover an interpreter frame +// from an IonFrame. Bailouts can happen for the following reasons: +// (1) A deoptimization guard, for example, an add overflows or a type check +// fails. +// (2) A check or assumption held by the JIT is invalidated by the VM, and +// JIT code must be thrown away. This includes the GC possibly deciding +// to evict live JIT code, or a Type Inference reflow. +// +// Note that bailouts as described here do not include normal Ion frame +// inspection, for example, if an exception must be built or the GC needs to +// scan an Ion frame for gcthings. +// +// The second type of bailout needs a different name - "deoptimization" or +// "deep bailout". Here we are concerned with eager (or maybe "shallow") +// bailouts, that happen from JIT code. These happen from guards, like: +// +// cmp [obj + shape], 0x50M37TH1NG +// jmp _bailout +// +// The bailout target needs to somehow translate the Ion frame (whose state +// will differ at each program point) to an interpreter frame. This state is +// captured into the IonScript's snapshot buffer, and for each bailout we know +// which snapshot corresponds to its state. 
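+//
+// (Purely illustrative example: a failed shape guard at some bytecode offset
+// might have a snapshot recording "local 0 is in rax, local 1 is in spill
+// slot 8"; that mapping is all the bailout machinery needs to rebuild the
+// corresponding Baseline frame. The register and slot names are made up.)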
+// +// Roughly, the following needs to happen at the bailout target. +// (1) Move snapshot ID into a known stack location (registers cannot be +// mutated). +// (2) Spill all registers to the stack. +// (3) Call a Bailout() routine, whose argument is the stack pointer. +// (4) Bailout() will find the IonScript on the stack, use the snapshot ID +// to find the structure of the frame, and then use the stack and spilled +// registers to perform frame conversion. +// (5) Bailout() returns, and the JIT must immediately return to the +// interpreter (all frames are converted at once). +// +// (2) and (3) are implemented by a trampoline held in the compartment. +// Naively, we could implement (1) like: +// +// _bailout_ID_1: +// push 1 +// jmp _global_bailout_handler +// _bailout_ID_2: +// push 2 +// jmp _global_bailout_handler +// +// This takes about 10 extra bytes per guard. On some platforms, we can reduce +// this overhead to 4 bytes by creating a global jump table, shared again in +// the compartment: +// +// call _global_bailout_handler +// call _global_bailout_handler +// call _global_bailout_handler +// call _global_bailout_handler +// ... +// _global_bailout_handler: +// +// In the bailout handler, we can recompute which entry in the table was +// selected by subtracting the return addressed pushed by the call, from the +// start of the table, and then dividing by the size of a (call X) entry in the +// table. This gives us a number in [0, TableSize), which we call a +// "BailoutId". +// +// Then, we can provide a per-script mapping from BailoutIds to snapshots, +// which takes only four bytes per entry. +// +// This strategy does not work as given, because the bailout handler has no way +// to compute the location of an IonScript. Currently, we do not use frame +// pointers. To account for this we segregate frames into a limited set of +// "frame sizes", and create a table for each frame size. We also have the +// option of not using bailout tables, for platforms or situations where the +// 10 byte cost is more optimal than a bailout table. See JitFrames.h for more +// detail. + +static const BailoutId INVALID_BAILOUT_ID = BailoutId(-1); + +// Keep this arbitrarily small for now, for testing. +static const uint32_t BAILOUT_TABLE_SIZE = 16; + +// Bailout return codes. +// N.B. the relative order of these values is hard-coded into ::GenerateBailoutThunk. +static const uint32_t BAILOUT_RETURN_OK = 0; +static const uint32_t BAILOUT_RETURN_FATAL_ERROR = 1; +static const uint32_t BAILOUT_RETURN_OVERRECURSED = 2; + +// This address is a magic number made to cause crashes while indicating that we +// are making an attempt to mark the stack during a bailout. +static uint8_t * const FAKE_JIT_TOP_FOR_BAILOUT = reinterpret_cast(0xba1); + +// BailoutStack is an architecture specific pointer to the stack, given by the +// bailout handler. +class BailoutStack; +class InvalidationBailoutStack; + +// Must be implemented by each architecture. + +// This structure is constructed before recovering the baseline frames for a +// bailout. It records all information extracted from the stack, and which are +// needed for the JitFrameIterator. 
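+//
+// Rough usage sketch, mirroring the bailout entry points in Bailouts.cpp
+// (not a separate API):
+//
+//   JitActivationIterator jitActivations(cx->runtime());
+//   BailoutFrameInfo bailoutData(jitActivations, sp);  // attaches to the JitActivation
+//   JitFrameIterator iter(jitActivations);             // walks frames using that data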
+class BailoutFrameInfo +{ + MachineState machine_; + uint8_t* framePointer_; + size_t topFrameSize_; + IonScript* topIonScript_; + uint32_t snapshotOffset_; + JitActivation* activation_; + + void attachOnJitActivation(const JitActivationIterator& activations); + + public: + BailoutFrameInfo(const JitActivationIterator& activations, BailoutStack* sp); + BailoutFrameInfo(const JitActivationIterator& activations, InvalidationBailoutStack* sp); + BailoutFrameInfo(const JitActivationIterator& activations, const JitFrameIterator& frame); + ~BailoutFrameInfo(); + + uint8_t* fp() const { + return framePointer_; + } + SnapshotOffset snapshotOffset() const { + return snapshotOffset_; + } + const MachineState* machineState() const { + return &machine_; + } + size_t topFrameSize() const { + return topFrameSize_; + } + IonScript* ionScript() const { + return topIonScript_; + } + JitActivation* activation() const { + return activation_; + } +}; + +MOZ_MUST_USE bool EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp); + +struct BaselineBailoutInfo; + +// Called from a bailout thunk. Returns a BAILOUT_* error code. +uint32_t Bailout(BailoutStack* sp, BaselineBailoutInfo** info); + +// Called from the invalidation thunk. Returns a BAILOUT_* error code. +uint32_t InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut, + BaselineBailoutInfo** info); + +class ExceptionBailoutInfo +{ + size_t frameNo_; + jsbytecode* resumePC_; + size_t numExprSlots_; + + public: + ExceptionBailoutInfo(size_t frameNo, jsbytecode* resumePC, size_t numExprSlots) + : frameNo_(frameNo), + resumePC_(resumePC), + numExprSlots_(numExprSlots) + { } + + ExceptionBailoutInfo() + : frameNo_(0), + resumePC_(nullptr), + numExprSlots_(0) + { } + + bool catchingException() const { + return !!resumePC_; + } + bool propagatingIonExceptionForDebugMode() const { + return !resumePC_; + } + + size_t frameNo() const { + MOZ_ASSERT(catchingException()); + return frameNo_; + } + jsbytecode* resumePC() const { + MOZ_ASSERT(catchingException()); + return resumePC_; + } + size_t numExprSlots() const { + MOZ_ASSERT(catchingException()); + return numExprSlots_; + } +}; + +// Called from the exception handler to enter a catch or finally block. +// Returns a BAILOUT_* error code. +uint32_t ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame, + ResumeFromException* rfe, + const ExceptionBailoutInfo& excInfo, + bool* overrecursed); + +uint32_t FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo); + +void CheckFrequentBailouts(JSContext* cx, JSScript* script, BailoutKind bailoutKind); + +} // namespace jit +} // namespace js + +#endif /* jit_Bailouts_h */ diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp new file mode 100644 index 000000000..8fc8a522d --- /dev/null +++ b/js/src/jit/BaselineBailouts.cpp @@ -0,0 +1,1999 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "mozilla/ScopeExit.h" +#include "mozilla/SizePrintfMacros.h" + +#include "jsprf.h" +#include "jsutil.h" +#include "jit/arm/Simulator-arm.h" +#include "jit/BaselineIC.h" +#include "jit/BaselineJIT.h" +#include "jit/CompileInfo.h" +#include "jit/JitSpewer.h" +#include "jit/mips32/Simulator-mips32.h" +#include "jit/mips64/Simulator-mips64.h" +#include "jit/Recover.h" +#include "jit/RematerializedFrame.h" + +#include "vm/ArgumentsObject.h" +#include "vm/Debugger.h" +#include "vm/TraceLogging.h" + +#include "jsscriptinlines.h" + +#include "jit/JitFrames-inl.h" + +using namespace js; +using namespace js::jit; + +// BaselineStackBuilder may reallocate its buffer if the current one is too +// small. To avoid dangling pointers, BufferPointer represents a pointer into +// this buffer as a pointer to the header and a fixed offset. +template +class BufferPointer +{ + BaselineBailoutInfo** header_; + size_t offset_; + bool heap_; + + public: + BufferPointer(BaselineBailoutInfo** header, size_t offset, bool heap) + : header_(header), offset_(offset), heap_(heap) + { } + + T* get() const { + BaselineBailoutInfo* header = *header_; + if (!heap_) + return (T*)(header->incomingStack + offset_); + + uint8_t* p = header->copyStackTop - offset_; + MOZ_ASSERT(p >= header->copyStackBottom && p < header->copyStackTop); + return (T*)p; + } + + void set(const T& value) { + *get() = value; + } + + // Note: we return a copy instead of a reference, to avoid potential memory + // safety hazards when the underlying buffer gets resized. + const T operator*() const { return *get(); } + T* operator->() const { return get(); } +}; + +/** + * BaselineStackBuilder helps abstract the process of rebuilding the C stack on the heap. + * It takes a bailout iterator and keeps track of the point on the C stack from which + * the reconstructed frames will be written. + * + * It exposes methods to write data into the heap memory storing the reconstructed + * stack. It also exposes method to easily calculate addresses. This includes both the + * virtual address that a particular value will be at when it's eventually copied onto + * the stack, as well as the current actual address of that value (whether on the heap + * allocated portion being constructed or the existing stack). + * + * The abstraction handles transparent re-allocation of the heap memory when it + * needs to be enlarged to accommodate new data. Similarly to the C stack, the + * data that's written to the reconstructed stack grows from high to low in memory. + * + * The lowest region of the allocated memory contains a BaselineBailoutInfo structure that + * points to the start and end of the written data. 
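+ *
+ * For example (sizes hypothetical): with an initial 4096-byte buffer,
+ * copyStackTop points at buffer + 4096 and copyStackBottom starts out equal
+ * to it; writing an 8-byte word moves copyStackBottom down to buffer + 4088,
+ * while the word's eventual address on the real stack is computed relative
+ * to the incoming frame rather than the heap buffer.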
+ */ +struct BaselineStackBuilder +{ + JitFrameIterator& iter_; + JitFrameLayout* frame_; + + static size_t HeaderSize() { + return AlignBytes(sizeof(BaselineBailoutInfo), sizeof(void*)); + } + size_t bufferTotal_; + size_t bufferAvail_; + size_t bufferUsed_; + uint8_t* buffer_; + BaselineBailoutInfo* header_; + + size_t framePushed_; + + BaselineStackBuilder(JitFrameIterator& iter, size_t initialSize) + : iter_(iter), + frame_(static_cast(iter.current())), + bufferTotal_(initialSize), + bufferAvail_(0), + bufferUsed_(0), + buffer_(nullptr), + header_(nullptr), + framePushed_(0) + { + MOZ_ASSERT(bufferTotal_ >= HeaderSize()); + MOZ_ASSERT(iter.isBailoutJS()); + } + + ~BaselineStackBuilder() { + js_free(buffer_); + } + + MOZ_MUST_USE bool init() { + MOZ_ASSERT(!buffer_); + MOZ_ASSERT(bufferUsed_ == 0); + buffer_ = reinterpret_cast(js_calloc(bufferTotal_)); + if (!buffer_) + return false; + bufferAvail_ = bufferTotal_ - HeaderSize(); + bufferUsed_ = 0; + + header_ = reinterpret_cast(buffer_); + header_->incomingStack = reinterpret_cast(frame_); + header_->copyStackTop = buffer_ + bufferTotal_; + header_->copyStackBottom = header_->copyStackTop; + header_->setR0 = 0; + header_->valueR0 = UndefinedValue(); + header_->setR1 = 0; + header_->valueR1 = UndefinedValue(); + header_->resumeFramePtr = nullptr; + header_->resumeAddr = nullptr; + header_->resumePC = nullptr; + header_->monitorStub = nullptr; + header_->numFrames = 0; + header_->checkGlobalDeclarationConflicts = false; + return true; + } + + MOZ_MUST_USE bool enlarge() { + MOZ_ASSERT(buffer_ != nullptr); + if (bufferTotal_ & mozilla::tl::MulOverflowMask<2>::value) + return false; + size_t newSize = bufferTotal_ * 2; + uint8_t* newBuffer = reinterpret_cast(js_calloc(newSize)); + if (!newBuffer) + return false; + memcpy((newBuffer + newSize) - bufferUsed_, header_->copyStackBottom, bufferUsed_); + memcpy(newBuffer, header_, sizeof(BaselineBailoutInfo)); + js_free(buffer_); + buffer_ = newBuffer; + bufferTotal_ = newSize; + bufferAvail_ = newSize - (HeaderSize() + bufferUsed_); + + header_ = reinterpret_cast(buffer_); + header_->copyStackTop = buffer_ + bufferTotal_; + header_->copyStackBottom = header_->copyStackTop - bufferUsed_; + return true; + } + + BaselineBailoutInfo* info() { + MOZ_ASSERT(header_ == reinterpret_cast(buffer_)); + return header_; + } + + BaselineBailoutInfo* takeBuffer() { + MOZ_ASSERT(header_ == reinterpret_cast(buffer_)); + buffer_ = nullptr; + return header_; + } + + void resetFramePushed() { + framePushed_ = 0; + } + + size_t framePushed() const { + return framePushed_; + } + + MOZ_MUST_USE bool subtract(size_t size, const char* info = nullptr) { + // enlarge the buffer if need be. + while (size > bufferAvail_) { + if (!enlarge()) + return false; + } + + // write out element. 
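+        // (For a hypothetical 16-byte subtraction: copyStackBottom moves down
+        // by 16, bufferAvail_ shrinks by 16, and bufferUsed_ and framePushed_
+        // both grow by 16, mirroring a push on the real stack.)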
+ header_->copyStackBottom -= size; + bufferAvail_ -= size; + bufferUsed_ += size; + framePushed_ += size; + if (info) { + JitSpew(JitSpew_BaselineBailouts, + " SUB_%03d %p/%p %-15s", + (int) size, header_->copyStackBottom, virtualPointerAtStackOffset(0), info); + } + return true; + } + + template + MOZ_MUST_USE bool write(const T& t) { + MOZ_ASSERT(!(uintptr_t(&t) >= uintptr_t(header_->copyStackBottom) && + uintptr_t(&t) < uintptr_t(header_->copyStackTop)), + "Should not reference memory that can be freed"); + if (!subtract(sizeof(T))) + return false; + memcpy(header_->copyStackBottom, &t, sizeof(T)); + return true; + } + + template + MOZ_MUST_USE bool writePtr(T* t, const char* info) { + if (!write(t)) + return false; + if (info) + JitSpew(JitSpew_BaselineBailouts, + " WRITE_PTR %p/%p %-15s %p", + header_->copyStackBottom, virtualPointerAtStackOffset(0), info, t); + return true; + } + + MOZ_MUST_USE bool writeWord(size_t w, const char* info) { + if (!write(w)) + return false; + if (info) { + if (sizeof(size_t) == 4) { + JitSpew(JitSpew_BaselineBailouts, + " WRITE_WRD %p/%p %-15s %08" PRIxSIZE, + header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w); + } else { + JitSpew(JitSpew_BaselineBailouts, + " WRITE_WRD %p/%p %-15s %016" PRIxSIZE, + header_->copyStackBottom, virtualPointerAtStackOffset(0), info, w); + } + } + return true; + } + + MOZ_MUST_USE bool writeValue(const Value& val, const char* info) { + if (!write(val)) + return false; + if (info) { + JitSpew(JitSpew_BaselineBailouts, + " WRITE_VAL %p/%p %-15s %016" PRIx64, + header_->copyStackBottom, virtualPointerAtStackOffset(0), info, + *((uint64_t*) &val)); + } + return true; + } + + MOZ_MUST_USE bool maybeWritePadding(size_t alignment, size_t after, const char* info) { + MOZ_ASSERT(framePushed_ % sizeof(Value) == 0); + MOZ_ASSERT(after % sizeof(Value) == 0); + size_t offset = ComputeByteAlignment(after, alignment); + while (framePushed_ % alignment != offset) { + if (!writeValue(MagicValue(JS_ARG_POISON), info)) + return false; + } + + return true; + } + + Value popValue() { + MOZ_ASSERT(bufferUsed_ >= sizeof(Value)); + MOZ_ASSERT(framePushed_ >= sizeof(Value)); + bufferAvail_ += sizeof(Value); + bufferUsed_ -= sizeof(Value); + framePushed_ -= sizeof(Value); + Value result = *((Value*) header_->copyStackBottom); + header_->copyStackBottom += sizeof(Value); + return result; + } + + void popValueInto(PCMappingSlotInfo::SlotLocation loc) { + MOZ_ASSERT(PCMappingSlotInfo::ValidSlotLocation(loc)); + switch(loc) { + case PCMappingSlotInfo::SlotInR0: + header_->setR0 = 1; + header_->valueR0 = popValue(); + break; + case PCMappingSlotInfo::SlotInR1: + header_->setR1 = 1; + header_->valueR1 = popValue(); + break; + default: + MOZ_ASSERT(loc == PCMappingSlotInfo::SlotIgnore); + popValue(); + break; + } + } + + void setResumeFramePtr(void* resumeFramePtr) { + header_->resumeFramePtr = resumeFramePtr; + } + + void setResumeAddr(void* resumeAddr) { + header_->resumeAddr = resumeAddr; + } + + void setResumePC(jsbytecode* pc) { + header_->resumePC = pc; + } + + void setMonitorStub(ICStub* stub) { + header_->monitorStub = stub; + } + + template + BufferPointer pointerAtStackOffset(size_t offset) { + if (offset < bufferUsed_) { + // Calculate offset from copyStackTop. 
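+            // (Illustrative numbers: if bufferUsed_ is 64 and the requested
+            // offset is 8, the value lives in the heap copy at
+            // copyStackBottom + 8, i.e. copyStackTop - 56; offsets of 64 or
+            // more fall through to the untouched incoming stack instead.)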
+ offset = header_->copyStackTop - (header_->copyStackBottom + offset); + return BufferPointer(&header_, offset, /* heap = */ true); + } + + return BufferPointer(&header_, offset - bufferUsed_, /* heap = */ false); + } + + BufferPointer valuePointerAtStackOffset(size_t offset) { + return pointerAtStackOffset(offset); + } + + inline uint8_t* virtualPointerAtStackOffset(size_t offset) { + if (offset < bufferUsed_) + return reinterpret_cast(frame_) - (bufferUsed_ - offset); + return reinterpret_cast(frame_) + (offset - bufferUsed_); + } + + inline JitFrameLayout* startFrame() { + return frame_; + } + + BufferPointer topFrameAddress() { + return pointerAtStackOffset(0); + } + + // + // This method should only be called when the builder is in a state where it is + // starting to construct the stack frame for the next callee. This means that + // the lowest value on the constructed stack is the return address for the previous + // caller frame. + // + // This method is used to compute the value of the frame pointer (e.g. ebp on x86) + // that would have been saved by the baseline jitcode when it was entered. In some + // cases, this value can be bogus since we can ensure that the caller would have saved + // it anyway. + // + void* calculatePrevFramePtr() { + // Get the incoming frame. + BufferPointer topFrame = topFrameAddress(); + FrameType type = topFrame->prevType(); + + // For IonJS, IonAccessorIC and Entry frames, the "saved" frame pointer + // in the baseline frame is meaningless, since Ion saves all registers + // before calling other ion frames, and the entry frame saves all + // registers too. + if (type == JitFrame_IonJS || type == JitFrame_Entry || type == JitFrame_IonAccessorIC) + return nullptr; + + // BaselineStub - Baseline calling into Ion. + // PrevFramePtr needs to point to the BaselineStubFrame's saved frame pointer. + // STACK_START_ADDR + JitFrameLayout::Size() + PREV_FRAME_SIZE + // - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr() + if (type == JitFrame_BaselineStub) { + size_t offset = JitFrameLayout::Size() + topFrame->prevFrameLocalSize() + + BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr(); + return virtualPointerAtStackOffset(offset); + } + + MOZ_ASSERT(type == JitFrame_Rectifier); + // Rectifier - behaviour depends on the frame preceding the rectifier frame, and + // whether the arch is x86 or not. The x86 rectifier frame saves the frame pointer, + // so we can calculate it directly. For other archs, the previous frame pointer + // is stored on the stack in the frame that precedes the rectifier frame. + size_t priorOffset = JitFrameLayout::Size() + topFrame->prevFrameLocalSize(); +#if defined(JS_CODEGEN_X86) + // On X86, the FramePointer is pushed as the first value in the Rectifier frame. + MOZ_ASSERT(BaselineFrameReg == FramePointer); + priorOffset -= sizeof(void*); + return virtualPointerAtStackOffset(priorOffset); +#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ + defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ + defined(JS_CODEGEN_X64) + // On X64, ARM, ARM64, and MIPS, the frame pointer save location depends on + // the caller of the rectifier frame. + BufferPointer priorFrame = + pointerAtStackOffset(priorOffset); + FrameType priorType = priorFrame->prevType(); + MOZ_ASSERT(priorType == JitFrame_IonJS || priorType == JitFrame_BaselineStub); + + // If the frame preceding the rectifier is an IonJS frame, then once again + // the frame pointer does not matter. 
+ if (priorType == JitFrame_IonJS) + return nullptr; + + // Otherwise, the frame preceding the rectifier is a BaselineStub frame. + // let X = STACK_START_ADDR + JitFrameLayout::Size() + PREV_FRAME_SIZE + // X + RectifierFrameLayout::Size() + // + ((RectifierFrameLayout*) X)->prevFrameLocalSize() + // - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr() + size_t extraOffset = RectifierFrameLayout::Size() + priorFrame->prevFrameLocalSize() + + BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr(); + return virtualPointerAtStackOffset(priorOffset + extraOffset); +#elif defined(JS_CODEGEN_NONE) + MOZ_CRASH(); +#else +# error "Bad architecture!" +#endif + } + + void setCheckGlobalDeclarationConflicts() { + header_->checkGlobalDeclarationConflicts = true; + } +}; + +// Ensure that all value locations are readable from the SnapshotIterator. +// Remove RInstructionResults from the JitActivation if the frame got recovered +// ahead of the bailout. +class SnapshotIteratorForBailout : public SnapshotIterator +{ + JitActivation* activation_; + JitFrameIterator& iter_; + + public: + SnapshotIteratorForBailout(JitActivation* activation, JitFrameIterator& iter) + : SnapshotIterator(iter, activation->bailoutData()->machineState()), + activation_(activation), + iter_(iter) + { + MOZ_ASSERT(iter.isBailoutJS()); + } + + ~SnapshotIteratorForBailout() { + // The bailout is complete, we no longer need the recover instruction + // results. + activation_->removeIonFrameRecovery(fp_); + } + + // Take previously computed result out of the activation, or compute the + // results of all recover instructions contained in the snapshot. + MOZ_MUST_USE bool init(JSContext* cx) { + + // Under a bailout, there is no need to invalidate the frame after + // evaluating the recover instruction, as the invalidation is only + // needed to cause of the frame which has been introspected. + MaybeReadFallback recoverBailout(cx, activation_, &iter_, MaybeReadFallback::Fallback_DoNothing); + return initInstructionResults(recoverBailout); + } +}; + +#ifdef DEBUG +static inline bool +IsInlinableFallback(ICFallbackStub* icEntry) +{ + return icEntry->isCall_Fallback() || icEntry->isGetProp_Fallback() || + icEntry->isSetProp_Fallback(); +} +#endif + +static inline void* +GetStubReturnAddress(JSContext* cx, jsbytecode* pc) +{ + if (IsGetPropPC(pc)) + return cx->compartment()->jitCompartment()->baselineGetPropReturnAddr(); + if (IsSetPropPC(pc)) + return cx->compartment()->jitCompartment()->baselineSetPropReturnAddr(); + // This should be a call op of some kind, now. + MOZ_ASSERT(IsCallPC(pc)); + return cx->compartment()->jitCompartment()->baselineCallReturnAddr(JSOp(*pc) == JSOP_NEW); +} + +static inline jsbytecode* +GetNextNonLoopEntryPc(jsbytecode* pc) +{ + JSOp op = JSOp(*pc); + if (op == JSOP_GOTO) + return pc + GET_JUMP_OFFSET(pc); + if (op == JSOP_LOOPENTRY || op == JSOP_NOP || op == JSOP_LOOPHEAD) + return GetNextPc(pc); + return pc; +} + +static bool +HasLiveIteratorAtStackDepth(JSScript* script, jsbytecode* pc, uint32_t stackDepth) +{ + if (!script->hasTrynotes()) + return false; + + JSTryNote* tn = script->trynotes()->vector; + JSTryNote* tnEnd = tn + script->trynotes()->length; + uint32_t pcOffset = uint32_t(pc - script->main()); + for (; tn != tnEnd; ++tn) { + if (pcOffset < tn->start) + continue; + if (pcOffset >= tn->start + tn->length) + continue; + + // For-in loops have only the iterator on stack. 
+ if (tn->kind == JSTRY_FOR_IN && stackDepth == tn->stackDepth) + return true; + + // For-of loops have both the iterator and the result object on + // stack. The iterator is below the result object. + if (tn->kind == JSTRY_FOR_OF && stackDepth == tn->stackDepth - 1) + return true; + } + + return false; +} + +static bool +IsPrologueBailout(const SnapshotIterator& iter, const ExceptionBailoutInfo* excInfo) +{ + // If we are propagating an exception for debug mode, we will not resume + // into baseline code, but instead into HandleExceptionBaseline (i.e., + // never before the prologue). + return iter.pcOffset() == 0 && !iter.resumeAfter() && + (!excInfo || !excInfo->propagatingIonExceptionForDebugMode()); +} + +// For every inline frame, we write out the following data: +// +// | ... | +// +---------------+ +// | Descr(???) | --- Descr size here is (PREV_FRAME_SIZE) +// +---------------+ +// | ReturnAddr | +// -- +===============+ --- OVERWRITE STARTS HERE (START_STACK_ADDR) +// | | PrevFramePtr | +// | +-> +---------------+ +// | | | Baseline | +// | | | Frame | +// | | +---------------+ +// | | | Fixed0 | +// | | +---------------+ +// +--< | | ... | +// | | | +---------------+ +// | | | | FixedF | +// | | | +---------------+ +// | | | | Stack0 | +// | | | +---------------+ +// | | | | ... | +// | | | +---------------+ +// | | | | StackS | +// | -- | +---------------+ --- IF NOT LAST INLINE FRAME, +// +------------| Descr(BLJS) | --- CALLING INFO STARTS HERE +// | +---------------+ +// | | ReturnAddr | <-- return into main jitcode after IC +// -- | +===============+ +// | | | StubPtr | +// | | +---------------+ +// | +---| FramePtr | +// | +---------------+ --- The inlined frame might OSR in Ion +// | | Padding? | --- Thus the return address should be aligned. +// | +---------------+ +// +--< | ArgA | +// | | +---------------+ +// | | | ... | +// | | +---------------+ +// | | | Arg0 | +// | | +---------------+ +// | | | ThisV | +// | -- +---------------+ +// | | ActualArgC | +// | +---------------+ +// | | CalleeToken | +// | +---------------+ +// +------------| Descr(BLStub) | +// +---------------+ +// | ReturnAddr | <-- return into ICCall_Scripted IC +// -- +===============+ --- IF CALLEE FORMAL ARGS > ActualArgC +// | | Padding? | +// | +---------------+ +// | | UndefinedU | +// | +---------------+ +// | | ... | +// | +---------------+ +// | | Undefined0 | +// +--< +---------------+ +// | | | ArgA | +// | | +---------------+ +// | | | ... | +// | | +---------------+ +// | | | Arg0 | +// | | +---------------+ +// | | | ThisV | +// | -- +---------------+ +// | | ActualArgC | +// | +---------------+ +// | | CalleeToken | +// | +---------------+ +// +------------| Descr(Rect) | +// +---------------+ +// | ReturnAddr | <-- return into ArgumentsRectifier after call +// +===============+ +// +static bool +InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC, + HandleFunction fun, HandleScript script, IonScript* ionScript, + SnapshotIterator& iter, bool invalidate, BaselineStackBuilder& builder, + MutableHandle> startFrameFormals, MutableHandleFunction nextCallee, + jsbytecode** callPC, const ExceptionBailoutInfo* excInfo) +{ + // The Baseline frames we will reconstruct on the heap are not rooted, so GC + // must be suppressed here. + MOZ_ASSERT(cx->mainThread().suppressGC); + + MOZ_ASSERT(script->hasBaselineScript()); + + // Are we catching an exception? 
+ bool catchingException = excInfo && excInfo->catchingException(); + + // If we are catching an exception, we are bailing out to a catch or + // finally block and this is the frame where we will resume. Usually the + // expression stack should be empty in this case but there can be + // iterators on the stack. + uint32_t exprStackSlots; + if (catchingException) + exprStackSlots = excInfo->numExprSlots(); + else + exprStackSlots = iter.numAllocations() - (script->nfixed() + CountArgSlots(script, fun)); + + builder.resetFramePushed(); + + // Build first baseline frame: + // +===============+ + // | PrevFramePtr | + // +---------------+ + // | Baseline | + // | Frame | + // +---------------+ + // | Fixed0 | + // +---------------+ + // | ... | + // +---------------+ + // | FixedF | + // +---------------+ + // | Stack0 | + // +---------------+ + // | ... | + // +---------------+ + // | StackS | + // +---------------+ --- IF NOT LAST INLINE FRAME, + // | Descr(BLJS) | --- CALLING INFO STARTS HERE + // +---------------+ + // | ReturnAddr | <-- return into main jitcode after IC + // +===============+ + + JitSpew(JitSpew_BaselineBailouts, " Unpacking %s:%" PRIuSIZE, script->filename(), script->lineno()); + JitSpew(JitSpew_BaselineBailouts, " [BASELINE-JS FRAME]"); + + // Calculate and write the previous frame pointer value. + // Record the virtual stack offset at this location. Later on, if we end up + // writing out a BaselineStub frame for the next callee, we'll need to save the + // address. + void* prevFramePtr = builder.calculatePrevFramePtr(); + if (!builder.writePtr(prevFramePtr, "PrevFramePtr")) + return false; + prevFramePtr = builder.virtualPointerAtStackOffset(0); + + // Write struct BaselineFrame. + if (!builder.subtract(BaselineFrame::Size(), "BaselineFrame")) + return false; + BufferPointer blFrame = builder.pointerAtStackOffset(0); + + uint32_t flags = 0; + + // If we are bailing to a script whose execution is observed, mark the + // baseline frame as a debuggee frame. This is to cover the case where we + // don't rematerialize the Ion frame via the Debugger. + if (script->isDebuggee()) + flags |= BaselineFrame::DEBUGGEE; + + // Initialize BaselineFrame's envChain and argsObj + JSObject* envChain = nullptr; + Value returnValue; + ArgumentsObject* argsObj = nullptr; + BailoutKind bailoutKind = iter.bailoutKind(); + if (bailoutKind == Bailout_ArgumentCheck) { + // Temporary hack -- skip the (unused) envChain, because it could be + // bogus (we can fail before the env chain slot is set). Strip the + // hasEnvironmentChain flag and this will be fixed up later in + // |FinishBailoutToBaseline|, which calls + // |EnsureHasEnvironmentObjects|. + JitSpew(JitSpew_BaselineBailouts, " Bailout_ArgumentCheck! (no valid envChain)"); + iter.skip(); + + // skip |return value| + iter.skip(); + returnValue = UndefinedValue(); + + // Scripts with |argumentsHasVarBinding| have an extra slot. + if (script->argumentsHasVarBinding()) { + JitSpew(JitSpew_BaselineBailouts, + " Bailout_ArgumentCheck for script with argumentsHasVarBinding!" 
+                    "Using empty arguments object");
+            iter.skip();
+        }
+    } else {
+        Value v = iter.read();
+        if (v.isObject()) {
+            envChain = &v.toObject();
+            if (fun &&
+                ((fun->needsCallObject() && envChain->is<CallObject>()) ||
+                 (fun->needsNamedLambdaEnvironment() &&
+                  !fun->needsCallObject() &&
+                  envChain->is<LexicalEnvironmentObject>() &&
+                  &envChain->as<LexicalEnvironmentObject>().scope() ==
+                  script->maybeNamedLambdaScope())))
+            {
+                MOZ_ASSERT(!fun->needsExtraBodyVarEnvironment());
+                flags |= BaselineFrame::HAS_INITIAL_ENV;
+            }
+        } else {
+            MOZ_ASSERT(v.isUndefined() || v.isMagic(JS_OPTIMIZED_OUT));
+
+            // Get env chain from function or script.
+            if (fun) {
+                // If pcOffset == 0, we may have to push a new call object, so
+                // we leave envChain nullptr and enter baseline code before
+                // the prologue.
+                if (!IsPrologueBailout(iter, excInfo))
+                    envChain = fun->environment();
+            } else if (script->module()) {
+                envChain = script->module()->environment();
+            } else {
+                // For global scripts without a non-syntactic env the env
+                // chain is the script's global lexical environment (Ion does
+                // not compile scripts with a non-syntactic global scope).
+                // Also note that it's invalid to resume into the prologue in
+                // this case because the prologue expects the env chain in R1
+                // for eval and global scripts.
+                MOZ_ASSERT(!script->isForEval());
+                MOZ_ASSERT(!script->hasNonSyntacticScope());
+                envChain = &(script->global().lexicalEnvironment());
+
+                // We have possibly bailed out before Ion could do the global
+                // declaration conflicts check. Since it's invalid to resume
+                // into the prologue, set a flag so FinishBailoutToBaseline
+                // can do the conflict check.
+                if (IsPrologueBailout(iter, excInfo))
+                    builder.setCheckGlobalDeclarationConflicts();
+            }
+        }
+
+        // Make sure to add HAS_RVAL to flags here because setFlags() below
+        // will clobber it.
+        returnValue = iter.read();
+        flags |= BaselineFrame::HAS_RVAL;
+
+        // If the script may have an arguments object, the third slot will hold it.
+        if (script->argumentsHasVarBinding()) {
+            v = iter.read();
+            MOZ_ASSERT(v.isObject() || v.isUndefined() || v.isMagic(JS_OPTIMIZED_OUT));
+            if (v.isObject())
+                argsObj = &v.toObject().as<ArgumentsObject>();
+        }
+    }
+    JitSpew(JitSpew_BaselineBailouts, " EnvChain=%p", envChain);
+    blFrame->setEnvironmentChain(envChain);
+    JitSpew(JitSpew_BaselineBailouts, " ReturnValue=%016" PRIx64, *((uint64_t*) &returnValue));
+    blFrame->setReturnValue(returnValue);
+
+    // Do not need to initialize scratchValue field in BaselineFrame.
+    blFrame->setFlags(flags);
+
+    // initArgsObjUnchecked modifies the frame's flags, so call it after setFlags.
+    if (argsObj)
+        blFrame->initArgsObjUnchecked(*argsObj);
+
+    if (fun) {
+        // The unpacked thisv and arguments should overwrite the pushed args present
+        // in the calling frame.
+        Value thisv = iter.read();
+        JitSpew(JitSpew_BaselineBailouts, " Is function!");
+        JitSpew(JitSpew_BaselineBailouts, " thisv=%016" PRIx64, *((uint64_t*) &thisv));
+
+        size_t thisvOffset = builder.framePushed() + JitFrameLayout::offsetOfThis();
+        builder.valuePointerAtStackOffset(thisvOffset).set(thisv);
+
+        MOZ_ASSERT(iter.numAllocations() >= CountArgSlots(script, fun));
+        JitSpew(JitSpew_BaselineBailouts, " frame slots %u, nargs %" PRIuSIZE ", nfixed %" PRIuSIZE,
+                iter.numAllocations(), fun->nargs(), script->nfixed());
+
+        if (!callerPC) {
+            // This is the first frame. Store the formals in a Vector until we
+            // are done. 
Due to UCE and phi elimination, we could store an + // UndefinedValue() here for formals we think are unused, but + // locals may still reference the original argument slot + // (MParameter/LArgument) and expect the original Value. + MOZ_ASSERT(startFrameFormals.empty()); + if (!startFrameFormals.resize(fun->nargs())) + return false; + } + + for (uint32_t i = 0; i < fun->nargs(); i++) { + Value arg = iter.read(); + JitSpew(JitSpew_BaselineBailouts, " arg %d = %016" PRIx64, + (int) i, *((uint64_t*) &arg)); + if (callerPC) { + size_t argOffset = builder.framePushed() + JitFrameLayout::offsetOfActualArg(i); + builder.valuePointerAtStackOffset(argOffset).set(arg); + } else { + startFrameFormals[i].set(arg); + } + } + } + + for (uint32_t i = 0; i < script->nfixed(); i++) { + Value slot = iter.read(); + if (!builder.writeValue(slot, "FixedValue")) + return false; + } + + // Get the pc. If we are handling an exception, resume at the pc of the + // catch or finally block. + jsbytecode* pc = catchingException ? excInfo->resumePC() : script->offsetToPC(iter.pcOffset()); + bool resumeAfter = catchingException ? false : iter.resumeAfter(); + + // When pgo is enabled, increment the counter of the block in which we + // resume, as Ion does not keep track of the code coverage. + // + // We need to do that when pgo is enabled, as after a specific number of + // FirstExecution bailouts, we invalidate and recompile the script with + // IonMonkey. Failing to increment the counter of the current basic block + // might lead to repeated bailouts and invalidations. + if (!JitOptions.disablePgo && script->hasScriptCounts()) + script->incHitCount(pc); + + JSOp op = JSOp(*pc); + + // Fixup inlined JSOP_FUNCALL, JSOP_FUNAPPLY, and accessors on the caller side. + // On the caller side this must represent like the function wasn't inlined. + uint32_t pushedSlots = 0; + AutoValueVector savedCallerArgs(cx); + bool needToSaveArgs = op == JSOP_FUNAPPLY || IsGetPropPC(pc) || IsSetPropPC(pc); + if (iter.moreFrames() && (op == JSOP_FUNCALL || needToSaveArgs)) + { + uint32_t inlined_args = 0; + if (op == JSOP_FUNCALL) + inlined_args = 2 + GET_ARGC(pc) - 1; + else if (op == JSOP_FUNAPPLY) + inlined_args = 2 + blFrame->numActualArgs(); + else + inlined_args = 2 + IsSetPropPC(pc); + + MOZ_ASSERT(exprStackSlots >= inlined_args); + pushedSlots = exprStackSlots - inlined_args; + + JitSpew(JitSpew_BaselineBailouts, + " pushing %u expression stack slots before fixup", + pushedSlots); + for (uint32_t i = 0; i < pushedSlots; i++) { + Value v = iter.read(); + if (!builder.writeValue(v, "StackValue")) + return false; + } + + if (op == JSOP_FUNCALL) { + // When funcall got inlined and the native js_fun_call was bypassed, + // the stack state is incorrect. To restore correctly it must look like + // js_fun_call was actually called. This means transforming the stack + // from |target, this, args| to |js_fun_call, target, this, args| + // The js_fun_call is never read, so just pushing undefined now. + JitSpew(JitSpew_BaselineBailouts, " pushing undefined to fixup funcall"); + if (!builder.writeValue(UndefinedValue(), "StackValue")) + return false; + } + + if (needToSaveArgs) { + // When an accessor is inlined, the whole thing is a lie. There + // should never have been a call there. Fix the caller's stack to + // forget it ever happened. + + // When funapply gets inlined we take all arguments out of the + // arguments array. So the stack state is incorrect. To restore + // correctly it must look like js_fun_apply was actually called. 
+ // This means transforming the stack from |target, this, arg1, ...| + // to |js_fun_apply, target, this, argObject|. + // Since the information is never read, we can just push undefined + // for all values. + if (op == JSOP_FUNAPPLY) { + JitSpew(JitSpew_BaselineBailouts, " pushing 4x undefined to fixup funapply"); + if (!builder.writeValue(UndefinedValue(), "StackValue")) + return false; + if (!builder.writeValue(UndefinedValue(), "StackValue")) + return false; + if (!builder.writeValue(UndefinedValue(), "StackValue")) + return false; + if (!builder.writeValue(UndefinedValue(), "StackValue")) + return false; + } + // Save the actual arguments. They are needed on the callee side + // as the arguments. Else we can't recover them. + if (!savedCallerArgs.resize(inlined_args)) + return false; + for (uint32_t i = 0; i < inlined_args; i++) + savedCallerArgs[i].set(iter.read()); + + if (IsSetPropPC(pc)) { + // We would love to just save all the arguments and leave them + // in the stub frame pushed below, but we will lose the inital + // argument which the function was called with, which we must + // return to the caller, even if the setter internally modifies + // its arguments. Stash the initial argument on the stack, to be + // later retrieved by the SetProp_Fallback stub. + Value initialArg = savedCallerArgs[inlined_args - 1]; + JitSpew(JitSpew_BaselineBailouts, " pushing setter's initial argument"); + if (!builder.writeValue(initialArg, "StackValue")) + return false; + } + pushedSlots = exprStackSlots; + } + } + + JitSpew(JitSpew_BaselineBailouts, " pushing %u expression stack slots", + exprStackSlots - pushedSlots); + for (uint32_t i = pushedSlots; i < exprStackSlots; i++) { + Value v; + + if (!iter.moreFrames() && i == exprStackSlots - 1 && + cx->runtime()->jitRuntime()->hasIonReturnOverride()) + { + // If coming from an invalidation bailout, and this is the topmost + // value, and a value override has been specified, don't read from the + // iterator. Otherwise, we risk using a garbage value. + MOZ_ASSERT(invalidate); + iter.skip(); + JitSpew(JitSpew_BaselineBailouts, " [Return Override]"); + v = cx->runtime()->jitRuntime()->takeIonReturnOverride(); + } else if (excInfo && excInfo->propagatingIonExceptionForDebugMode()) { + // If we are in the middle of propagating an exception from Ion by + // bailing to baseline due to debug mode, we might not have all + // the stack if we are at the newest frame. + // + // For instance, if calling |f()| pushed an Ion frame which threw, + // the snapshot expects the return value to be pushed, but it's + // possible nothing was pushed before we threw. We can't drop + // iterators, however, so read them out. They will be closed by + // HandleExceptionBaseline. + MOZ_ASSERT(cx->compartment()->isDebuggee()); + if (iter.moreFrames() || HasLiveIteratorAtStackDepth(script, pc, i + 1)) { + v = iter.read(); + } else { + iter.skip(); + v = MagicValue(JS_OPTIMIZED_OUT); + } + } else { + v = iter.read(); + } + if (!builder.writeValue(v, "StackValue")) + return false; + } + + // BaselineFrame::frameSize is the size of everything pushed since + // the builder.resetFramePushed() call. + uint32_t frameSize = builder.framePushed(); + blFrame->setFrameSize(frameSize); + JitSpew(JitSpew_BaselineBailouts, " FrameSize=%u", frameSize); + + // numValueSlots() is based on the frame size, do some sanity checks. 
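+    // As a rough worked example of what the checks below mean (assuming
+    // frameSize covers the frame header plus every boxed Value written above):
+    // numValueSlots() should come out to nfixed plus the number of
+    // expression-stack values just unpacked, e.g. nfixed() == 2 with three
+    // stack values gives numValueSlots() == 5, which must lie between
+    // script->nfixed() and script->nslots().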
+ MOZ_ASSERT(blFrame->numValueSlots() >= script->nfixed()); + MOZ_ASSERT(blFrame->numValueSlots() <= script->nslots()); + + // If we are resuming at a LOOPENTRY op, resume at the next op to avoid + // a bailout -> enter Ion -> bailout loop with --ion-eager. See also + // ThunkToInterpreter. + // + // The algorithm below is the "tortoise and the hare" algorithm. See bug + // 994444 for more explanation. + if (!resumeAfter) { + jsbytecode* fasterPc = pc; + while (true) { + pc = GetNextNonLoopEntryPc(pc); + fasterPc = GetNextNonLoopEntryPc(GetNextNonLoopEntryPc(fasterPc)); + if (fasterPc == pc) + break; + } + op = JSOp(*pc); + } + + uint32_t pcOff = script->pcToOffset(pc); + bool isCall = IsCallPC(pc); + BaselineScript* baselineScript = script->baselineScript(); + +#ifdef DEBUG + uint32_t expectedDepth; + bool reachablePC; + if (!ReconstructStackDepth(cx, script, resumeAfter ? GetNextPc(pc) : pc, &expectedDepth, &reachablePC)) + return false; + + if (reachablePC) { + if (op != JSOP_FUNAPPLY || !iter.moreFrames() || resumeAfter) { + if (op == JSOP_FUNCALL) { + // For fun.call(this, ...); the reconstructStackDepth will + // include the this. When inlining that is not included. + // So the exprStackSlots will be one less. + MOZ_ASSERT(expectedDepth - exprStackSlots <= 1); + } else if (iter.moreFrames() && (IsGetPropPC(pc) || IsSetPropPC(pc))) { + // Accessors coming out of ion are inlined via a complete + // lie perpetrated by the compiler internally. Ion just rearranges + // the stack, and pretends that it looked like a call all along. + // This means that the depth is actually one *more* than expected + // by the interpreter, as there is now a JSFunction, |this| and [arg], + // rather than the expected |this| and [arg] + // Note that none of that was pushed, but it's still reflected + // in exprStackSlots. + MOZ_ASSERT(exprStackSlots - expectedDepth == 1); + } else { + // For fun.apply({}, arguments) the reconstructStackDepth will + // have stackdepth 4, but it could be that we inlined the + // funapply. In that case exprStackSlots, will have the real + // arguments in the slots and not be 4. + MOZ_ASSERT(exprStackSlots == expectedDepth); + } + } + } +#endif + +#ifdef JS_JITSPEW + JitSpew(JitSpew_BaselineBailouts, " Resuming %s pc offset %d (op %s) (line %d) of %s:%" PRIuSIZE, + resumeAfter ? "after" : "at", (int) pcOff, CodeName[op], + PCToLineNumber(script, pc), script->filename(), script->lineno()); + JitSpew(JitSpew_BaselineBailouts, " Bailout kind: %s", + BailoutKindString(bailoutKind)); +#endif + + bool pushedNewTarget = op == JSOP_NEW; + + // If this was the last inline frame, or we are bailing out to a catch or + // finally block in this frame, then unpacking is almost done. + if (!iter.moreFrames() || catchingException) { + // Last frame, so PC for call to next frame is set to nullptr. + *callPC = nullptr; + + // If the bailout was a resumeAfter, and the opcode is monitored, + // then the bailed out state should be in a position to enter + // into the ICTypeMonitor chain for the op. + bool enterMonitorChain = false; + if (resumeAfter && (CodeSpec[op].format & JOF_TYPESET)) { + // Not every monitored op has a monitored fallback stub, e.g. + // JSOP_NEWOBJECT, which always returns the same type for a + // particular script/pc location. 
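+            // Concretely (an illustrative reading of the check below, not an
+            // extra guarantee): for a monitored op such as JSOP_GETPROP the
+            // chain's fallback is a monitored fallback stub, so we resume in
+            // its type-monitor chain; for JSOP_NEWOBJECT it is not, and we
+            // simply resume after the op instead.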
+ BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback(); + if (fallbackStub->isMonitoredFallback()) + enterMonitorChain = true; + } + + uint32_t numCallArgs = isCall ? GET_ARGC(pc) : 0; + + if (resumeAfter && !enterMonitorChain) + pc = GetNextPc(pc); + + builder.setResumePC(pc); + builder.setResumeFramePtr(prevFramePtr); + + if (enterMonitorChain) { + BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback(); + MOZ_ASSERT(fallbackStub->isMonitoredFallback()); + JitSpew(JitSpew_BaselineBailouts, " [TYPE-MONITOR CHAIN]"); + ICMonitoredFallbackStub* monFallbackStub = fallbackStub->toMonitoredFallbackStub(); + ICStub* firstMonStub = monFallbackStub->fallbackMonitorStub()->firstMonitorStub(); + + // To enter a monitoring chain, we load the top stack value into R0 + JitSpew(JitSpew_BaselineBailouts, " Popping top stack value into R0."); + builder.popValueInto(PCMappingSlotInfo::SlotInR0); + + // Need to adjust the frameSize for the frame to match the values popped + // into registers. + frameSize -= sizeof(Value); + blFrame->setFrameSize(frameSize); + JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize -= %d: %d", + (int) sizeof(Value), (int) frameSize); + + // If resuming into a JSOP_CALL, baseline keeps the arguments on the + // stack and pops them only after returning from the call IC. + // Push undefs onto the stack in anticipation of the popping of the + // callee, thisv, and actual arguments passed from the caller's frame. + if (isCall) { + if (!builder.writeValue(UndefinedValue(), "CallOp FillerCallee")) + return false; + if (!builder.writeValue(UndefinedValue(), "CallOp FillerThis")) + return false; + for (uint32_t i = 0; i < numCallArgs; i++) { + if (!builder.writeValue(UndefinedValue(), "CallOp FillerArg")) + return false; + } + if (pushedNewTarget) { + if (!builder.writeValue(UndefinedValue(), "CallOp FillerNewTarget")) + return false; + } + + frameSize += (numCallArgs + 2 + pushedNewTarget) * sizeof(Value); + blFrame->setFrameSize(frameSize); + JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize += %d: %d", + (int) ((numCallArgs + 2 + pushedNewTarget) * sizeof(Value)), + (int) frameSize); + } + + // Set the resume address to the return point from the IC, and set + // the monitor stub addr. + builder.setResumeAddr(baselineScript->returnAddressForIC(icEntry)); + builder.setMonitorStub(firstMonStub); + JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p monitorStub=%p", + baselineScript->returnAddressForIC(icEntry), firstMonStub); + + } else { + // If needed, initialize BaselineBailoutInfo's valueR0 and/or valueR1 with the + // top stack values. + // + // Note that we use the 'maybe' variant of nativeCodeForPC because + // of exception propagation for debug mode. See note below. + PCMappingSlotInfo slotInfo; + uint8_t* nativeCodeForPC; + + if (excInfo && excInfo->propagatingIonExceptionForDebugMode()) { + // When propagating an exception for debug mode, set the + // resume pc to the throwing pc, so that Debugger hooks report + // the correct pc offset of the throwing op instead of its + // successor (this pc will be used as the BaselineFrame's + // override pc). + // + // Note that we never resume at this pc, it is set for the sake + // of frame iterators giving the correct answer. 
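+                // For instance (purely illustrative): if the op that threw
+                // sits at pc offset 12, the override pc set below makes
+                // Debugger hooks and frame iterators report offset 12 rather
+                // than the offset of the following op, even though execution
+                // never resumes there.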
+ jsbytecode* throwPC = script->offsetToPC(iter.pcOffset()); + builder.setResumePC(throwPC); + nativeCodeForPC = baselineScript->nativeCodeForPC(script, throwPC); + } else { + nativeCodeForPC = baselineScript->nativeCodeForPC(script, pc, &slotInfo); + } + MOZ_ASSERT(nativeCodeForPC); + + unsigned numUnsynced = slotInfo.numUnsynced(); + + MOZ_ASSERT(numUnsynced <= 2); + PCMappingSlotInfo::SlotLocation loc1, loc2; + if (numUnsynced > 0) { + loc1 = slotInfo.topSlotLocation(); + JitSpew(JitSpew_BaselineBailouts, " Popping top stack value into %d.", + (int) loc1); + builder.popValueInto(loc1); + } + if (numUnsynced > 1) { + loc2 = slotInfo.nextSlotLocation(); + JitSpew(JitSpew_BaselineBailouts, " Popping next stack value into %d.", + (int) loc2); + MOZ_ASSERT_IF(loc1 != PCMappingSlotInfo::SlotIgnore, loc1 != loc2); + builder.popValueInto(loc2); + } + + // Need to adjust the frameSize for the frame to match the values popped + // into registers. + frameSize -= sizeof(Value) * numUnsynced; + blFrame->setFrameSize(frameSize); + JitSpew(JitSpew_BaselineBailouts, " Adjusted framesize -= %d: %d", + int(sizeof(Value) * numUnsynced), int(frameSize)); + + // If envChain is nullptr, then bailout is occurring during argument check. + // In this case, resume into the prologue. + uint8_t* opReturnAddr; + if (envChain == nullptr) { + // Global and eval scripts expect the env chain in R1, so only + // resume into the prologue for function scripts. + MOZ_ASSERT(fun); + MOZ_ASSERT(numUnsynced == 0); + opReturnAddr = baselineScript->prologueEntryAddr(); + JitSpew(JitSpew_BaselineBailouts, " Resuming into prologue."); + + } else { + opReturnAddr = nativeCodeForPC; + } + builder.setResumeAddr(opReturnAddr); + JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p", opReturnAddr); + } + + if (cx->runtime()->spsProfiler.enabled()) { + // Register bailout with profiler. + const char* filename = script->filename(); + if (filename == nullptr) + filename = ""; + unsigned len = strlen(filename) + 200; + char* buf = js_pod_malloc(len); + if (buf == nullptr) { + ReportOutOfMemory(cx); + return false; + } + snprintf(buf, len, "%s %s %s on line %u of %s:%" PRIuSIZE, + BailoutKindString(bailoutKind), + resumeAfter ? "after" : "at", + CodeName[op], + PCToLineNumber(script, pc), + filename, + script->lineno()); + cx->runtime()->spsProfiler.markEvent(buf); + js_free(buf); + } + + return true; + } + + *callPC = pc; + + // Write out descriptor of BaselineJS frame. + size_t baselineFrameDescr = MakeFrameDescriptor((uint32_t) builder.framePushed(), + JitFrame_BaselineJS, + BaselineStubFrameLayout::Size()); + if (!builder.writeWord(baselineFrameDescr, "Descriptor")) + return false; + + // Calculate and write out return address. + // The icEntry in question MUST have an inlinable fallback stub. + BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + MOZ_ASSERT(IsInlinableFallback(icEntry.firstStub()->getChainFallback())); + if (!builder.writePtr(baselineScript->returnAddressForIC(icEntry), "ReturnAddr")) + return false; + + // Build baseline stub frame: + // +===============+ + // | StubPtr | + // +---------------+ + // | FramePtr | + // +---------------+ + // | Padding? | + // +---------------+ + // | ArgA | + // +---------------+ + // | ... 
| + // +---------------+ + // | Arg0 | + // +---------------+ + // | ThisV | + // +---------------+ + // | ActualArgC | + // +---------------+ + // | CalleeToken | + // +---------------+ + // | Descr(BLStub) | + // +---------------+ + // | ReturnAddr | + // +===============+ + + JitSpew(JitSpew_BaselineBailouts, " [BASELINE-STUB FRAME]"); + + size_t startOfBaselineStubFrame = builder.framePushed(); + + // Write stub pointer. + MOZ_ASSERT(IsInlinableFallback(icEntry.fallbackStub())); + if (!builder.writePtr(icEntry.fallbackStub(), "StubPtr")) + return false; + + // Write previous frame pointer (saved earlier). + if (!builder.writePtr(prevFramePtr, "PrevFramePtr")) + return false; + prevFramePtr = builder.virtualPointerAtStackOffset(0); + + // Write out actual arguments (and thisv), copied from unpacked stack of BaselineJS frame. + // Arguments are reversed on the BaselineJS frame's stack values. + MOZ_ASSERT(IsIonInlinablePC(pc)); + unsigned actualArgc; + Value callee; + if (needToSaveArgs) { + // For FUNAPPLY or an accessor, the arguments are not on the stack anymore, + // but they are copied in a vector and are written here. + if (op == JSOP_FUNAPPLY) + actualArgc = blFrame->numActualArgs(); + else + actualArgc = IsSetPropPC(pc); + callee = savedCallerArgs[0]; + + // Align the stack based on the number of arguments. + size_t afterFrameSize = (actualArgc + 1) * sizeof(Value) + JitFrameLayout::Size(); + if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) + return false; + + // Push arguments. + MOZ_ASSERT(actualArgc + 2 <= exprStackSlots); + MOZ_ASSERT(savedCallerArgs.length() == actualArgc + 2); + for (unsigned i = 0; i < actualArgc + 1; i++) { + size_t arg = savedCallerArgs.length() - (i + 1); + if (!builder.writeValue(savedCallerArgs[arg], "ArgVal")) + return false; + } + } else { + actualArgc = GET_ARGC(pc); + if (op == JSOP_FUNCALL) { + MOZ_ASSERT(actualArgc > 0); + actualArgc--; + } + + // Align the stack based on the number of arguments. + size_t afterFrameSize = (actualArgc + 1 + pushedNewTarget) * sizeof(Value) + + JitFrameLayout::Size(); + if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) + return false; + + // Copy the arguments and |this| from the BaselineFrame, in reverse order. + size_t valueSlot = blFrame->numValueSlots() - 1; + size_t calleeSlot = valueSlot - actualArgc - 1 - pushedNewTarget; + + for (size_t i = valueSlot; i > calleeSlot; i--) { + Value v = *blFrame->valueSlot(i); + if (!builder.writeValue(v, "ArgVal")) + return false; + } + + callee = *blFrame->valueSlot(calleeSlot); + } + + // In case these arguments need to be copied on the stack again for a rectifier frame, + // save the framePushed values here for later use. + size_t endOfBaselineStubArgs = builder.framePushed(); + + // Calculate frame size for descriptor. 
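+    // A worked example of the size computed below (assuming no alignment
+    // padding was required): with actualArgc == 2, the stub frame written
+    // above holds StubPtr and PrevFramePtr plus three copied Values
+    // (ThisV, Arg0, Arg1), so framePushed() - startOfBaselineStubFrame is
+    // 2 * sizeof(void*) + 3 * sizeof(Value); MakeFrameDescriptor packs that
+    // size together with the JitFrame_BaselineStub type and the
+    // JitFrameLayout header size.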
+ size_t baselineStubFrameSize = builder.framePushed() - startOfBaselineStubFrame; + size_t baselineStubFrameDescr = MakeFrameDescriptor((uint32_t) baselineStubFrameSize, + JitFrame_BaselineStub, + JitFrameLayout::Size()); + + // Push actual argc + if (!builder.writeWord(actualArgc, "ActualArgc")) + return false; + + // Push callee token (must be a JS Function) + JitSpew(JitSpew_BaselineBailouts, " Callee = %016" PRIx64, callee.asRawBits()); + + JSFunction* calleeFun = &callee.toObject().as(); + if (!builder.writePtr(CalleeToToken(calleeFun, JSOp(*pc) == JSOP_NEW), "CalleeToken")) + return false; + nextCallee.set(calleeFun); + + // Push BaselineStub frame descriptor + if (!builder.writeWord(baselineStubFrameDescr, "Descriptor")) + return false; + + // Push return address into ICCall_Scripted stub, immediately after the call. + void* baselineCallReturnAddr = GetStubReturnAddress(cx, pc); + MOZ_ASSERT(baselineCallReturnAddr); + if (!builder.writePtr(baselineCallReturnAddr, "ReturnAddr")) + return false; + MOZ_ASSERT(builder.framePushed() % JitStackAlignment == 0); + + // If actualArgc >= fun->nargs, then we are done. Otherwise, we need to push on + // a reconstructed rectifier frame. + if (actualArgc >= calleeFun->nargs()) + return true; + + // Push a reconstructed rectifier frame. + // +===============+ + // | Padding? | + // +---------------+ + // | UndefinedU | + // +---------------+ + // | ... | + // +---------------+ + // | Undefined0 | + // +---------------+ + // | ArgA | + // +---------------+ + // | ... | + // +---------------+ + // | Arg0 | + // +---------------+ + // | ThisV | + // +---------------+ + // | ActualArgC | + // +---------------+ + // | CalleeToken | + // +---------------+ + // | Descr(Rect) | + // +---------------+ + // | ReturnAddr | + // +===============+ + + JitSpew(JitSpew_BaselineBailouts, " [RECTIFIER FRAME]"); + + size_t startOfRectifierFrame = builder.framePushed(); + + // On x86-only, the frame pointer is saved again in the rectifier frame. +#if defined(JS_CODEGEN_X86) + if (!builder.writePtr(prevFramePtr, "PrevFramePtr-X86Only")) + return false; + // Follow the same logic as in JitRuntime::generateArgumentsRectifier. + prevFramePtr = builder.virtualPointerAtStackOffset(0); + if (!builder.writePtr(prevFramePtr, "Padding-X86Only")) + return false; +#endif + + // Align the stack based on the number of arguments. + size_t afterFrameSize = (calleeFun->nargs() + 1 + pushedNewTarget) * sizeof(Value) + + RectifierFrameLayout::Size(); + if (!builder.maybeWritePadding(JitStackAlignment, afterFrameSize, "Padding")) + return false; + + // Copy new.target, if necessary. + if (pushedNewTarget) { + size_t newTargetOffset = (builder.framePushed() - endOfBaselineStubArgs) + + (actualArgc + 1) * sizeof(Value); + Value newTargetValue = *builder.valuePointerAtStackOffset(newTargetOffset); + if (!builder.writeValue(newTargetValue, "CopiedNewTarget")) + return false; + } + + // Push undefined for missing arguments. + for (unsigned i = 0; i < (calleeFun->nargs() - actualArgc); i++) { + if (!builder.writeValue(UndefinedValue(), "FillerVal")) + return false; + } + + // Copy arguments + thisv from BaselineStub frame. 
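+    // Sketch of the copy below, following the offsets already used by the
+    // code (no new ones are introduced): subtract() reserves room for
+    // actualArgc + 1 Values at stack offset 0, and
+    // framePushed() - endOfBaselineStubArgs is the distance back up to where
+    // ThisV/Arg0..ArgA were written for the stub frame, so the memcpy simply
+    // re-pushes those same Values in the same layout.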
+ if (!builder.subtract((actualArgc + 1) * sizeof(Value), "CopiedArgs")) + return false; + BufferPointer stubArgsEnd = + builder.pointerAtStackOffset(builder.framePushed() - endOfBaselineStubArgs); + JitSpew(JitSpew_BaselineBailouts, " MemCpy from %p", stubArgsEnd.get()); + memcpy(builder.pointerAtStackOffset(0).get(), stubArgsEnd.get(), + (actualArgc + 1) * sizeof(Value)); + + // Calculate frame size for descriptor. + size_t rectifierFrameSize = builder.framePushed() - startOfRectifierFrame; + size_t rectifierFrameDescr = MakeFrameDescriptor((uint32_t) rectifierFrameSize, + JitFrame_Rectifier, + JitFrameLayout::Size()); + + // Push actualArgc + if (!builder.writeWord(actualArgc, "ActualArgc")) + return false; + + // Push calleeToken again. + if (!builder.writePtr(CalleeToToken(calleeFun, JSOp(*pc) == JSOP_NEW), "CalleeToken")) + return false; + + // Push rectifier frame descriptor + if (!builder.writeWord(rectifierFrameDescr, "Descriptor")) + return false; + + // Push return address into the ArgumentsRectifier code, immediately after the ioncode + // call. + void* rectReturnAddr = cx->runtime()->jitRuntime()->getArgumentsRectifierReturnAddr(); + MOZ_ASSERT(rectReturnAddr); + if (!builder.writePtr(rectReturnAddr, "ReturnAddr")) + return false; + MOZ_ASSERT(builder.framePushed() % JitStackAlignment == 0); + + return true; +} + +uint32_t +jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIterator& iter, + bool invalidate, BaselineBailoutInfo** bailoutInfo, + const ExceptionBailoutInfo* excInfo) +{ + MOZ_ASSERT(bailoutInfo != nullptr); + MOZ_ASSERT(*bailoutInfo == nullptr); + + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + TraceLogStopEvent(logger, TraceLogger_IonMonkey); + TraceLogStartEvent(logger, TraceLogger_Baseline); + + // Ion bailout can fail due to overrecursion and OOM. In such cases we + // cannot honor any further Debugger hooks on the frame, and need to + // ensure that its Debugger.Frame entry is cleaned up. + auto guardRemoveRematerializedFramesFromDebugger = mozilla::MakeScopeExit([&] { + activation->removeRematerializedFramesFromDebugger(cx, iter.fp()); + }); + + // The caller of the top frame must be one of the following: + // IonJS - Ion calling into Ion. + // BaselineStub - Baseline calling into Ion. + // Entry - Interpreter or other calling into Ion. + // Rectifier - Arguments rectifier calling into Ion. + MOZ_ASSERT(iter.isBailoutJS()); +#if defined(DEBUG) || defined(JS_JITSPEW) + FrameType prevFrameType = iter.prevType(); + MOZ_ASSERT(prevFrameType == JitFrame_IonJS || + prevFrameType == JitFrame_BaselineStub || + prevFrameType == JitFrame_Entry || + prevFrameType == JitFrame_Rectifier || + prevFrameType == JitFrame_IonAccessorIC); +#endif + + // All incoming frames are going to look like this: + // + // +---------------+ + // | ... | + // +---------------+ + // | Args | + // | ... | + // +---------------+ + // | ThisV | + // +---------------+ + // | ActualArgC | + // +---------------+ + // | CalleeToken | + // +---------------+ + // | Descriptor | + // +---------------+ + // | ReturnAddr | + // +---------------+ + // | ||||| | <---- Overwrite starting here. 
+ // | ||||| | + // | ||||| | + // +---------------+ + + JitSpew(JitSpew_BaselineBailouts, "Bailing to baseline %s:%" PRIuSIZE " (IonScript=%p) (FrameType=%d)", + iter.script()->filename(), iter.script()->lineno(), (void*) iter.ionScript(), + (int) prevFrameType); + + bool catchingException; + bool propagatingExceptionForDebugMode; + if (excInfo) { + catchingException = excInfo->catchingException(); + propagatingExceptionForDebugMode = excInfo->propagatingIonExceptionForDebugMode(); + + if (catchingException) + JitSpew(JitSpew_BaselineBailouts, "Resuming in catch or finally block"); + + if (propagatingExceptionForDebugMode) + JitSpew(JitSpew_BaselineBailouts, "Resuming in-place for debug mode"); + } else { + catchingException = false; + propagatingExceptionForDebugMode = false; + } + + JitSpew(JitSpew_BaselineBailouts, " Reading from snapshot offset %u size %" PRIuSIZE, + iter.snapshotOffset(), iter.ionScript()->snapshotsListSize()); + + if (!excInfo) + iter.ionScript()->incNumBailouts(); + iter.script()->updateBaselineOrIonRaw(cx->runtime()); + + // Allocate buffer to hold stack replacement data. + BaselineStackBuilder builder(iter, 1024); + if (!builder.init()) { + ReportOutOfMemory(cx); + return BAILOUT_RETURN_FATAL_ERROR; + } + JitSpew(JitSpew_BaselineBailouts, " Incoming frame ptr = %p", builder.startFrame()); + + SnapshotIteratorForBailout snapIter(activation, iter); + if (!snapIter.init(cx)) + return BAILOUT_RETURN_FATAL_ERROR; + +#ifdef TRACK_SNAPSHOTS + snapIter.spewBailingFrom(); +#endif + + RootedFunction callee(cx, iter.maybeCallee()); + RootedScript scr(cx, iter.script()); + if (callee) { + JitSpew(JitSpew_BaselineBailouts, " Callee function (%s:%" PRIuSIZE ")", + scr->filename(), scr->lineno()); + } else { + JitSpew(JitSpew_BaselineBailouts, " No callee!"); + } + + if (iter.isConstructing()) + JitSpew(JitSpew_BaselineBailouts, " Constructing!"); + else + JitSpew(JitSpew_BaselineBailouts, " Not constructing!"); + + JitSpew(JitSpew_BaselineBailouts, " Restoring frames:"); + size_t frameNo = 0; + + // Reconstruct baseline frames using the builder. + RootedScript caller(cx); + jsbytecode* callerPC = nullptr; + RootedFunction fun(cx, callee); + Rooted> startFrameFormals(cx, GCVector(cx)); + + gc::AutoSuppressGC suppress(cx); + + while (true) { + // Skip recover instructions as they are already recovered by |initInstructionResults|. + snapIter.settleOnFrame(); + + if (frameNo > 0) { + // TraceLogger doesn't create entries for inlined frames. But we + // see them in Baseline. Here we create the start events of those + // entries. So they correspond to what we will see in Baseline. + TraceLoggerEvent scriptEvent(logger, TraceLogger_Scripts, scr); + TraceLogStartEvent(logger, scriptEvent); + TraceLogStartEvent(logger, TraceLogger_Baseline); + } + + JitSpew(JitSpew_BaselineBailouts, " FrameNo %" PRIuSIZE, frameNo); + + // If we are bailing out to a catch or finally block in this frame, + // pass excInfo to InitFromBailout and don't unpack any other frames. + bool handleException = (catchingException && excInfo->frameNo() == frameNo); + + // We also need to pass excInfo if we're bailing out in place for + // debug mode. + bool passExcInfo = handleException || propagatingExceptionForDebugMode; + + jsbytecode* callPC = nullptr; + RootedFunction nextCallee(cx, nullptr); + if (!InitFromBailout(cx, caller, callerPC, fun, scr, iter.ionScript(), + snapIter, invalidate, builder, &startFrameFormals, + &nextCallee, &callPC, passExcInfo ? 
excInfo : nullptr)) + { + return BAILOUT_RETURN_FATAL_ERROR; + } + + if (!snapIter.moreFrames()) { + MOZ_ASSERT(!callPC); + break; + } + + if (handleException) + break; + + MOZ_ASSERT(nextCallee); + MOZ_ASSERT(callPC); + caller = scr; + callerPC = callPC; + fun = nextCallee; + scr = fun->existingScript(); + + frameNo++; + + snapIter.nextInstruction(); + } + JitSpew(JitSpew_BaselineBailouts, " Done restoring frames"); + + BailoutKind bailoutKind = snapIter.bailoutKind(); + + if (!startFrameFormals.empty()) { + // Set the first frame's formals, see the comment in InitFromBailout. + Value* argv = builder.startFrame()->argv() + 1; // +1 to skip |this|. + mozilla::PodCopy(argv, startFrameFormals.begin(), startFrameFormals.length()); + } + + // Do stack check. + bool overRecursed = false; + BaselineBailoutInfo *info = builder.info(); + uint8_t* newsp = info->incomingStack - (info->copyStackTop - info->copyStackBottom); +#ifdef JS_SIMULATOR + if (Simulator::Current()->overRecursed(uintptr_t(newsp))) + overRecursed = true; +#else + JS_CHECK_RECURSION_WITH_SP_DONT_REPORT(cx, newsp, overRecursed = true); +#endif + if (overRecursed) { + JitSpew(JitSpew_BaselineBailouts, " Overrecursion check failed!"); + return BAILOUT_RETURN_OVERRECURSED; + } + + // Take the reconstructed baseline stack so it doesn't get freed when builder destructs. + info = builder.takeBuffer(); + info->numFrames = frameNo + 1; + info->bailoutKind = bailoutKind; + *bailoutInfo = info; + guardRemoveRematerializedFramesFromDebugger.release(); + return BAILOUT_RETURN_OK; +} + +static void +InvalidateAfterBailout(JSContext* cx, HandleScript outerScript, const char* reason) +{ + // In some cases, the computation of recover instruction can invalidate the + // Ion script before we reach the end of the bailout. Thus, if the outer + // script no longer have any Ion script attached, then we just skip the + // invalidation. + // + // For example, such case can happen if the template object for an unboxed + // objects no longer match the content of its properties (see Bug 1174547) + if (!outerScript->hasIonScript()) { + JitSpew(JitSpew_BaselineBailouts, "Ion script is already invalidated"); + return; + } + + MOZ_ASSERT(!outerScript->ionScript()->invalidated()); + + JitSpew(JitSpew_BaselineBailouts, "Invalidating due to %s", reason); + Invalidate(cx, outerScript); +} + +static void +HandleBoundsCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript) +{ + JitSpew(JitSpew_IonBailouts, "Bounds check failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE, + innerScript->filename(), innerScript->lineno(), + outerScript->filename(), outerScript->lineno()); + + if (!innerScript->failedBoundsCheck()) + innerScript->setFailedBoundsCheck(); + + InvalidateAfterBailout(cx, outerScript, "bounds check failure"); + if (innerScript->hasIonScript()) + Invalidate(cx, innerScript); +} + +static void +HandleShapeGuardFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript) +{ + JitSpew(JitSpew_IonBailouts, "Shape guard failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE, + innerScript->filename(), innerScript->lineno(), + outerScript->filename(), outerScript->lineno()); + + // TODO: Currently this mimic's Ion's handling of this case. Investigate setting + // the flag on innerScript as opposed to outerScript, and maybe invalidating both + // inner and outer scripts, instead of just the outer one. 
+ outerScript->setFailedShapeGuard(); + + InvalidateAfterBailout(cx, outerScript, "shape guard failure"); +} + +static void +HandleBaselineInfoBailout(JSContext* cx, HandleScript outerScript, HandleScript innerScript) +{ + JitSpew(JitSpew_IonBailouts, "Baseline info failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE, + innerScript->filename(), innerScript->lineno(), + outerScript->filename(), outerScript->lineno()); + + InvalidateAfterBailout(cx, outerScript, "invalid baseline info"); +} + +static void +HandleLexicalCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript) +{ + JitSpew(JitSpew_IonBailouts, "Lexical check failure %s:%" PRIuSIZE ", inlined into %s:%" PRIuSIZE, + innerScript->filename(), innerScript->lineno(), + outerScript->filename(), outerScript->lineno()); + + if (!innerScript->failedLexicalCheck()) + innerScript->setFailedLexicalCheck(); + + InvalidateAfterBailout(cx, outerScript, "lexical check failure"); + if (innerScript->hasIonScript()) + Invalidate(cx, innerScript); +} + +static bool +CopyFromRematerializedFrame(JSContext* cx, JitActivation* act, uint8_t* fp, size_t inlineDepth, + BaselineFrame* frame) +{ + RematerializedFrame* rematFrame = act->lookupRematerializedFrame(fp, inlineDepth); + + // We might not have rematerialized a frame if the user never requested a + // Debugger.Frame for it. + if (!rematFrame) + return true; + + MOZ_ASSERT(rematFrame->script() == frame->script()); + MOZ_ASSERT(rematFrame->numActualArgs() == frame->numActualArgs()); + + frame->setEnvironmentChain(rematFrame->environmentChain()); + + if (frame->isFunctionFrame()) + frame->thisArgument() = rematFrame->thisArgument(); + + for (unsigned i = 0; i < frame->numActualArgs(); i++) + frame->argv()[i] = rematFrame->argv()[i]; + + for (size_t i = 0; i < frame->script()->nfixed(); i++) + *frame->valueSlot(i) = rematFrame->locals()[i]; + + frame->setReturnValue(rematFrame->returnValue()); + + if (rematFrame->hasCachedSavedFrame()) + frame->setHasCachedSavedFrame(); + + JitSpew(JitSpew_BaselineBailouts, + " Copied from rematerialized frame at (%p,%" PRIuSIZE ")", + fp, inlineDepth); + + // Propagate the debuggee frame flag. For the case where the Debugger did + // not rematerialize an Ion frame, the baseline frame has its debuggee + // flag set iff its script is considered a debuggee. See the debuggee case + // in InitFromBailout. + if (rematFrame->isDebuggee()) { + frame->setIsDebuggee(); + return Debugger::handleIonBailout(cx, rematFrame, frame); + } + + return true; +} + +uint32_t +jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo) +{ + // The caller pushes R0 and R1 on the stack without rooting them. + // Since GC here is very unlikely just suppress it. + JSContext* cx = GetJSContextFromMainThread(); + js::gc::AutoSuppressGC suppressGC(cx); + + JitSpew(JitSpew_BaselineBailouts, " Done restoring frames"); + + // The current native code pc may not have a corresponding ICEntry, so we + // store the bytecode pc in the frame for frame iterators. This pc is + // cleared at the end of this function. If we return false, we don't clear + // it: the exception handler also needs it and will clear it for us. + BaselineFrame* topFrame = GetTopBaselineFrame(cx); + topFrame->setOverridePc(bailoutInfo->resumePC); + + uint32_t numFrames = bailoutInfo->numFrames; + MOZ_ASSERT(numFrames > 0); + BailoutKind bailoutKind = bailoutInfo->bailoutKind; + bool checkGlobalDeclarationConflicts = bailoutInfo->checkGlobalDeclarationConflicts; + + // Free the bailout buffer. 
+ js_free(bailoutInfo); + bailoutInfo = nullptr; + + if (topFrame->environmentChain()) { + // Ensure the frame has a call object if it needs one. If the env chain + // is nullptr, we will enter baseline code at the prologue so no need to do + // anything in that case. + if (!EnsureHasEnvironmentObjects(cx, topFrame)) + return false; + + // If we bailed out before Ion could do the global declaration + // conflicts check, because we resume in the body instead of the + // prologue for global frames. + if (checkGlobalDeclarationConflicts) { + Rooted lexicalEnv(cx, &cx->global()->lexicalEnvironment()); + RootedScript script(cx, topFrame->script()); + if (!CheckGlobalDeclarationConflicts(cx, script, lexicalEnv, cx->global())) + return false; + } + } + + // Create arguments objects for bailed out frames, to maintain the invariant + // that script->needsArgsObj() implies frame->hasArgsObj(). + RootedScript innerScript(cx, nullptr); + RootedScript outerScript(cx, nullptr); + + MOZ_ASSERT(cx->currentlyRunningInJit()); + JitFrameIterator iter(cx); + uint8_t* outerFp = nullptr; + + // Iter currently points at the exit frame. Get the previous frame + // (which must be a baseline frame), and set it as the last profiling + // frame. + if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) + cx->runtime()->jitActivation->setLastProfilingFrame(iter.prevFp()); + + uint32_t frameno = 0; + while (frameno < numFrames) { + MOZ_ASSERT(!iter.isIonJS()); + + if (iter.isBaselineJS()) { + BaselineFrame* frame = iter.baselineFrame(); + MOZ_ASSERT(frame->script()->hasBaselineScript()); + + // If the frame doesn't even have a env chain set yet, then it's resuming + // into the the prologue before the env chain is initialized. Any + // necessary args object will also be initialized there. + if (frame->environmentChain() && frame->script()->needsArgsObj()) { + ArgumentsObject* argsObj; + if (frame->hasArgsObj()) { + argsObj = &frame->argsObj(); + } else { + argsObj = ArgumentsObject::createExpected(cx, frame); + if (!argsObj) + return false; + } + + // The arguments is a local binding and needsArgsObj does not + // check if it is clobbered. Ensure that the local binding + // restored during bailout before storing the arguments object + // to the slot. + RootedScript script(cx, frame->script()); + SetFrameArgumentsObject(cx, frame, script, argsObj); + } + + if (frameno == 0) + innerScript = frame->script(); + + if (frameno == numFrames - 1) { + outerScript = frame->script(); + outerFp = iter.fp(); + } + + frameno++; + } + + ++iter; + } + + MOZ_ASSERT(innerScript); + MOZ_ASSERT(outerScript); + MOZ_ASSERT(outerFp); + + // If we rematerialized Ion frames due to debug mode toggling, copy their + // values into the baseline frame. We need to do this even when debug mode + // is off, as we should respect the mutations made while debug mode was + // on. + JitActivation* act = cx->runtime()->activation()->asJit(); + if (act->hasRematerializedFrame(outerFp)) { + JitFrameIterator iter(cx); + size_t inlineDepth = numFrames; + bool ok = true; + while (inlineDepth > 0) { + if (iter.isBaselineJS()) { + // We must attempt to copy all rematerialized frames over, + // even if earlier ones failed, to invoke the proper frame + // cleanup in the Debugger. + ok = CopyFromRematerializedFrame(cx, act, outerFp, --inlineDepth, + iter.baselineFrame()); + } + ++iter; + } + + // After copying from all the rematerialized frames, remove them from + // the table to keep the table up to date. 
+ act->removeRematerializedFrame(outerFp); + + if (!ok) + return false; + } + + JitSpew(JitSpew_BaselineBailouts, + " Restored outerScript=(%s:%" PRIuSIZE ",%u) innerScript=(%s:%" PRIuSIZE ",%u) (bailoutKind=%u)", + outerScript->filename(), outerScript->lineno(), outerScript->getWarmUpCount(), + innerScript->filename(), innerScript->lineno(), innerScript->getWarmUpCount(), + (unsigned) bailoutKind); + + switch (bailoutKind) { + // Normal bailouts. + case Bailout_Inevitable: + case Bailout_DuringVMCall: + case Bailout_NonJSFunctionCallee: + case Bailout_DynamicNameNotFound: + case Bailout_StringArgumentsEval: + case Bailout_Overflow: + case Bailout_Round: + case Bailout_NonPrimitiveInput: + case Bailout_PrecisionLoss: + case Bailout_TypeBarrierO: + case Bailout_TypeBarrierV: + case Bailout_MonitorTypes: + case Bailout_Hole: + case Bailout_NegativeIndex: + case Bailout_NonInt32Input: + case Bailout_NonNumericInput: + case Bailout_NonBooleanInput: + case Bailout_NonObjectInput: + case Bailout_NonStringInput: + case Bailout_NonSymbolInput: + case Bailout_UnexpectedSimdInput: + case Bailout_NonSharedTypedArrayInput: + case Bailout_Debugger: + case Bailout_UninitializedThis: + case Bailout_BadDerivedConstructorReturn: + // Do nothing. + break; + + case Bailout_FirstExecution: + // Do not return directly, as this was not frequent in the first place, + // thus rely on the check for frequent bailouts to recompile the current + // script. + break; + + // Invalid assumption based on baseline code. + case Bailout_OverflowInvalidate: + outerScript->setHadOverflowBailout(); + MOZ_FALLTHROUGH; + case Bailout_NonStringInputInvalidate: + case Bailout_DoubleOutput: + case Bailout_ObjectIdentityOrTypeGuard: + HandleBaselineInfoBailout(cx, outerScript, innerScript); + break; + + case Bailout_ArgumentCheck: + // Do nothing, bailout will resume before the argument monitor ICs. + break; + case Bailout_BoundsCheck: + case Bailout_Detached: + HandleBoundsCheckFailure(cx, outerScript, innerScript); + break; + case Bailout_ShapeGuard: + HandleShapeGuardFailure(cx, outerScript, innerScript); + break; + case Bailout_UninitializedLexical: + HandleLexicalCheckFailure(cx, outerScript, innerScript); + break; + case Bailout_IonExceptionDebugMode: + // Return false to resume in HandleException with reconstructed + // baseline frame. + return false; + default: + MOZ_CRASH("Unknown bailout kind!"); + } + + CheckFrequentBailouts(cx, outerScript, bailoutKind); + + // We're returning to JIT code, so we should clear the override pc. + topFrame->clearOverridePc(); + return true; +} diff --git a/js/src/jit/BaselineCacheIR.cpp b/js/src/jit/BaselineCacheIR.cpp new file mode 100644 index 000000000..bf96932d1 --- /dev/null +++ b/js/src/jit/BaselineCacheIR.cpp @@ -0,0 +1,1283 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/BaselineCacheIR.h" + +#include "jit/CacheIR.h" +#include "jit/Linker.h" +#include "jit/SharedICHelpers.h" + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +// OperandLocation represents the location of an OperandId. The operand is +// either in a register or on the stack, and is either boxed or unboxed. 
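+// As an illustration of the transitions handled below (not an additional
+// invariant): an IC input typically starts out boxed in a register
+// (ValueReg), may be spilled by the allocator (ValueStack), and once it is
+// used as an object becomes an unboxed payload (PayloadReg with
+// JSVAL_TYPE_OBJECT), which can in turn be spilled as PayloadStack.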
+class OperandLocation +{ + public: + enum Kind { + Uninitialized = 0, + PayloadReg, + ValueReg, + PayloadStack, + ValueStack, + }; + + private: + Kind kind_; + + union Data { + struct { + Register reg; + JSValueType type; + } payloadReg; + ValueOperand valueReg; + struct { + uint32_t stackPushed; + JSValueType type; + } payloadStack; + uint32_t valueStackPushed; + + Data() : valueStackPushed(0) {} + }; + Data data_; + + public: + OperandLocation() : kind_(Uninitialized) {} + + Kind kind() const { return kind_; } + + void setUninitialized() { + kind_ = Uninitialized; + } + + ValueOperand valueReg() const { + MOZ_ASSERT(kind_ == ValueReg); + return data_.valueReg; + } + Register payloadReg() const { + MOZ_ASSERT(kind_ == PayloadReg); + return data_.payloadReg.reg; + } + uint32_t payloadStack() const { + MOZ_ASSERT(kind_ == PayloadStack); + return data_.payloadStack.stackPushed; + } + uint32_t valueStack() const { + MOZ_ASSERT(kind_ == ValueStack); + return data_.valueStackPushed; + } + JSValueType payloadType() const { + if (kind_ == PayloadReg) + return data_.payloadReg.type; + MOZ_ASSERT(kind_ == PayloadStack); + return data_.payloadStack.type; + } + void setPayloadReg(Register reg, JSValueType type) { + kind_ = PayloadReg; + data_.payloadReg.reg = reg; + data_.payloadReg.type = type; + } + void setValueReg(ValueOperand reg) { + kind_ = ValueReg; + data_.valueReg = reg; + } + void setPayloadStack(uint32_t stackPushed, JSValueType type) { + kind_ = PayloadStack; + data_.payloadStack.stackPushed = stackPushed; + data_.payloadStack.type = type; + } + void setValueStack(uint32_t stackPushed) { + kind_ = ValueStack; + data_.valueStackPushed = stackPushed; + } + + bool aliasesReg(Register reg) { + if (kind_ == PayloadReg) + return payloadReg() == reg; + if (kind_ == ValueReg) + return valueReg().aliases(reg); + return false; + } + bool aliasesReg(ValueOperand reg) { +#if defined(JS_NUNBOX32) + return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg()); +#else + return aliasesReg(reg.valueReg()); +#endif + } + + bool operator==(const OperandLocation& other) const { + if (kind_ != other.kind_) + return false; + switch (kind()) { + case Uninitialized: + return true; + case PayloadReg: + return payloadReg() == other.payloadReg() && payloadType() == other.payloadType(); + case ValueReg: + return valueReg() == other.valueReg(); + case PayloadStack: + return payloadStack() == other.payloadStack() && payloadType() == other.payloadType(); + case ValueStack: + return valueStack() == other.valueStack(); + } + MOZ_CRASH("Invalid OperandLocation kind"); + } + bool operator!=(const OperandLocation& other) const { return !operator==(other); } +}; + +// Class to track and allocate registers while emitting IC code. +class MOZ_RAII CacheRegisterAllocator +{ + // The original location of the inputs to the cache. + Vector origInputLocations_; + + // The current location of each operand. + Vector operandLocations_; + + // The registers allocated while emitting the current CacheIR op. + // This prevents us from allocating a register and then immediately + // clobbering it for something else, while we're still holding on to it. + LiveGeneralRegisterSet currentOpRegs_; + + // Registers that are currently unused and available. + AllocatableGeneralRegisterSet availableRegs_; + + // The number of bytes pushed on the native stack. + uint32_t stackPushed_; + + // The index of the CacheIR instruction we're currently emitting. 
+ uint32_t currentInstruction_; + + const CacheIRWriter& writer_; + + CacheRegisterAllocator(const CacheRegisterAllocator&) = delete; + CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete; + + public: + friend class AutoScratchRegister; + + explicit CacheRegisterAllocator(const CacheIRWriter& writer) + : stackPushed_(0), + currentInstruction_(0), + writer_(writer) + {} + + MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available) { + availableRegs_ = available; + if (!origInputLocations_.resize(writer_.numInputOperands())) + return false; + if (!operandLocations_.resize(writer_.numOperandIds())) + return false; + return true; + } + + OperandLocation operandLocation(size_t i) const { + return operandLocations_[i]; + } + OperandLocation origInputLocation(size_t i) const { + return origInputLocations_[i]; + } + void initInputLocation(size_t i, ValueOperand reg) { + origInputLocations_[i].setValueReg(reg); + operandLocations_[i] = origInputLocations_[i]; + } + + void nextOp() { + currentOpRegs_.clear(); + currentInstruction_++; + } + + uint32_t stackPushed() const { + return stackPushed_; + } + + // Allocates a new register. + Register allocateRegister(MacroAssembler& masm); + ValueOperand allocateValueRegister(MacroAssembler& masm); + + // Returns the register for the given operand. If the operand is currently + // not in a register, it will load it into one. + ValueOperand useRegister(MacroAssembler& masm, ValOperandId val); + Register useRegister(MacroAssembler& masm, ObjOperandId obj); + + // Allocates an output register for the given operand. + Register defineRegister(MacroAssembler& masm, ObjOperandId obj); +}; + +// RAII class to put a scratch register back in the allocator's availableRegs +// set when we're done with it. +class MOZ_RAII AutoScratchRegister +{ + CacheRegisterAllocator& alloc_; + Register reg_; + + public: + AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm) + : alloc_(alloc) + { + reg_ = alloc.allocateRegister(masm); + MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_)); + } + ~AutoScratchRegister() { + MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_)); + alloc_.availableRegs_.add(reg_); + } + operator Register() const { return reg_; } +}; + +// The FailurePath class stores everything we need to generate a failure path +// at the end of the IC code. The failure path restores the input registers, if +// needed, and jumps to the next stub. +class FailurePath +{ + Vector inputs_; + NonAssertingLabel label_; + uint32_t stackPushed_; + + public: + FailurePath() = default; + + FailurePath(FailurePath&& other) + : inputs_(Move(other.inputs_)), + label_(other.label_), + stackPushed_(other.stackPushed_) + {} + + Label* label() { return &label_; } + + void setStackPushed(uint32_t i) { stackPushed_ = i; } + uint32_t stackPushed() const { return stackPushed_; } + + bool appendInput(OperandLocation loc) { + return inputs_.append(loc); + } + OperandLocation input(size_t i) const { + return inputs_[i]; + } + + // If canShareFailurePath(other) returns true, the same machine code will + // be emitted for two failure paths, so we can share them. + bool canShareFailurePath(const FailurePath& other) const { + if (stackPushed_ != other.stackPushed_) + return false; + + MOZ_ASSERT(inputs_.length() == other.inputs_.length()); + + for (size_t i = 0; i < inputs_.length(); i++) { + if (inputs_[i] != other.inputs_[i]) + return false; + } + return true; + } +}; + +// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler. 
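+// The base class keeps the vector of failure paths and emits each of them
+// after the main IC code. For example (an illustrative reading of
+// canShareFailurePath above, not a separate guarantee): two guards recorded
+// with identical operand locations and the same stackPushed value compare
+// equal, so a single restore-inputs-and-jump-to-next-stub sequence serves
+// both.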
+class MOZ_RAII CacheIRCompiler +{ + protected: + JSContext* cx_; + CacheIRReader reader; + const CacheIRWriter& writer_; + MacroAssembler masm; + + CacheRegisterAllocator allocator; + Vector failurePaths; + + CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer) + : cx_(cx), + reader(writer), + writer_(writer), + allocator(writer_) + {} + + void emitFailurePath(size_t i); +}; + +void +CacheIRCompiler::emitFailurePath(size_t i) +{ + FailurePath& failure = failurePaths[i]; + + masm.bind(failure.label()); + + uint32_t stackPushed = failure.stackPushed(); + size_t numInputOperands = writer_.numInputOperands(); + + for (size_t j = 0; j < numInputOperands; j++) { + OperandLocation orig = allocator.origInputLocation(j); + OperandLocation cur = failure.input(j); + + MOZ_ASSERT(orig.kind() == OperandLocation::ValueReg); + + // We have a cycle if a destination register will be used later + // as source register. If that happens, just push the current value + // on the stack and later get it from there. + for (size_t k = j + 1; k < numInputOperands; k++) { + OperandLocation laterSource = failure.input(k); + switch (laterSource.kind()) { + case OperandLocation::ValueReg: + if (orig.aliasesReg(laterSource.valueReg())) { + stackPushed += sizeof(js::Value); + masm.pushValue(laterSource.valueReg()); + laterSource.setValueStack(stackPushed); + } + break; + case OperandLocation::PayloadReg: + if (orig.aliasesReg(laterSource.payloadReg())) { + stackPushed += sizeof(uintptr_t); + masm.push(laterSource.payloadReg()); + laterSource.setPayloadStack(stackPushed, laterSource.payloadType()); + } + break; + case OperandLocation::PayloadStack: + case OperandLocation::ValueStack: + case OperandLocation::Uninitialized: + break; + } + } + + switch (cur.kind()) { + case OperandLocation::ValueReg: + masm.moveValue(cur.valueReg(), orig.valueReg()); + break; + case OperandLocation::PayloadReg: + masm.tagValue(cur.payloadType(), cur.payloadReg(), orig.valueReg()); + break; + case OperandLocation::PayloadStack: { + MOZ_ASSERT(stackPushed >= sizeof(uintptr_t)); + Register scratch = orig.valueReg().scratchReg(); + if (cur.payloadStack() == stackPushed) { + masm.pop(scratch); + stackPushed -= sizeof(uintptr_t); + } else { + MOZ_ASSERT(cur.payloadStack() < stackPushed); + masm.loadPtr(Address(masm.getStackPointer(), stackPushed - cur.payloadStack()), + scratch); + } + masm.tagValue(cur.payloadType(), scratch, orig.valueReg()); + break; + } + case OperandLocation::ValueStack: + MOZ_ASSERT(stackPushed >= sizeof(js::Value)); + if (cur.valueStack() == stackPushed) { + masm.popValue(orig.valueReg()); + stackPushed -= sizeof(js::Value); + } else { + MOZ_ASSERT(cur.valueStack() < stackPushed); + masm.loadValue(Address(masm.getStackPointer(), stackPushed - cur.valueStack()), + orig.valueReg()); + } + break; + default: + MOZ_CRASH(); + } + } + + if (stackPushed > 0) + masm.addToStackPtr(Imm32(stackPushed)); +} + +// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code. 
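+// A minimal usage sketch (hedged: it assumes a CacheIRWriter |writer| whose
+// ops were recorded elsewhere and a stub-data offset |stubDataOffset|; the
+// calls simply mirror the members declared below):
+//
+//     BaselineCacheIRCompiler comp(cx, writer, stubDataOffset);
+//     if (!comp.init(kind))            // |kind| is the CacheKind to compile
+//         return nullptr;
+//     JitCode* code = comp.compile();  // nullptr on failure (e.g. OOM)
+//
+// compile() walks the CacheIR ops once, dispatching to the emit##op methods,
+// and then appends the shared failure paths.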
+class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler +{ + uint32_t stubDataOffset_; + + public: + BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset) + : CacheIRCompiler(cx, writer), + stubDataOffset_(stubDataOffset) + {} + + MOZ_MUST_USE bool init(CacheKind kind); + + JitCode* compile(); + + private: +#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op(); + CACHE_IR_OPS(DEFINE_OP) +#undef DEFINE_OP + + Address stubAddress(uint32_t offset) const { + return Address(ICStubReg, stubDataOffset_ + offset * sizeof(uintptr_t)); + } + + bool addFailurePath(FailurePath** failure) { + FailurePath newFailure; + for (size_t i = 0; i < writer_.numInputOperands(); i++) { + if (!newFailure.appendInput(allocator.operandLocation(i))) + return false; + } + newFailure.setStackPushed(allocator.stackPushed()); + + // Reuse the previous failure path if the current one is the same, to + // avoid emitting duplicate code. + if (failurePaths.length() > 0 && failurePaths.back().canShareFailurePath(newFailure)) { + *failure = &failurePaths.back(); + return true; + } + + if (!failurePaths.append(Move(newFailure))) + return false; + + *failure = &failurePaths.back(); + return true; + } + void emitEnterTypeMonitorIC() { + if (allocator.stackPushed() > 0) + masm.addToStackPtr(Imm32(allocator.stackPushed())); + EmitEnterTypeMonitorIC(masm); + } + void emitReturnFromIC() { + if (allocator.stackPushed() > 0) + masm.addToStackPtr(Imm32(allocator.stackPushed())); + EmitReturnFromIC(masm); + } +}; + +JitCode* +BaselineCacheIRCompiler::compile() +{ +#ifndef JS_USE_LINK_REGISTER + // The first value contains the return addres, + // which we pull into ICTailCallReg for tail calls. + masm.adjustFrame(sizeof(intptr_t)); +#endif +#ifdef JS_CODEGEN_ARM + masm.setSecondScratchReg(BaselineSecondScratchReg); +#endif + + do { + switch (reader.readOp()) { +#define DEFINE_OP(op) \ + case CacheOp::op: \ + if (!emit##op()) \ + return nullptr; \ + break; + CACHE_IR_OPS(DEFINE_OP) +#undef DEFINE_OP + + default: + MOZ_CRASH("Invalid op"); + } + + allocator.nextOp(); + } while (reader.more()); + + // Done emitting the main IC code. Now emit the failure paths. + for (size_t i = 0; i < failurePaths.length(); i++) { + emitFailurePath(i); + EmitStubGuardFailure(masm); + } + + Linker linker(masm); + AutoFlushICache afc("getStubCode"); + Rooted newStubCode(cx_, linker.newCode(cx_, BASELINE_CODE)); + if (!newStubCode) { + cx_->recoverFromOutOfMemory(); + return nullptr; + } + + // All barriers are emitted off-by-default, enable them if needed. + if (cx_->zone()->needsIncrementalBarrier()) + newStubCode->togglePreBarriers(true, DontReprotect); + + return newStubCode; +} + +ValueOperand +CacheRegisterAllocator::useRegister(MacroAssembler& masm, ValOperandId op) +{ + OperandLocation& loc = operandLocations_[op.id()]; + + switch (loc.kind()) { + case OperandLocation::ValueReg: + currentOpRegs_.add(loc.valueReg()); + return loc.valueReg(); + + case OperandLocation::ValueStack: { + // The Value is on the stack. If it's on top of the stack, unbox and + // then pop it. If we need the registers later, we can always spill + // back. If it's not on the top of the stack, just unbox. 
+ ValueOperand reg = allocateValueRegister(masm); + if (loc.valueStack() == stackPushed_) { + masm.popValue(reg); + MOZ_ASSERT(stackPushed_ >= sizeof(js::Value)); + stackPushed_ -= sizeof(js::Value); + } else { + MOZ_ASSERT(loc.valueStack() < stackPushed_); + masm.loadValue(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), reg); + } + loc.setValueReg(reg); + return reg; + } + + // The operand should never be unboxed. + case OperandLocation::PayloadStack: + case OperandLocation::PayloadReg: + case OperandLocation::Uninitialized: + break; + } + + MOZ_CRASH(); +} + +Register +CacheRegisterAllocator::useRegister(MacroAssembler& masm, ObjOperandId op) +{ + OperandLocation& loc = operandLocations_[op.id()]; + switch (loc.kind()) { + case OperandLocation::PayloadReg: + currentOpRegs_.add(loc.payloadReg()); + return loc.payloadReg(); + + case OperandLocation::ValueReg: { + // It's possible the value is still boxed: as an optimization, we unbox + // the first time we use a value as object. + ValueOperand val = loc.valueReg(); + availableRegs_.add(val); + Register reg = val.scratchReg(); + availableRegs_.take(reg); + masm.unboxObject(val, reg); + loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT); + currentOpRegs_.add(reg); + return reg; + } + + case OperandLocation::PayloadStack: { + // The payload is on the stack. If it's on top of the stack we can just + // pop it, else we emit a load. + Register reg = allocateRegister(masm); + if (loc.payloadStack() == stackPushed_) { + masm.pop(reg); + MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t)); + stackPushed_ -= sizeof(uintptr_t); + } else { + MOZ_ASSERT(loc.payloadStack() < stackPushed_); + masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - loc.payloadStack()), reg); + } + loc.setPayloadReg(reg, loc.payloadType()); + return reg; + } + + case OperandLocation::ValueStack: { + // The value is on the stack, but boxed. If it's on top of the stack we + // unbox it and then remove it from the stack, else we just unbox. + Register reg = allocateRegister(masm); + if (loc.valueStack() == stackPushed_) { + masm.unboxObject(Address(masm.getStackPointer(), 0), reg); + masm.addToStackPtr(Imm32(sizeof(js::Value))); + MOZ_ASSERT(stackPushed_ >= sizeof(js::Value)); + stackPushed_ -= sizeof(js::Value); + } else { + MOZ_ASSERT(loc.valueStack() < stackPushed_); + masm.unboxObject(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), + reg); + } + loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT); + return reg; + } + + case OperandLocation::Uninitialized: + break; + } + + MOZ_CRASH(); +} + +Register +CacheRegisterAllocator::defineRegister(MacroAssembler& masm, ObjOperandId op) +{ + OperandLocation& loc = operandLocations_[op.id()]; + MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized); + + Register reg = allocateRegister(masm); + loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT); + return reg; +} + +Register +CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) +{ + if (availableRegs_.empty()) { + // No registers available. See if any operands are dead so we can reuse + // their registers. Note that we skip the input operands, as those are + // also used by failure paths, and we currently don't track those uses. 
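+        // For example, if a non-input operand was defined by a LoadObject op
+        // and no op at or after currentInstruction_ uses it anymore, its
+        // payload register is returned to availableRegs_ below and its
+        // location is marked Uninitialized.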
+ for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) { + if (!writer_.operandIsDead(i, currentInstruction_)) + continue; + + OperandLocation& loc = operandLocations_[i]; + switch (loc.kind()) { + case OperandLocation::PayloadReg: + availableRegs_.add(loc.payloadReg()); + break; + case OperandLocation::ValueReg: + availableRegs_.add(loc.valueReg()); + break; + case OperandLocation::Uninitialized: + case OperandLocation::PayloadStack: + case OperandLocation::ValueStack: + break; + } + loc.setUninitialized(); + } + } + + if (availableRegs_.empty()) { + // Still no registers available, try to spill unused operands to + // the stack. + for (size_t i = 0; i < operandLocations_.length(); i++) { + OperandLocation& loc = operandLocations_[i]; + if (loc.kind() == OperandLocation::PayloadReg) { + Register reg = loc.payloadReg(); + if (currentOpRegs_.has(reg)) + continue; + + masm.push(reg); + stackPushed_ += sizeof(uintptr_t); + loc.setPayloadStack(stackPushed_, loc.payloadType()); + availableRegs_.add(reg); + break; // We got a register, so break out of the loop. + } + if (loc.kind() == OperandLocation::ValueReg) { + ValueOperand reg = loc.valueReg(); + if (currentOpRegs_.aliases(reg)) + continue; + + masm.pushValue(reg); + stackPushed_ += sizeof(js::Value); + loc.setValueStack(stackPushed_); + availableRegs_.add(reg); + break; // Break out of the loop. + } + } + } + + // At this point, there must be a free register. (Ion ICs don't have as + // many registers available, so once we support Ion code generation, we may + // have to spill some unrelated registers.) + MOZ_RELEASE_ASSERT(!availableRegs_.empty()); + + Register reg = availableRegs_.takeAny(); + currentOpRegs_.add(reg); + return reg; +} + +ValueOperand +CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm) +{ +#ifdef JS_NUNBOX32 + Register reg1 = allocateRegister(masm); + Register reg2 = allocateRegister(masm); + return ValueOperand(reg1, reg2); +#else + Register reg = allocateRegister(masm); + return ValueOperand(reg); +#endif +} + +bool +BaselineCacheIRCompiler::emitGuardIsObject() +{ + ValueOperand input = allocator.useRegister(masm, reader.valOperandId()); + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + masm.branchTestObject(Assembler::NotEqual, input, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardType() +{ + ValueOperand input = allocator.useRegister(masm, reader.valOperandId()); + JSValueType type = reader.valueType(); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + switch (type) { + case JSVAL_TYPE_STRING: + masm.branchTestString(Assembler::NotEqual, input, failure->label()); + break; + case JSVAL_TYPE_SYMBOL: + masm.branchTestSymbol(Assembler::NotEqual, input, failure->label()); + break; + case JSVAL_TYPE_DOUBLE: + masm.branchTestNumber(Assembler::NotEqual, input, failure->label()); + break; + case JSVAL_TYPE_BOOLEAN: + masm.branchTestBoolean(Assembler::NotEqual, input, failure->label()); + break; + default: + MOZ_CRASH("Unexpected type"); + } + + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardShape() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address addr(stubAddress(reader.stubOffset())); + masm.loadPtr(addr, scratch); + masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, failure->label()); + return true; +} + +bool 
+BaselineCacheIRCompiler::emitGuardGroup() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address addr(stubAddress(reader.stubOffset())); + masm.loadPtr(addr, scratch); + masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardProto() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address addr(stubAddress(reader.stubOffset())); + masm.loadObjProto(obj, scratch); + masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardClass() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + const Class* clasp = nullptr; + switch (reader.guardClassKind()) { + case GuardClassKind::Array: + clasp = &ArrayObject::class_; + break; + case GuardClassKind::UnboxedArray: + clasp = &UnboxedArrayObject::class_; + break; + case GuardClassKind::MappedArguments: + clasp = &MappedArgumentsObject::class_; + break; + case GuardClassKind::UnmappedArguments: + clasp = &UnmappedArgumentsObject::class_; + break; + } + + MOZ_ASSERT(clasp); + masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardSpecificObject() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address addr(stubAddress(reader.stubOffset())); + masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardNoUnboxedExpando() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando()); + masm.branchPtr(Assembler::NotEqual, expandoAddr, ImmWord(0), failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitGuardAndLoadUnboxedExpando() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + Register output = allocator.defineRegister(masm, reader.objOperandId()); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando()); + masm.loadPtr(expandoAddr, output); + masm.branchTestPtr(Assembler::Zero, output, output, failure->label()); + return true; +} + +bool +BaselineCacheIRCompiler::emitLoadFixedSlotResult() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + masm.load32(stubAddress(reader.stubOffset()), scratch); + masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0); + emitEnterTypeMonitorIC(); + return true; +} + +bool +BaselineCacheIRCompiler::emitLoadDynamicSlotResult() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + // We're about to return, so it's safe to clobber obj now. 
+    masm.load32(stubAddress(reader.stubOffset()), scratch);
+    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
+    masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
+    emitEnterTypeMonitorIC();
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
+{
+    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    AutoScratchRegister scratch(allocator, masm);
+
+    JSValueType fieldType = reader.valueType();
+
+    Address fieldOffset(stubAddress(reader.stubOffset()));
+    masm.load32(fieldOffset, scratch);
+    masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType, R0);
+
+    if (fieldType == JSVAL_TYPE_OBJECT)
+        emitEnterTypeMonitorIC();
+    else
+        emitReturnFromIC();
+
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitGuardNoDetachedTypedObjects()
+{
+    FailurePath* failure;
+    if (!addFailurePath(&failure))
+        return false;
+
+    CheckForTypedObjectWithDetachedStorage(cx_, masm, failure->label());
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadTypedObjectResult()
+{
+    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    AutoScratchRegister scratch1(allocator, masm);
+    AutoScratchRegister scratch2(allocator, masm);
+
+    TypedThingLayout layout = reader.typedThingLayout();
+    uint32_t typeDescr = reader.typeDescrKey();
+    Address fieldOffset(stubAddress(reader.stubOffset()));
+
+    // Get the object's data pointer.
+    LoadTypedThingData(masm, layout, obj, scratch1);
+
+    // Get the address being read from.
+    masm.load32(fieldOffset, scratch2);
+    masm.addPtr(scratch2, scratch1);
+
+    // Only monitor the result if the type produced by this stub might vary.
+    bool monitorLoad;
+    if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
+        Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
+        monitorLoad = type == Scalar::Uint32;
+
+        masm.loadFromTypedArray(type, Address(scratch1, 0), R0, /* allowDouble = */ true,
+                                scratch2, nullptr);
+    } else {
+        ReferenceTypeDescr::Type type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
+        monitorLoad = type != ReferenceTypeDescr::TYPE_STRING;
+
+        switch (type) {
+          case ReferenceTypeDescr::TYPE_ANY:
+            masm.loadValue(Address(scratch1, 0), R0);
+            break;
+
+          case ReferenceTypeDescr::TYPE_OBJECT: {
+            Label notNull, done;
+            masm.loadPtr(Address(scratch1, 0), scratch1);
+            masm.branchTestPtr(Assembler::NonZero, scratch1, scratch1, &notNull);
+            masm.moveValue(NullValue(), R0);
+            masm.jump(&done);
+            masm.bind(&notNull);
+            masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, R0);
+            masm.bind(&done);
+            break;
+          }
+
+          case ReferenceTypeDescr::TYPE_STRING:
+            masm.loadPtr(Address(scratch1, 0), scratch1);
+            masm.tagValue(JSVAL_TYPE_STRING, scratch1, R0);
+            break;
+
+          default:
+            MOZ_CRASH("Invalid ReferenceTypeDescr");
+        }
+    }
+
+    if (monitorLoad)
+        emitEnterTypeMonitorIC();
+    else
+        emitReturnFromIC();
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadUndefinedResult()
+{
+    masm.moveValue(UndefinedValue(), R0);
+
+    // Normally for this op, the result would have to be monitored by TI.
+    // However, since this stub ALWAYS returns UndefinedValue(), and we can be sure
+    // that undefined is already registered with the type-set, this can be avoided.
+ emitReturnFromIC(); + return true; +} + +bool +BaselineCacheIRCompiler::emitLoadInt32ArrayLengthResult() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch); + masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch); + + // Guard length fits in an int32. + masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label()); + masm.tagValue(JSVAL_TYPE_INT32, scratch, R0); + + // The int32 type was monitored when attaching the stub, so we can + // just return. + emitReturnFromIC(); + return true; +} + +bool +BaselineCacheIRCompiler::emitLoadUnboxedArrayLengthResult() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + masm.load32(Address(obj, UnboxedArrayObject::offsetOfLength()), R0.scratchReg()); + masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0); + + // The int32 type was monitored when attaching the stub, so we can + // just return. + emitReturnFromIC(); + return true; +} + +bool +BaselineCacheIRCompiler::emitLoadArgumentsObjectLengthResult() +{ + Register obj = allocator.useRegister(masm, reader.objOperandId()); + AutoScratchRegister scratch(allocator, masm); + + FailurePath* failure; + if (!addFailurePath(&failure)) + return false; + + // Get initial length value. + masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), scratch); + + // Test if length has been overridden. + masm.branchTest32(Assembler::NonZero, + scratch, + Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), + failure->label()); + + // Shift out arguments length and return it. No need to type monitor + // because this stub always returns int32. 
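+    // (The initial length slot packs the length shifted left by
+    // ArgumentsObject::PACKED_BITS_COUNT together with flag bits such as
+    // LENGTH_OVERRIDDEN_BIT in the low bits, so the shift below recovers the
+    // actual length; e.g. a 3-element arguments object with no flags set
+    // stores 3 << PACKED_BITS_COUNT.)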
+    masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
+    masm.tagValue(JSVAL_TYPE_INT32, scratch, R0);
+    emitReturnFromIC();
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadObject()
+{
+    Register reg = allocator.defineRegister(masm, reader.objOperandId());
+    masm.loadPtr(stubAddress(reader.stubOffset()), reg);
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::emitLoadProto()
+{
+    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    Register reg = allocator.defineRegister(masm, reader.objOperandId());
+    masm.loadObjProto(obj, reg);
+    return true;
+}
+
+bool
+BaselineCacheIRCompiler::init(CacheKind kind)
+{
+    size_t numInputs = writer_.numInputOperands();
+    if (!allocator.init(ICStubCompiler::availableGeneralRegs(numInputs)))
+        return false;
+
+    MOZ_ASSERT(numInputs == 1);
+    allocator.initInputLocation(0, R0);
+
+    return true;
+}
+
+template<class T>
+static GCPtr<T>*
+AsGCPtr(uintptr_t* ptr)
+{
+    return reinterpret_cast<GCPtr<T>*>(ptr);
+}
+
+template<class T>
+GCPtr<T>&
+CacheIRStubInfo::getStubField(ICStub* stub, uint32_t field) const
+{
+    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
+    MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);
+
+    return *AsGCPtr<T>((uintptr_t*)stubData + field);
+}
+
+template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
+
+template<class T>
+static void
+InitGCPtr(uintptr_t* ptr, uintptr_t val)
+{
+    AsGCPtr<T*>(ptr)->init((T*)val);
+}
+
+void
+CacheIRWriter::copyStubData(uint8_t* dest) const
+{
+    uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
+
+    for (size_t i = 0; i < stubFields_.length(); i++) {
+        switch (stubFields_[i].gcType) {
+          case StubField::GCType::NoGCThing:
+            destWords[i] = stubFields_[i].word;
+            continue;
+          case StubField::GCType::Shape:
+            InitGCPtr<Shape>(destWords + i, stubFields_[i].word);
+            continue;
+          case StubField::GCType::JSObject:
+            InitGCPtr<JSObject>(destWords + i, stubFields_[i].word);
+            continue;
+          case StubField::GCType::ObjectGroup:
+            InitGCPtr<ObjectGroup>(destWords + i, stubFields_[i].word);
+            continue;
+          case StubField::GCType::Limit:
+            break;
+        }
+        MOZ_CRASH();
+    }
+}
+
+HashNumber
+CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
+{
+    HashNumber hash = mozilla::HashBytes(l.code, l.length);
+    return mozilla::AddToHash(hash, uint32_t(l.kind));
+}
+
+bool
+CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
+{
+    if (entry.stubInfo->kind() != l.kind)
+        return false;
+
+    if (entry.stubInfo->codeLength() != l.length)
+        return false;
+
+    if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
+        return false;
+
+    return true;
+}
+
+CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
+  : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
+{}
+
+CacheIRStubInfo*
+CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWriter& writer)
+{
+    size_t numStubFields = writer.numStubFields();
+    size_t bytesNeeded = sizeof(CacheIRStubInfo) +
+                         writer.codeLength() +
+                         (numStubFields + 1); // +1 for the GCType::Limit terminator.
+    uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
+    if (!p)
+        return nullptr;
+
+    // Copy the CacheIR code.
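+    // (Layout of the allocation: the CacheIRStubInfo header, then the CacheIR
+    // code bytes, then one GC-type byte per stub field plus a GCType::Limit
+    // terminator; the pointers below are computed accordingly.)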
+ uint8_t* codeStart = p + sizeof(CacheIRStubInfo); + mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength()); + + static_assert(uint32_t(StubField::GCType::Limit) <= UINT8_MAX, + "All StubField::GCTypes must fit in uint8_t"); + + // Copy the GC types of the stub fields. + uint8_t* gcTypes = codeStart + writer.codeLength(); + for (size_t i = 0; i < numStubFields; i++) + gcTypes[i] = uint8_t(writer.stubFieldGCType(i)); + gcTypes[numStubFields] = uint8_t(StubField::GCType::Limit); + + return new(p) CacheIRStubInfo(kind, stubDataOffset, codeStart, writer.codeLength(), gcTypes); +} + +static const size_t MaxOptimizedCacheIRStubs = 16; + +ICStub* +jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind, + ICFallbackStub* stub) +{ + // We shouldn't GC or report OOM (or any other exception) here. + AutoAssertNoPendingException aanpe(cx); + JS::AutoCheckCannotGC nogc; + + if (writer.failed()) + return nullptr; + + // Just a sanity check: the caller should ensure we don't attach an + // unlimited number of stubs. + MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs); + + MOZ_ASSERT(kind == CacheKind::GetProp); + uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored); + + JitCompartment* jitCompartment = cx->compartment()->jitCompartment(); + + // Check if we already have JitCode for this stub. + CacheIRStubInfo* stubInfo; + CacheIRStubKey::Lookup lookup(kind, writer.codeStart(), writer.codeLength()); + JitCode* code = jitCompartment->getCacheIRStubCode(lookup, &stubInfo); + if (!code) { + // We have to generate stub code. + JitContext jctx(cx, nullptr); + BaselineCacheIRCompiler comp(cx, writer, stubDataOffset); + if (!comp.init(kind)) + return nullptr; + + code = comp.compile(); + if (!code) + return nullptr; + + // Allocate the shared CacheIRStubInfo. Note that the putCacheIRStubCode + // call below will transfer ownership to the stub code HashMap, so we + // don't have to worry about freeing it below. + MOZ_ASSERT(!stubInfo); + stubInfo = CacheIRStubInfo::New(kind, stubDataOffset, writer); + if (!stubInfo) + return nullptr; + + CacheIRStubKey key(stubInfo); + if (!jitCompartment->putCacheIRStubCode(lookup, key, code)) + return nullptr; + } + + // We got our shared stub code and stub info. Time to allocate and attach a + // new stub. + + MOZ_ASSERT(code); + MOZ_ASSERT(stubInfo); + MOZ_ASSERT(stub->isMonitoredFallback()); + + size_t bytesNeeded = stubInfo->stubDataOffset() + writer.stubDataSize(); + + // For now, no stubs can make calls so they are all allocated in the + // optimized stub space. 
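+    // The new stub is laid out as an ICCacheIR_Monitored header followed by
+    // the raw stub data words; stubDataOffset is sizeof(ICCacheIR_Monitored),
+    // and copyStubData() below fills in the data words.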
+    void* newStub = cx->zone()->jitZone()->optimizedStubSpace()->alloc(bytesNeeded);
+    if (!newStub)
+        return nullptr;
+
+    ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
+    new(newStub) ICCacheIR_Monitored(code, monitorStub, stubInfo);
+
+    writer.copyStubData((uint8_t*)newStub + stubInfo->stubDataOffset());
+    stub->addNewStub((ICStub*)newStub);
+    return (ICStub*)newStub;
+}
+
+void
+jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo)
+{
+    uint32_t field = 0;
+    while (true) {
+        switch (stubInfo->gcType(field)) {
+          case StubField::GCType::NoGCThing:
+            break;
+          case StubField::GCType::Shape:
+            TraceNullableEdge(trc, &stubInfo->getStubField<Shape*>(stub, field),
+                              "baseline-cacheir-shape");
+            break;
+          case StubField::GCType::ObjectGroup:
+            TraceNullableEdge(trc, &stubInfo->getStubField<ObjectGroup*>(stub, field),
+                              "baseline-cacheir-group");
+            break;
+          case StubField::GCType::JSObject:
+            TraceNullableEdge(trc, &stubInfo->getStubField<JSObject*>(stub, field),
+                              "baseline-cacheir-object");
+            break;
+          case StubField::GCType::Limit:
+            return; // Done.
+          default:
+            MOZ_CRASH();
+        }
+        field++;
+    }
+}
diff --git a/js/src/jit/BaselineCacheIR.h b/js/src/jit/BaselineCacheIR.h
new file mode 100644
index 000000000..187d18e3a
--- /dev/null
+++ b/js/src/jit/BaselineCacheIR.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_BaselineCacheIR_h
+#define jit_BaselineCacheIR_h
+
+#include "gc/Barrier.h"
+#include "jit/CacheIR.h"
+
+namespace js {
+namespace jit {
+
+class ICFallbackStub;
+class ICStub;
+
+// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
+// of this class.
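+// In short: a CacheIRStubInfo records the CacheIR code and the GC type of
+// each stub field, so that many ICCacheIR_Monitored stubs can share one piece
+// of JitCode while the GC can still trace each stub's own pointers (see
+// TraceBaselineCacheIRStub in BaselineCacheIR.cpp).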
+class CacheIRStubInfo +{ + CacheKind kind_; + uint8_t stubDataOffset_; + const uint8_t* code_; + uint32_t length_; + const uint8_t* gcTypes_; + + CacheIRStubInfo(CacheKind kind, uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength, + const uint8_t* gcTypes) + : kind_(kind), + stubDataOffset_(stubDataOffset), + code_(code), + length_(codeLength), + gcTypes_(gcTypes) + { + MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t"); + } + + CacheIRStubInfo(const CacheIRStubInfo&) = delete; + CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete; + + public: + CacheKind kind() const { return kind_; } + + const uint8_t* code() const { return code_; } + uint32_t codeLength() const { return length_; } + uint32_t stubDataOffset() const { return stubDataOffset_; } + + StubField::GCType gcType(uint32_t i) const { return (StubField::GCType)gcTypes_[i]; } + + static CacheIRStubInfo* New(CacheKind kind, uint32_t stubDataOffset, + const CacheIRWriter& writer); + + template + js::GCPtr& getStubField(ICStub* stub, uint32_t field) const; +}; + +void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo); + +ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind, + ICFallbackStub* stub); + +} // namespace jit +} // namespace js + +#endif /* jit_BaselineCacheIR_h */ diff --git a/js/src/jit/BaselineCompiler.cpp b/js/src/jit/BaselineCompiler.cpp new file mode 100644 index 000000000..c58367aa3 --- /dev/null +++ b/js/src/jit/BaselineCompiler.cpp @@ -0,0 +1,4527 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/BaselineCompiler.h" + +#include "mozilla/Casting.h" +#include "mozilla/SizePrintfMacros.h" + +#include "jit/BaselineIC.h" +#include "jit/BaselineJIT.h" +#include "jit/FixedList.h" +#include "jit/IonAnalysis.h" +#include "jit/JitcodeMap.h" +#include "jit/JitSpewer.h" +#include "jit/Linker.h" +#ifdef JS_ION_PERF +# include "jit/PerfSpewer.h" +#endif +#include "jit/SharedICHelpers.h" +#include "jit/VMFunctions.h" +#include "js/UniquePtr.h" +#include "vm/AsyncFunction.h" +#include "vm/EnvironmentObject.h" +#include "vm/Interpreter.h" +#include "vm/TraceLogging.h" + +#include "jsscriptinlines.h" + +#include "jit/BaselineFrameInfo-inl.h" +#include "jit/MacroAssembler-inl.h" +#include "vm/Interpreter-inl.h" +#include "vm/NativeObject-inl.h" + +using namespace js; +using namespace js::jit; + +using mozilla::AssertedCast; + +BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc, JSScript* script) + : BaselineCompilerSpecific(cx, alloc, script), + yieldOffsets_(cx), + modifiesArguments_(false) +{ +} + +bool +BaselineCompiler::init() +{ + if (!analysis_.init(alloc_, cx->caches.gsnCache)) + return false; + + if (!labels_.init(alloc_, script->length())) + return false; + + for (size_t i = 0; i < script->length(); i++) + new (&labels_[i]) Label(); + + if (!frame.init(alloc_)) + return false; + + return true; +} + +bool +BaselineCompiler::addPCMappingEntry(bool addIndexEntry) +{ + // Don't add multiple entries for a single pc. 
+ size_t nentries = pcMappingEntries_.length(); + if (nentries > 0 && pcMappingEntries_[nentries - 1].pcOffset == script->pcToOffset(pc)) + return true; + + PCMappingEntry entry; + entry.pcOffset = script->pcToOffset(pc); + entry.nativeOffset = masm.currentOffset(); + entry.slotInfo = getStackTopSlotInfo(); + entry.addIndexEntry = addIndexEntry; + + return pcMappingEntries_.append(entry); +} + +MethodStatus +BaselineCompiler::compile() +{ + JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%" PRIuSIZE " (%p)", + script->filename(), script->lineno(), script); + + JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%" PRIuSIZE, + script->filename(), script->lineno()); + + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + TraceLoggerEvent scriptEvent(logger, TraceLogger_AnnotateScripts, script); + AutoTraceLog logScript(logger, scriptEvent); + AutoTraceLog logCompile(logger, TraceLogger_BaselineCompilation); + + if (!script->ensureHasTypes(cx) || !script->ensureHasAnalyzedArgsUsage(cx)) + return Method_Error; + + // When code coverage is only enabled for optimizations, or when a Debugger + // set the collectCoverageInfo flag, we have to create the ScriptCounts if + // they do not exist. + if (!script->hasScriptCounts() && cx->compartment()->collectCoverage()) { + if (!script->initScriptCounts(cx)) + return Method_Error; + } + + // Pin analysis info during compilation. + AutoEnterAnalysis autoEnterAnalysis(cx); + + MOZ_ASSERT(!script->hasBaselineScript()); + + if (!emitPrologue()) + return Method_Error; + + MethodStatus status = emitBody(); + if (status != Method_Compiled) + return status; + + if (!emitEpilogue()) + return Method_Error; + + if (!emitOutOfLinePostBarrierSlot()) + return Method_Error; + + Linker linker(masm); + if (masm.oom()) { + ReportOutOfMemory(cx); + return Method_Error; + } + + AutoFlushICache afc("Baseline"); + JitCode* code = linker.newCode(cx, BASELINE_CODE); + if (!code) + return Method_Error; + + Rooted templateEnv(cx); + if (script->functionNonDelazifying()) { + RootedFunction fun(cx, script->functionNonDelazifying()); + + if (fun->needsNamedLambdaEnvironment()) { + templateEnv = NamedLambdaObject::createTemplateObject(cx, fun, gc::TenuredHeap); + if (!templateEnv) + return Method_Error; + } + + if (fun->needsCallObject()) { + RootedScript scriptRoot(cx, script); + templateEnv = CallObject::createTemplateObject(cx, scriptRoot, templateEnv, + gc::TenuredHeap); + if (!templateEnv) + return Method_Error; + } + } + + // Encode the pc mapping table. See PCMappingIndexEntry for + // more information. + Vector pcMappingIndexEntries(cx); + CompactBufferWriter pcEntries; + uint32_t previousOffset = 0; + + for (size_t i = 0; i < pcMappingEntries_.length(); i++) { + PCMappingEntry& entry = pcMappingEntries_[i]; + + if (entry.addIndexEntry) { + PCMappingIndexEntry indexEntry; + indexEntry.pcOffset = entry.pcOffset; + indexEntry.nativeOffset = entry.nativeOffset; + indexEntry.bufferOffset = pcEntries.length(); + if (!pcMappingIndexEntries.append(indexEntry)) { + ReportOutOfMemory(cx); + return Method_Error; + } + previousOffset = entry.nativeOffset; + } + + // Use the high bit of the SlotInfo byte to indicate the + // native code offset (relative to the previous op) > 0 and + // comes next in the buffer. 
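+        // For example, a SlotInfo byte of 0x03 at the same native offset as
+        // the previous entry is written as the single byte 0x03, while the
+        // same SlotInfo five bytes further into the native code is written as
+        // 0x83 followed by the compact unsigned delta 5.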
+ MOZ_ASSERT((entry.slotInfo.toByte() & 0x80) == 0); + + if (entry.nativeOffset == previousOffset) { + pcEntries.writeByte(entry.slotInfo.toByte()); + } else { + MOZ_ASSERT(entry.nativeOffset > previousOffset); + pcEntries.writeByte(0x80 | entry.slotInfo.toByte()); + pcEntries.writeUnsigned(entry.nativeOffset - previousOffset); + } + + previousOffset = entry.nativeOffset; + } + + if (pcEntries.oom()) { + ReportOutOfMemory(cx); + return Method_Error; + } + + // Note: There is an extra entry in the bytecode type map for the search hint, see below. + size_t bytecodeTypeMapEntries = script->nTypeSets() + 1; + UniquePtr baselineScript( + BaselineScript::New(script, prologueOffset_.offset(), + epilogueOffset_.offset(), + profilerEnterFrameToggleOffset_.offset(), + profilerExitFrameToggleOffset_.offset(), + postDebugPrologueOffset_.offset(), + icEntries_.length(), + pcMappingIndexEntries.length(), + pcEntries.length(), + bytecodeTypeMapEntries, + yieldOffsets_.length(), + traceLoggerToggleOffsets_.length()), + JS::DeletePolicy(cx->runtime())); + if (!baselineScript) { + ReportOutOfMemory(cx); + return Method_Error; + } + + baselineScript->setMethod(code); + baselineScript->setTemplateEnvironment(templateEnv); + + JitSpew(JitSpew_BaselineScripts, "Created BaselineScript %p (raw %p) for %s:%" PRIuSIZE, + (void*) baselineScript.get(), (void*) code->raw(), + script->filename(), script->lineno()); + +#ifdef JS_ION_PERF + writePerfSpewerBaselineProfile(script, code); +#endif + + MOZ_ASSERT(pcMappingIndexEntries.length() > 0); + baselineScript->copyPCMappingIndexEntries(&pcMappingIndexEntries[0]); + + MOZ_ASSERT(pcEntries.length() > 0); + baselineScript->copyPCMappingEntries(pcEntries); + + // Copy IC entries + if (icEntries_.length()) + baselineScript->copyICEntries(script, &icEntries_[0], masm); + + // Adopt fallback stubs from the compiler into the baseline script. + baselineScript->adoptFallbackStubs(&stubSpace_); + + // All barriers are emitted off-by-default, toggle them on if needed. + if (cx->zone()->needsIncrementalBarrier()) + baselineScript->toggleBarriers(true, DontReprotect); + + // If profiler instrumentation is enabled, toggle instrumentation on. + if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) + baselineScript->toggleProfilerInstrumentation(true); + + // Patch IC loads using IC entries. + for (size_t i = 0; i < icLoadLabels_.length(); i++) { + CodeOffset label = icLoadLabels_[i].label; + size_t icEntry = icLoadLabels_[i].icEntry; + BaselineICEntry* entryAddr = &(baselineScript->icEntry(icEntry)); + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label), + ImmPtr(entryAddr), + ImmPtr((void*)-1)); + } + + if (modifiesArguments_) + baselineScript->setModifiesArguments(); + +#ifdef JS_TRACE_LOGGING + // Initialize the tracelogger instrumentation. + baselineScript->initTraceLogger(cx->runtime(), script, traceLoggerToggleOffsets_); +#endif + + uint32_t* bytecodeMap = baselineScript->bytecodeTypeMap(); + FillBytecodeTypeMap(script, bytecodeMap); + + // The last entry in the last index found, and is used to avoid binary + // searches for the sought entry when queries are in linear order. + bytecodeMap[script->nTypeSets()] = 0; + + baselineScript->copyYieldEntries(script, yieldOffsets_); + + if (compileDebugInstrumentation_) + baselineScript->setHasDebugInstrumentation(); + + // Always register a native => bytecode mapping entry, since profiler can be + // turned on with baseline jitcode on stack, and baseline jitcode cannot be invalidated. 
+ { + JitSpew(JitSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%" PRIuSIZE " (%p)", + script->filename(), script->lineno(), baselineScript.get()); + + // Generate profiling string. + char* str = JitcodeGlobalEntry::createScriptString(cx, script); + if (!str) + return Method_Error; + + JitcodeGlobalEntry::BaselineEntry entry; + entry.init(code, code->raw(), code->rawEnd(), script, str); + + JitcodeGlobalTable* globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable(); + if (!globalTable->addEntry(entry, cx->runtime())) { + entry.destroy(); + ReportOutOfMemory(cx); + return Method_Error; + } + + // Mark the jitcode as having a bytecode map. + code->setHasBytecodeMap(); + } + + script->setBaselineScript(cx->runtime(), baselineScript.release()); + + return Method_Compiled; +} + +void +BaselineCompiler::emitInitializeLocals() +{ + // Initialize all locals to |undefined|. Lexical bindings are temporal + // dead zoned in bytecode. + + size_t n = frame.nlocals(); + if (n == 0) + return; + + // Use R0 to minimize code size. If the number of locals to push is < + // LOOP_UNROLL_FACTOR, then the initialization pushes are emitted directly + // and inline. Otherwise, they're emitted in a partially unrolled loop. + static const size_t LOOP_UNROLL_FACTOR = 4; + size_t toPushExtra = n % LOOP_UNROLL_FACTOR; + + masm.moveValue(UndefinedValue(), R0); + + // Handle any extra pushes left over by the optional unrolled loop below. + for (size_t i = 0; i < toPushExtra; i++) + masm.pushValue(R0); + + // Partially unrolled loop of pushes. + if (n >= LOOP_UNROLL_FACTOR) { + size_t toPush = n - toPushExtra; + MOZ_ASSERT(toPush % LOOP_UNROLL_FACTOR == 0); + MOZ_ASSERT(toPush >= LOOP_UNROLL_FACTOR); + masm.move32(Imm32(toPush), R1.scratchReg()); + // Emit unrolled loop with 4 pushes per iteration. + Label pushLoop; + masm.bind(&pushLoop); + for (size_t i = 0; i < LOOP_UNROLL_FACTOR; i++) + masm.pushValue(R0); + masm.branchSub32(Assembler::NonZero, + Imm32(LOOP_UNROLL_FACTOR), R1.scratchReg(), &pushLoop); + } +} + +bool +BaselineCompiler::emitPrologue() +{ +#ifdef JS_USE_LINK_REGISTER + // Push link register from generateEnterJIT()'s BLR. + masm.pushReturnAddress(); + masm.checkStackAlignment(); +#endif + emitProfilerEnterFrame(); + + masm.push(BaselineFrameReg); + masm.moveStackPtrTo(BaselineFrameReg); + masm.subFromStackPtr(Imm32(BaselineFrame::Size())); + + // Initialize BaselineFrame. For eval scripts, the scope chain + // is passed in R1, so we have to be careful not to clobber it. + + // Initialize BaselineFrame::flags. + masm.store32(Imm32(0), frame.addressOfFlags()); + + // Handle env chain pre-initialization (in case GC gets run + // during stack check). For global and eval scripts, the env + // chain is in R1. For function scripts, the env chain is in + // the callee, nullptr is stored for now so that GC doesn't choke + // on a bogus EnvironmentChain value in the frame. + if (function()) + masm.storePtr(ImmPtr(nullptr), frame.addressOfEnvironmentChain()); + else + masm.storePtr(R1.scratchReg(), frame.addressOfEnvironmentChain()); + + // Functions with a large number of locals require two stack checks. + // The VMCall for a fallible stack check can only occur after the + // env chain has been initialized, as that is required for proper + // exception handling if the VMCall returns false. The env chain + // initialization can only happen after the UndefinedValues for the + // local slots have been pushed. + // However by that time, the stack might have grown too much. 
+ // In these cases, we emit an extra, early, infallible check + // before pushing the locals. The early check sets a flag on the + // frame if the stack check fails (but otherwise doesn't throw an + // exception). If the flag is set, then the jitcode skips past + // the pushing of the locals, and directly to env chain initialization + // followed by the actual stack check, which will throw the correct + // exception. + Label earlyStackCheckFailed; + if (needsEarlyStackCheck()) { + if (!emitStackCheck(/* earlyCheck = */ true)) + return false; + masm.branchTest32(Assembler::NonZero, + frame.addressOfFlags(), + Imm32(BaselineFrame::OVER_RECURSED), + &earlyStackCheckFailed); + } + + emitInitializeLocals(); + + if (needsEarlyStackCheck()) + masm.bind(&earlyStackCheckFailed); + +#ifdef JS_TRACE_LOGGING + if (!emitTraceLoggerEnter()) + return false; +#endif + + // Record the offset of the prologue, because Ion can bailout before + // the env chain is initialized. + prologueOffset_ = CodeOffset(masm.currentOffset()); + + // When compiling with Debugger instrumentation, set the debuggeeness of + // the frame before any operation that can call into the VM. + emitIsDebuggeeCheck(); + + // Initialize the env chain before any operation that may + // call into the VM and trigger a GC. + if (!initEnvironmentChain()) + return false; + + if (!emitStackCheck()) + return false; + + if (!emitDebugPrologue()) + return false; + + if (!emitWarmUpCounterIncrement()) + return false; + + if (!emitArgumentTypeChecks()) + return false; + + return true; +} + +bool +BaselineCompiler::emitEpilogue() +{ + // Record the offset of the epilogue, so we can do early return from + // Debugger handlers during on-stack recompile. + epilogueOffset_ = CodeOffset(masm.currentOffset()); + + masm.bind(&return_); + +#ifdef JS_TRACE_LOGGING + if (!emitTraceLoggerExit()) + return false; +#endif + + masm.moveToStackPtr(BaselineFrameReg); + masm.pop(BaselineFrameReg); + + emitProfilerExitFrame(); + + masm.ret(); + return true; +} + +// On input: +// R2.scratchReg() contains object being written to. +// Called with the baseline stack synced, except for R0 which is preserved. +// All other registers are usable as scratch. +// This calls: +// void PostWriteBarrier(JSRuntime* rt, JSObject* obj); +bool +BaselineCompiler::emitOutOfLinePostBarrierSlot() +{ + masm.bind(&postBarrierSlot_); + + Register objReg = R2.scratchReg(); + AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); + regs.take(R0); + regs.take(objReg); + regs.take(BaselineFrameReg); + Register scratch = regs.takeAny(); +#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) + // On ARM, save the link register before calling. It contains the return + // address. The |masm.ret()| later will pop this into |pc| to return. 
+ masm.push(lr); +#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + masm.push(ra); +#endif + masm.pushValue(R0); + + masm.setupUnalignedABICall(scratch); + masm.movePtr(ImmPtr(cx->runtime()), scratch); + masm.passABIArg(scratch); + masm.passABIArg(objReg); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier)); + + masm.popValue(R0); + masm.ret(); + return true; +} + +bool +BaselineCompiler::emitIC(ICStub* stub, ICEntry::Kind kind) +{ + BaselineICEntry* entry = allocateICEntry(stub, kind); + if (!entry) + return false; + + CodeOffset patchOffset; + EmitCallIC(&patchOffset, masm); + entry->setReturnOffset(CodeOffset(masm.currentOffset())); + if (!addICLoadLabel(patchOffset)) + return false; + + return true; +} + +typedef bool (*CheckOverRecursedWithExtraFn)(JSContext*, BaselineFrame*, uint32_t, uint32_t); +static const VMFunction CheckOverRecursedWithExtraInfo = + FunctionInfo(CheckOverRecursedWithExtra, + "CheckOverRecursedWithExtra"); + +bool +BaselineCompiler::emitStackCheck(bool earlyCheck) +{ + Label skipCall; + void* limitAddr = cx->runtime()->addressOfJitStackLimit(); + uint32_t slotsSize = script->nslots() * sizeof(Value); + uint32_t tolerance = earlyCheck ? slotsSize : 0; + + masm.moveStackPtrTo(R1.scratchReg()); + + // If this is the early stack check, locals haven't been pushed yet. Adjust the + // stack pointer to account for the locals that would be pushed before performing + // the guard around the vmcall to the stack check. + if (earlyCheck) + masm.subPtr(Imm32(tolerance), R1.scratchReg()); + + // If this is the late stack check for a frame which contains an early stack check, + // then the early stack check might have failed and skipped past the pushing of locals + // on the stack. + // + // If this is a possibility, then the OVER_RECURSED flag should be checked, and the + // VMCall to CheckOverRecursed done unconditionally if it's set. + Label forceCall; + if (!earlyCheck && needsEarlyStackCheck()) { + masm.branchTest32(Assembler::NonZero, + frame.addressOfFlags(), + Imm32(BaselineFrame::OVER_RECURSED), + &forceCall); + } + + masm.branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(limitAddr), R1.scratchReg(), + &skipCall); + + if (!earlyCheck && needsEarlyStackCheck()) + masm.bind(&forceCall); + + prepareVMCall(); + pushArg(Imm32(earlyCheck)); + pushArg(Imm32(tolerance)); + masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg()); + pushArg(R1.scratchReg()); + + CallVMPhase phase = POST_INITIALIZE; + if (earlyCheck) + phase = PRE_INITIALIZE; + else if (needsEarlyStackCheck()) + phase = CHECK_OVER_RECURSED; + + if (!callVMNonOp(CheckOverRecursedWithExtraInfo, phase)) + return false; + + icEntries_.back().setFakeKind(earlyCheck + ? ICEntry::Kind_EarlyStackCheck + : ICEntry::Kind_StackCheck); + + masm.bind(&skipCall); + return true; +} + +void +BaselineCompiler::emitIsDebuggeeCheck() +{ + if (compileDebugInstrumentation_) { + masm.Push(BaselineFrameReg); + masm.setupUnalignedABICall(R0.scratchReg()); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + masm.passABIArg(R0.scratchReg()); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::FrameIsDebuggeeCheck)); + masm.Pop(BaselineFrameReg); + } +} + +typedef bool (*DebugPrologueFn)(JSContext*, BaselineFrame*, jsbytecode*, bool*); +static const VMFunction DebugPrologueInfo = + FunctionInfo(jit::DebugPrologue, "DebugPrologue"); + +bool +BaselineCompiler::emitDebugPrologue() +{ + if (compileDebugInstrumentation_) { + // Load pointer to BaselineFrame in R0. 
+        masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+        prepareVMCall();
+        pushArg(ImmPtr(pc));
+        pushArg(R0.scratchReg());
+        if (!callVM(DebugPrologueInfo))
+            return false;
+
+        // Fix up the fake ICEntry appended by callVM for on-stack recompilation.
+        icEntries_.back().setFakeKind(ICEntry::Kind_DebugPrologue);
+
+        // If the stub returns |true|, we have to return the value stored in the
+        // frame's return value slot.
+        Label done;
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
+        {
+            masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+            masm.jump(&return_);
+        }
+        masm.bind(&done);
+    }
+
+    postDebugPrologueOffset_ = CodeOffset(masm.currentOffset());
+
+    return true;
+}
+
+typedef bool (*CheckGlobalOrEvalDeclarationConflictsFn)(JSContext*, BaselineFrame*);
+static const VMFunction CheckGlobalOrEvalDeclarationConflictsInfo =
+    FunctionInfo<CheckGlobalOrEvalDeclarationConflictsFn>(jit::CheckGlobalOrEvalDeclarationConflicts,
+                                                          "CheckGlobalOrEvalDeclarationConflicts");
+
+typedef bool (*InitFunctionEnvironmentObjectsFn)(JSContext*, BaselineFrame*);
+static const VMFunction InitFunctionEnvironmentObjectsInfo =
+    FunctionInfo<InitFunctionEnvironmentObjectsFn>(jit::InitFunctionEnvironmentObjects,
+                                                   "InitFunctionEnvironmentObjects");
+
+bool
+BaselineCompiler::initEnvironmentChain()
+{
+    CallVMPhase phase = POST_INITIALIZE;
+    if (needsEarlyStackCheck())
+        phase = CHECK_OVER_RECURSED;
+
+    RootedFunction fun(cx, function());
+    if (fun) {
+        // Use callee->environment as scope chain. Note that we do this also
+        // for needsSomeEnvironmentObject functions, so that the scope chain
+        // slot is properly initialized if the call triggers GC.
+        Register callee = R0.scratchReg();
+        Register scope = R1.scratchReg();
+        masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), callee);
+        masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), scope);
+        masm.storePtr(scope, frame.addressOfEnvironmentChain());
+
+        if (fun->needsFunctionEnvironmentObjects()) {
+            // Call into the VM to create the proper environment objects.
+            prepareVMCall();
+
+            masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+            pushArg(R0.scratchReg());
+
+            if (!callVMNonOp(InitFunctionEnvironmentObjectsInfo, phase))
+                return false;
+        }
+    } else if (module()) {
+        // Modules use a pre-created scope object.
+        Register scope = R1.scratchReg();
+        masm.movePtr(ImmGCPtr(&module()->initialEnvironment()), scope);
+        masm.storePtr(scope, frame.addressOfEnvironmentChain());
+    } else {
+        // EnvironmentChain pointer in BaselineFrame has already been initialized
+        // in prologue, but we need to check for redeclaration errors.
+
+        prepareVMCall();
+        masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+        pushArg(R0.scratchReg());
+
+        if (!callVMNonOp(CheckGlobalOrEvalDeclarationConflictsInfo, phase))
+            return false;
+    }
+
+    return true;
+}
+
+typedef bool (*InterruptCheckFn)(JSContext*);
+static const VMFunction InterruptCheckInfo =
+    FunctionInfo<InterruptCheckFn>(InterruptCheck, "InterruptCheck");
+
+bool
+BaselineCompiler::emitInterruptCheck()
+{
+    frame.syncStack(0);
+
+    Label done;
+    void* interrupt = cx->runtimeAddressOfInterruptUint32();
+    masm.branch32(Assembler::Equal, AbsoluteAddress(interrupt), Imm32(0), &done);
+
+    prepareVMCall();
+    if (!callVM(InterruptCheckInfo))
+        return false;
+
+    masm.bind(&done);
+    return true;
+}
+
+typedef bool (*IonCompileScriptForBaselineFn)(JSContext*, BaselineFrame*, jsbytecode*);
+static const VMFunction IonCompileScriptForBaselineInfo =
+    FunctionInfo<IonCompileScriptForBaselineFn>(IonCompileScriptForBaseline,
+                                                "IonCompileScriptForBaseline");
+
+bool
+BaselineCompiler::emitWarmUpCounterIncrement(bool allowOsr)
+{
+    // Emit no warm-up counter increments or bailouts if Ion is not
+    // enabled, or if the script will never be Ion-compileable.
+
+    if (!ionCompileable_ && !ionOSRCompileable_)
+        return true;
+
+    frame.assertSyncedStack();
+
+    Register scriptReg = R2.scratchReg();
+    Register countReg = R0.scratchReg();
+    Address warmUpCounterAddr(scriptReg, JSScript::offsetOfWarmUpCounter());
+
+    masm.movePtr(ImmGCPtr(script), scriptReg);
+    masm.load32(warmUpCounterAddr, countReg);
+    masm.add32(Imm32(1), countReg);
+    masm.store32(countReg, warmUpCounterAddr);
+
+    // If this is a loop inside a catch or finally block, increment the warmup
+    // counter but don't attempt OSR (Ion only compiles the try block).
+    if (analysis_.info(pc).loopEntryInCatchOrFinally) {
+        MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+        return true;
+    }
+
+    // OSR not possible at this loop entry.
+    if (!allowOsr) {
+        MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPENTRY);
+        return true;
+    }
+
+    Label skipCall;
+
+    const OptimizationInfo* info = IonOptimizations.get(IonOptimizations.firstLevel());
+    uint32_t warmUpThreshold = info->compilerWarmUpThreshold(script, pc);
+    masm.branch32(Assembler::LessThan, countReg, Imm32(warmUpThreshold), &skipCall);
+
+    masm.branchPtr(Assembler::Equal,
+                   Address(scriptReg, JSScript::offsetOfIonScript()),
+                   ImmPtr(ION_COMPILING_SCRIPT), &skipCall);
+
+    // Try to compile and/or finish a compilation.
+    if (JSOp(*pc) == JSOP_LOOPENTRY) {
+        // During the loop entry we can try to OSR into Ion.
+        // The IC has logic for this.
+        ICWarmUpCounter_Fallback::Compiler stubCompiler(cx);
+        if (!emitNonOpIC(stubCompiler.getStub(&stubSpace_)))
+            return false;
+    } else {
+        // To call stubs we need to have an opcode. This code handles the
+        // prologue and there is no dedicated opcode present. Therefore use an
+        // annotated vm call.
+        prepareVMCall();
+
+        masm.Push(ImmPtr(pc));
+        masm.PushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
+
+        if (!callVM(IonCompileScriptForBaselineInfo))
+            return false;
+
+        // Annotate the ICEntry as warmup counter.
+ icEntries_.back().setFakeKind(ICEntry::Kind_WarmupCounter); + } + masm.bind(&skipCall); + + return true; +} + +bool +BaselineCompiler::emitArgumentTypeChecks() +{ + if (!function()) + return true; + + frame.pushThis(); + frame.popRegsAndSync(1); + + ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline, + (uint32_t) 0); + if (!emitNonOpIC(compiler.getStub(&stubSpace_))) + return false; + + for (size_t i = 0; i < function()->nargs(); i++) { + frame.pushArg(i); + frame.popRegsAndSync(1); + + ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline, + i + 1); + if (!emitNonOpIC(compiler.getStub(&stubSpace_))) + return false; + } + + return true; +} + +bool +BaselineCompiler::emitDebugTrap() +{ + MOZ_ASSERT(compileDebugInstrumentation_); + MOZ_ASSERT(frame.numUnsyncedSlots() == 0); + + bool enabled = script->stepModeEnabled() || script->hasBreakpointsAt(pc); + + // Emit patchable call to debug trap handler. + JitCode* handler = cx->runtime()->jitRuntime()->debugTrapHandler(cx); + if (!handler) + return false; + mozilla::DebugOnly offset = masm.toggledCall(handler, enabled); + +#ifdef DEBUG + // Patchable call offset has to match the pc mapping offset. + PCMappingEntry& entry = pcMappingEntries_.back(); + MOZ_ASSERT((&offset)->offset() == entry.nativeOffset); +#endif + + // Add an IC entry for the return offset -> pc mapping. + return appendICEntry(ICEntry::Kind_DebugTrap, masm.currentOffset()); +} + +#ifdef JS_TRACE_LOGGING +bool +BaselineCompiler::emitTraceLoggerEnter() +{ + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + AllocatableRegisterSet regs(RegisterSet::Volatile()); + Register loggerReg = regs.takeAnyGeneral(); + Register scriptReg = regs.takeAnyGeneral(); + + Label noTraceLogger; + if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger))) + return false; + + masm.Push(loggerReg); + masm.Push(scriptReg); + + masm.movePtr(ImmPtr(logger), loggerReg); + + // Script start. + masm.movePtr(ImmGCPtr(script), scriptReg); + masm.loadPtr(Address(scriptReg, JSScript::offsetOfBaselineScript()), scriptReg); + Address scriptEvent(scriptReg, BaselineScript::offsetOfTraceLoggerScriptEvent()); + masm.computeEffectiveAddress(scriptEvent, scriptReg); + masm.tracelogStartEvent(loggerReg, scriptReg); + + // Engine start. 
+ masm.tracelogStartId(loggerReg, TraceLogger_Baseline, /* force = */ true); + + masm.Pop(scriptReg); + masm.Pop(loggerReg); + + masm.bind(&noTraceLogger); + + return true; +} + +bool +BaselineCompiler::emitTraceLoggerExit() +{ + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + AllocatableRegisterSet regs(RegisterSet::Volatile()); + Register loggerReg = regs.takeAnyGeneral(); + + Label noTraceLogger; + if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger))) + return false; + + masm.Push(loggerReg); + masm.movePtr(ImmPtr(logger), loggerReg); + + masm.tracelogStopId(loggerReg, TraceLogger_Baseline, /* force = */ true); + masm.tracelogStopId(loggerReg, TraceLogger_Scripts, /* force = */ true); + + masm.Pop(loggerReg); + + masm.bind(&noTraceLogger); + + return true; +} + +bool +BaselineCompiler::emitTraceLoggerResume(Register baselineScript, AllocatableGeneralRegisterSet& regs) +{ + Register scriptId = regs.takeAny(); + Register loggerReg = regs.takeAny(); + + Label noTraceLogger; + if (!traceLoggerToggleOffsets_.append(masm.toggledJump(&noTraceLogger))) + return false; + + TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime()); + masm.movePtr(ImmPtr(logger), loggerReg); + + Address scriptEvent(baselineScript, BaselineScript::offsetOfTraceLoggerScriptEvent()); + masm.computeEffectiveAddress(scriptEvent, scriptId); + masm.tracelogStartEvent(loggerReg, scriptId); + masm.tracelogStartId(loggerReg, TraceLogger_Baseline, /* force = */ true); + + regs.add(loggerReg); + regs.add(scriptId); + + masm.bind(&noTraceLogger); + + return true; +} +#endif + +void +BaselineCompiler::emitProfilerEnterFrame() +{ + // Store stack position to lastProfilingFrame variable, guarded by a toggled jump. + // Starts off initially disabled. + Label noInstrument; + CodeOffset toggleOffset = masm.toggledJump(&noInstrument); + masm.profilerEnterFrame(masm.getStackPointer(), R0.scratchReg()); + masm.bind(&noInstrument); + + // Store the start offset in the appropriate location. + MOZ_ASSERT(!profilerEnterFrameToggleOffset_.bound()); + profilerEnterFrameToggleOffset_ = toggleOffset; +} + +void +BaselineCompiler::emitProfilerExitFrame() +{ + // Store previous frame to lastProfilingFrame variable, guarded by a toggled jump. + // Starts off initially disabled. + Label noInstrument; + CodeOffset toggleOffset = masm.toggledJump(&noInstrument); + masm.profilerExitFrame(); + masm.bind(&noInstrument); + + // Store the start offset in the appropriate location. + MOZ_ASSERT(!profilerExitFrameToggleOffset_.bound()); + profilerExitFrameToggleOffset_ = toggleOffset; +} + +MethodStatus +BaselineCompiler::emitBody() +{ + MOZ_ASSERT(pc == script->code()); + + bool lastOpUnreachable = false; + uint32_t emittedOps = 0; + mozilla::DebugOnly prevpc = pc; + + while (true) { + JSOp op = JSOp(*pc); + JitSpew(JitSpew_BaselineOp, "Compiling op @ %d: %s", + int(script->pcToOffset(pc)), CodeName[op]); + + BytecodeInfo* info = analysis_.maybeInfo(pc); + + // Skip unreachable ops. + if (!info) { + // Test if last instructions and stop emitting in that case. + pc += GetBytecodeLength(pc); + if (pc >= script->codeEnd()) + break; + + lastOpUnreachable = true; + prevpc = pc; + continue; + } + + // Fully sync the stack if there are incoming jumps. + if (info->jumpTarget) { + frame.syncStack(0); + frame.setStackDepth(info->stackDepth); + } + + // Always sync in debug mode. + if (compileDebugInstrumentation_) + frame.syncStack(0); + + // At the beginning of any op, at most the top 2 stack-values are unsynced. 
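+        // (That is, they have not yet been stored to the frame's stack slots;
+        // syncStack(2) flushes everything below the top two, so each op only
+        // has to reason about at most two unsynced values.)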
+ if (frame.stackDepth() > 2) + frame.syncStack(2); + + frame.assertValidState(*info); + + masm.bind(labelOf(pc)); + + // Add a PC -> native mapping entry for the current op. These entries are + // used when we need the native code address for a given pc, for instance + // for bailouts from Ion, the debugger and exception handling. See + // PCMappingIndexEntry for more information. + bool addIndexEntry = (pc == script->code() || lastOpUnreachable || emittedOps > 100); + if (addIndexEntry) + emittedOps = 0; + if (!addPCMappingEntry(addIndexEntry)) { + ReportOutOfMemory(cx); + return Method_Error; + } + + // Emit traps for breakpoints and step mode. + if (compileDebugInstrumentation_ && !emitDebugTrap()) + return Method_Error; + + switch (op) { + default: + JitSpew(JitSpew_BaselineAbort, "Unhandled op: %s", CodeName[op]); + return Method_CantCompile; + +#define EMIT_OP(OP) \ + case OP: \ + if (!this->emit_##OP()) \ + return Method_Error; \ + break; +OPCODE_LIST(EMIT_OP) +#undef EMIT_OP + } + + // If the main instruction is not a jump target, then we emit the + // corresponding code coverage counter. + if (pc == script->main() && !BytecodeIsJumpTarget(op)) { + if (!emit_JSOP_JUMPTARGET()) + return Method_Error; + } + + // Test if last instructions and stop emitting in that case. + pc += GetBytecodeLength(pc); + if (pc >= script->codeEnd()) + break; + + emittedOps++; + lastOpUnreachable = false; +#ifdef DEBUG + prevpc = pc; +#endif + } + + MOZ_ASSERT(JSOp(*prevpc) == JSOP_RETRVAL); + return Method_Compiled; +} + +bool +BaselineCompiler::emit_JSOP_NOP() +{ + return true; +} + +bool +BaselineCompiler::emit_JSOP_NOP_DESTRUCTURING() +{ + return true; +} + +bool +BaselineCompiler::emit_JSOP_LABEL() +{ + return true; +} + +bool +BaselineCompiler::emit_JSOP_POP() +{ + frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_POPN() +{ + frame.popn(GET_UINT16(pc)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_DUPAT() +{ + frame.syncStack(0); + + // DUPAT takes a value on the stack and re-pushes it on top. It's like + // GETLOCAL but it addresses from the top of the stack instead of from the + // stack frame. + + int depth = -(GET_UINT24(pc) + 1); + masm.loadValue(frame.addressOfStackValue(frame.peek(depth)), R0); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_DUP() +{ + // Keep top stack value in R0, sync the rest so that we can use R1. We use + // separate registers because every register can be used by at most one + // StackValue. + frame.popRegsAndSync(1); + masm.moveValue(R0, R1); + + // inc/dec ops use DUP followed by ONE, ADD. Push R0 last to avoid a move. + frame.push(R1); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_DUP2() +{ + frame.syncStack(0); + + masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1); + + frame.push(R0); + frame.push(R1); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SWAP() +{ + // Keep top stack values in R0 and R1. + frame.popRegsAndSync(2); + + frame.push(R1); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_PICK() +{ + frame.syncStack(0); + + // Pick takes a value on the stack and moves it to the top. + // For instance, pick 2: + // before: A B C D E + // after : A B D E C + + // First, move value at -(amount + 1) into R0. + int depth = -(GET_INT8(pc) + 1); + masm.loadValue(frame.addressOfStackValue(frame.peek(depth)), R0); + + // Move the other values down. 
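+ // NOTE (illustration): for GET_INT8(pc) == 2 the loop below performs the
+ // rotation from the comment above one Value at a time through R1:
+ //   slots: ... C D E   -->   R0 = C; D -> slot(-3); E -> slot(-2); push R0
+ //   slots: ... D E C
+ // No IC is needed because PICK only shuffles already-materialized values.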
+ depth++; + for (; depth < 0; depth++) { + Address source = frame.addressOfStackValue(frame.peek(depth)); + Address dest = frame.addressOfStackValue(frame.peek(depth - 1)); + masm.loadValue(source, R1); + masm.storeValue(R1, dest); + } + + // Push R0. + frame.pop(); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GOTO() +{ + frame.syncStack(0); + + jsbytecode* target = pc + GET_JUMP_OFFSET(pc); + masm.jump(labelOf(target)); + return true; +} + +bool +BaselineCompiler::emitToBoolean() +{ + Label skipIC; + masm.branchTestBoolean(Assembler::Equal, R0, &skipIC); + + // Call IC + ICToBool_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + masm.bind(&skipIC); + return true; +} + +bool +BaselineCompiler::emitTest(bool branchIfTrue) +{ + bool knownBoolean = frame.peek(-1)->isKnownBoolean(); + + // Keep top stack value in R0. + frame.popRegsAndSync(1); + + if (!knownBoolean && !emitToBoolean()) + return false; + + // IC will leave a BooleanValue in R0, just need to branch on it. + masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_IFEQ() +{ + return emitTest(false); +} + +bool +BaselineCompiler::emit_JSOP_IFNE() +{ + return emitTest(true); +} + +bool +BaselineCompiler::emitAndOr(bool branchIfTrue) +{ + bool knownBoolean = frame.peek(-1)->isKnownBoolean(); + + // AND and OR leave the original value on the stack. + frame.syncStack(0); + + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + if (!knownBoolean && !emitToBoolean()) + return false; + + masm.branchTestBooleanTruthy(branchIfTrue, R0, labelOf(pc + GET_JUMP_OFFSET(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_AND() +{ + return emitAndOr(false); +} + +bool +BaselineCompiler::emit_JSOP_OR() +{ + return emitAndOr(true); +} + +bool +BaselineCompiler::emit_JSOP_NOT() +{ + bool knownBoolean = frame.peek(-1)->isKnownBoolean(); + + // Keep top stack value in R0. + frame.popRegsAndSync(1); + + if (!knownBoolean && !emitToBoolean()) + return false; + + masm.notBoolean(R0); + + frame.push(R0, JSVAL_TYPE_BOOLEAN); + return true; +} + +bool +BaselineCompiler::emit_JSOP_POS() +{ + // Keep top stack value in R0. + frame.popRegsAndSync(1); + + // Inline path for int32 and double. + Label done; + masm.branchTestNumber(Assembler::Equal, R0, &done); + + // Call IC. + ICToNumber_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + masm.bind(&done); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_LOOPHEAD() +{ + if (!emit_JSOP_JUMPTARGET()) + return false; + return emitInterruptCheck(); +} + +bool +BaselineCompiler::emit_JSOP_LOOPENTRY() +{ + if (!emit_JSOP_JUMPTARGET()) + return false; + frame.syncStack(0); + return emitWarmUpCounterIncrement(LoopEntryCanIonOsr(pc)); +} + +bool +BaselineCompiler::emit_JSOP_VOID() +{ + frame.pop(); + frame.push(UndefinedValue()); + return true; +} + +bool +BaselineCompiler::emit_JSOP_UNDEFINED() +{ + // If this ever changes, change what JSOP_GIMPLICITTHIS does too. 
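+ // NOTE: JSOP_GIMPLICITTHIS (further down in this file) takes the same
+ // shortcut and pushes UndefinedValue() directly when the script has no
+ // non-syntactic scope, which is why the two opcodes must stay in sync.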
+ frame.push(UndefinedValue()); + return true; +} + +bool +BaselineCompiler::emit_JSOP_HOLE() +{ + frame.push(MagicValue(JS_ELEMENTS_HOLE)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_NULL() +{ + frame.push(NullValue()); + return true; +} + +typedef bool (*ThrowCheckIsObjectFn)(JSContext*, CheckIsObjectKind); +static const VMFunction ThrowCheckIsObjectInfo = + FunctionInfo(ThrowCheckIsObject, "ThrowCheckIsObject"); + +bool +BaselineCompiler::emit_JSOP_CHECKISOBJ() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + Label ok; + masm.branchTestObject(Assembler::Equal, R0, &ok); + + prepareVMCall(); + + pushArg(Imm32(GET_UINT8(pc))); + if (!callVM(ThrowCheckIsObjectInfo)) + return false; + + masm.bind(&ok); + return true; +} + +typedef bool (*ThrowUninitializedThisFn)(JSContext*, BaselineFrame* frame); +static const VMFunction ThrowUninitializedThisInfo = + FunctionInfo(BaselineThrowUninitializedThis, + "BaselineThrowUninitializedThis"); + +bool +BaselineCompiler::emit_JSOP_CHECKTHIS() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + return emitCheckThis(R0); +} + +bool +BaselineCompiler::emitCheckThis(ValueOperand val) +{ + Label thisOK; + masm.branchTestMagic(Assembler::NotEqual, val, &thisOK); + + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, val.scratchReg()); + pushArg(val.scratchReg()); + + if (!callVM(ThrowUninitializedThisInfo)) + return false; + + masm.bind(&thisOK); + return true; +} + +typedef bool (*ThrowBadDerivedReturnFn)(JSContext*, HandleValue); +static const VMFunction ThrowBadDerivedReturnInfo = + FunctionInfo(jit::ThrowBadDerivedReturn, "ThrowBadDerivedReturn"); + +bool +BaselineCompiler::emit_JSOP_CHECKRETURN() +{ + MOZ_ASSERT(script->isDerivedClassConstructor()); + + // Load |this| in R0, return value in R1. + frame.popRegsAndSync(1); + emitLoadReturnValue(R1); + + Label done, returnOK; + masm.branchTestObject(Assembler::Equal, R1, &done); + masm.branchTestUndefined(Assembler::Equal, R1, &returnOK); + + prepareVMCall(); + pushArg(R1); + if (!callVM(ThrowBadDerivedReturnInfo)) + return false; + masm.assumeUnreachable("Should throw on bad derived constructor return"); + + masm.bind(&returnOK); + + if (!emitCheckThis(R0)) + return false; + + // Store |this| in the return value slot. + masm.storeValue(R0, frame.addressOfReturnValue()); + masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags()); + + masm.bind(&done); + return true; +} + +typedef bool (*GetFunctionThisFn)(JSContext*, BaselineFrame*, MutableHandleValue); +static const VMFunction GetFunctionThisInfo = + FunctionInfo(jit::BaselineGetFunctionThis, "BaselineGetFunctionThis"); + +bool +BaselineCompiler::emit_JSOP_FUNCTIONTHIS() +{ + MOZ_ASSERT(function()); + MOZ_ASSERT(!function()->isArrow()); + + frame.pushThis(); + + // In strict mode code or self-hosted functions, |this| is left alone. + if (script->strict() || (function() && function()->isSelfHostedBuiltin())) + return true; + + // Load |thisv| in R0. Skip the call if it's already an object. 
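+ // NOTE (sketch, assuming the VM helper mirrors the interpreter's
+ // BoxNonStrictThis path): for non-strict code a primitive |this| is boxed,
+ // roughly
+ //   if (!thisv.isObject())
+ //       thisv = BoxNonStrictThis(cx, thisv); // object wrapper, or the
+ //                                            // global this for null/undefined
+ // which is why an object |this| can skip the VM call entirely.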
+ Label skipCall; + frame.popRegsAndSync(1); + masm.branchTestObject(Assembler::Equal, R0, &skipCall); + + prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg()); + + pushArg(R1.scratchReg()); + + if (!callVM(GetFunctionThisInfo)) + return false; + + masm.bind(&skipCall); + frame.push(R0); + return true; +} + +typedef bool (*GetNonSyntacticGlobalThisFn)(JSContext*, HandleObject, MutableHandleValue); +static const VMFunction GetNonSyntacticGlobalThisInfo = + FunctionInfo(js::GetNonSyntacticGlobalThis, + "GetNonSyntacticGlobalThis"); + +bool +BaselineCompiler::emit_JSOP_GLOBALTHIS() +{ + frame.syncStack(0); + + if (!script->hasNonSyntacticScope()) { + LexicalEnvironmentObject* globalLexical = &script->global().lexicalEnvironment(); + masm.moveValue(globalLexical->thisValue(), R0); + frame.push(R0); + return true; + } + + prepareVMCall(); + + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + pushArg(R0.scratchReg()); + + if (!callVM(GetNonSyntacticGlobalThisInfo)) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_TRUE() +{ + frame.push(BooleanValue(true)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_FALSE() +{ + frame.push(BooleanValue(false)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_ZERO() +{ + frame.push(Int32Value(0)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_ONE() +{ + frame.push(Int32Value(1)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INT8() +{ + frame.push(Int32Value(GET_INT8(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INT32() +{ + frame.push(Int32Value(GET_INT32(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_UINT16() +{ + frame.push(Int32Value(GET_UINT16(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_UINT24() +{ + frame.push(Int32Value(GET_UINT24(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_DOUBLE() +{ + frame.push(script->getConst(GET_UINT32_INDEX(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRING() +{ + frame.push(StringValue(script->getAtom(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SYMBOL() +{ + unsigned which = GET_UINT8(pc); + JS::Symbol* sym = cx->runtime()->wellKnownSymbols->get(which); + frame.push(SymbolValue(sym)); + return true; +} + +typedef JSObject* (*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind); +static const VMFunction DeepCloneObjectLiteralInfo = + FunctionInfo(DeepCloneObjectLiteral, "DeepCloneObjectLiteral"); + +bool +BaselineCompiler::emit_JSOP_OBJECT() +{ + JSCompartment* comp = cx->compartment(); + if (comp->creationOptions().cloneSingletons()) { + RootedObject obj(cx, script->getObject(GET_UINT32_INDEX(pc))); + if (!obj) + return false; + + prepareVMCall(); + + pushArg(ImmWord(TenuredObject)); + pushArg(ImmGCPtr(obj)); + + if (!callVM(DeepCloneObjectLiteralInfo)) + return false; + + // Box and push return value. 
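+ // NOTE: VM functions that return a JSObject* leave the raw pointer in
+ // ReturnReg; tagValue() re-wraps it as an object Value in R0 before the
+ // push. Every object-returning callVM in this file follows this pattern.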
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; + } + + comp->behaviors().setSingletonsAsValues(); + frame.push(ObjectValue(*script->getObject(pc))); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CALLSITEOBJ() +{ + RootedObject cso(cx, script->getObject(pc)); + RootedObject raw(cx, script->getObject(GET_UINT32_INDEX(pc) + 1)); + if (!cso || !raw) + return false; + RootedValue rawValue(cx); + rawValue.setObject(*raw); + + if (!ProcessCallSiteObjOperation(cx, cso, raw, rawValue)) + return false; + + frame.push(ObjectValue(*cso)); + return true; +} + +typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, JSObject*); +static const VMFunction CloneRegExpObjectInfo = + FunctionInfo(CloneRegExpObject, "CloneRegExpObject"); + +bool +BaselineCompiler::emit_JSOP_REGEXP() +{ + RootedObject reObj(cx, script->getRegExp(pc)); + + prepareVMCall(); + pushArg(ImmGCPtr(reObj)); + if (!callVM(CloneRegExpObjectInfo)) + return false; + + // Box and push return value. + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject); +static const VMFunction LambdaInfo = FunctionInfo(js::Lambda, "Lambda"); + +bool +BaselineCompiler::emit_JSOP_LAMBDA() +{ + RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc))); + + prepareVMCall(); + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + pushArg(R0.scratchReg()); + pushArg(ImmGCPtr(fun)); + + if (!callVM(LambdaInfo)) + return false; + + // Box and push return value. + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue); +static const VMFunction LambdaArrowInfo = + FunctionInfo(js::LambdaArrow, "LambdaArrow"); + +bool +BaselineCompiler::emit_JSOP_LAMBDA_ARROW() +{ + // Keep pushed newTarget in R0. + frame.popRegsAndSync(1); + + RootedFunction fun(cx, script->getFunction(GET_UINT32_INDEX(pc))); + + prepareVMCall(); + masm.loadPtr(frame.addressOfEnvironmentChain(), R2.scratchReg()); + + pushArg(R0); + pushArg(R2.scratchReg()); + pushArg(ImmGCPtr(fun)); + + if (!callVM(LambdaArrowInfo)) + return false; + + // Box and push return value. 
+ masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +void +BaselineCompiler::storeValue(const StackValue* source, const Address& dest, + const ValueOperand& scratch) +{ + switch (source->kind()) { + case StackValue::Constant: + masm.storeValue(source->constant(), dest); + break; + case StackValue::Register: + masm.storeValue(source->reg(), dest); + break; + case StackValue::LocalSlot: + masm.loadValue(frame.addressOfLocal(source->localSlot()), scratch); + masm.storeValue(scratch, dest); + break; + case StackValue::ArgSlot: + masm.loadValue(frame.addressOfArg(source->argSlot()), scratch); + masm.storeValue(scratch, dest); + break; + case StackValue::ThisSlot: + masm.loadValue(frame.addressOfThis(), scratch); + masm.storeValue(scratch, dest); + break; + case StackValue::EvalNewTargetSlot: + MOZ_ASSERT(script->isForEval()); + masm.loadValue(frame.addressOfEvalNewTarget(), scratch); + masm.storeValue(scratch, dest); + break; + case StackValue::Stack: + masm.loadValue(frame.addressOfStackValue(source), scratch); + masm.storeValue(scratch, dest); + break; + default: + MOZ_CRASH("Invalid kind"); + } +} + +bool +BaselineCompiler::emit_JSOP_BITOR() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_BITXOR() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_BITAND() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_LSH() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_RSH() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_URSH() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_ADD() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_SUB() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_MUL() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_DIV() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_MOD() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_POW() +{ + return emitBinaryArith(); +} + +bool +BaselineCompiler::emitBinaryArith() +{ + // Keep top JSStack value in R0 and R2 + frame.popRegsAndSync(2); + + // Call IC + ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emitUnaryArith() +{ + // Keep top stack value in R0. + frame.popRegsAndSync(1); + + // Call IC + ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_BITNOT() +{ + return emitUnaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_NEG() +{ + return emitUnaryArith(); +} + +bool +BaselineCompiler::emit_JSOP_LT() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_LE() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_GT() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_GE() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_EQ() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_NE() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emitCompare() +{ + // CODEGEN + + // Keep top JSStack value in R0 and R1. + frame.popRegsAndSync(2); + + // Call IC. 
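+ // NOTE (elaboration): every IC-compiled opcode follows the same three-step
+ // pattern seen here:
+ //   1. materialize the operands in R0/R1 (popRegsAndSync above),
+ //   2. emitOpIC(...) -- emit a call through the IC chain rooted at the
+ //      fallback stub created below,
+ //   3. push the result, which the IC returns in R0.
+ // The fallback stub attaches optimized stubs for the operand types it
+ // observes at runtime, so the code emitted here stays fully generic.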
+ ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0, JSVAL_TYPE_BOOLEAN); + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRICTEQ() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_STRICTNE() +{ + return emitCompare(); +} + +bool +BaselineCompiler::emit_JSOP_CONDSWITCH() +{ + return true; +} + +bool +BaselineCompiler::emit_JSOP_CASE() +{ + frame.popRegsAndSync(2); + frame.push(R0); + frame.syncStack(0); + + // Call IC. + ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + Register payload = masm.extractInt32(R0, R0.scratchReg()); + jsbytecode* target = pc + GET_JUMP_OFFSET(pc); + + Label done; + masm.branch32(Assembler::Equal, payload, Imm32(0), &done); + { + // Pop the switch value if the case matches. + masm.addToStackPtr(Imm32(sizeof(Value))); + masm.jump(labelOf(target)); + } + masm.bind(&done); + return true; +} + +bool +BaselineCompiler::emit_JSOP_DEFAULT() +{ + frame.pop(); + return emit_JSOP_GOTO(); +} + +bool +BaselineCompiler::emit_JSOP_LINENO() +{ + return true; +} + +bool +BaselineCompiler::emit_JSOP_NEWARRAY() +{ + frame.syncStack(0); + + uint32_t length = GET_UINT32(pc); + MOZ_ASSERT(length <= INT32_MAX, + "the bytecode emitter must fail to compile code that would " + "produce JSOP_NEWARRAY with a length exceeding int32_t range"); + + // Pass length in R0. + masm.move32(Imm32(AssertedCast(length)), R0.scratchReg()); + + ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array); + if (!group) + return false; + + ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SPREADCALLARRAY() +{ + return emit_JSOP_NEWARRAY(); +} + +typedef JSObject* (*NewArrayCopyOnWriteFn)(JSContext*, HandleArrayObject, gc::InitialHeap); +const VMFunction jit::NewArrayCopyOnWriteInfo = + FunctionInfo(js::NewDenseCopyOnWriteArray, "NewDenseCopyOnWriteArray"); + +bool +BaselineCompiler::emit_JSOP_NEWARRAY_COPYONWRITE() +{ + RootedScript scriptRoot(cx, script); + JSObject* obj = ObjectGroup::getOrFixupCopyOnWriteObject(cx, scriptRoot, pc); + if (!obj) + return false; + + prepareVMCall(); + + pushArg(Imm32(gc::DefaultHeap)); + pushArg(ImmGCPtr(obj)); + + if (!callVM(NewArrayCopyOnWriteInfo)) + return false; + + // Box and push return value. + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITELEM_ARRAY() +{ + // Keep the object and rhs on the stack. + frame.syncStack(0); + + // Load object in R0, index in R1. + masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0); + uint32_t index = GET_UINT32(pc); + MOZ_ASSERT(index <= INT32_MAX, + "the bytecode emitter must fail to compile code that would " + "produce JSOP_INITELEM_ARRAY with a length exceeding " + "int32_t range"); + masm.moveValue(Int32Value(AssertedCast(index)), R1); + + // Call IC. + ICSetElem_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Pop the rhs, so that the object is on the top of the stack. 
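+ // NOTE: the array must stay on the stack because an array literal emits one
+ // JSOP_INITELEM_ARRAY per element, each reusing the same array object; only
+ // the value just stored is consumed here.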
+ frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_NEWOBJECT() +{ + frame.syncStack(0); + + ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_NEWINIT() +{ + frame.syncStack(0); + JSProtoKey key = JSProtoKey(GET_UINT8(pc)); + + if (key == JSProto_Array) { + // Pass length in R0. + masm.move32(Imm32(0), R0.scratchReg()); + + ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array); + if (!group) + return false; + + ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + } else { + MOZ_ASSERT(key == JSProto_Object); + + ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + } + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITELEM() +{ + // Store RHS in the scratch slot. + storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2); + frame.pop(); + + // Keep object and index in R0 and R1. + frame.popRegsAndSync(2); + + // Push the object to store the result of the IC. + frame.push(R0); + frame.syncStack(0); + + // Keep RHS on the stack. + frame.pushScratchValue(); + + // Call IC. + ICSetElem_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Pop the rhs, so that the object is on the top of the stack. + frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENELEM() +{ + return emit_JSOP_INITELEM(); +} + +typedef bool (*MutateProtoFn)(JSContext* cx, HandlePlainObject obj, HandleValue newProto); +static const VMFunction MutateProtoInfo = + FunctionInfo(MutatePrototype, "MutatePrototype"); + +bool +BaselineCompiler::emit_JSOP_MUTATEPROTO() +{ + // Keep values on the stack for the decompiler. + frame.syncStack(0); + + masm.extractObject(frame.addressOfStackValue(frame.peek(-2)), R0.scratchReg()); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1); + + prepareVMCall(); + + pushArg(R1); + pushArg(R0.scratchReg()); + + if (!callVM(MutateProtoInfo)) + return false; + + frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITPROP() +{ + // Keep lhs in R0, rhs in R1. + frame.popRegsAndSync(2); + + // Push the object to store the result of the IC. + frame.push(R0); + frame.syncStack(0); + + // Call IC. + ICSetProp_Fallback::Compiler compiler(cx); + return emitOpIC(compiler.getStub(&stubSpace_)); +} + +bool +BaselineCompiler::emit_JSOP_INITLOCKEDPROP() +{ + return emit_JSOP_INITPROP(); +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENPROP() +{ + return emit_JSOP_INITPROP(); +} + +typedef bool (*NewbornArrayPushFn)(JSContext*, HandleObject, const Value&); +static const VMFunction NewbornArrayPushInfo = + FunctionInfo(NewbornArrayPush, "NewbornArrayPush"); + +bool +BaselineCompiler::emit_JSOP_ARRAYPUSH() +{ + // Keep value in R0, object in R1. + frame.popRegsAndSync(2); + masm.unboxObject(R1, R1.scratchReg()); + + prepareVMCall(); + + pushArg(R0); + pushArg(R1.scratchReg()); + + return callVM(NewbornArrayPushInfo); +} + +bool +BaselineCompiler::emit_JSOP_GETELEM() +{ + // Keep top two stack values in R0 and R1. + frame.popRegsAndSync(2); + + // Call IC. 
+ ICGetElem_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CALLELEM() +{ + return emit_JSOP_GETELEM(); +} + +bool +BaselineCompiler::emit_JSOP_SETELEM() +{ + // Store RHS in the scratch slot. + storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2); + frame.pop(); + + // Keep object and index in R0 and R1. + frame.popRegsAndSync(2); + + // Keep RHS on the stack. + frame.pushScratchValue(); + + // Call IC. + ICSetElem_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRICTSETELEM() +{ + return emit_JSOP_SETELEM(); +} + +typedef bool (*DeleteElementFn)(JSContext*, HandleValue, HandleValue, bool*); +static const VMFunction DeleteElementStrictInfo + = FunctionInfo(DeleteElementJit, "DeleteElementStrict"); +static const VMFunction DeleteElementNonStrictInfo + = FunctionInfo(DeleteElementJit, "DeleteElementNonStrict"); + +bool +BaselineCompiler::emit_JSOP_DELELEM() +{ + // Keep values on the stack for the decompiler. + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1); + + prepareVMCall(); + + pushArg(R1); + pushArg(R0); + + bool strict = JSOp(*pc) == JSOP_STRICTDELELEM; + if (!callVM(strict ? DeleteElementStrictInfo : DeleteElementNonStrictInfo)) + return false; + + masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1); + frame.popn(2); + frame.push(R1); + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRICTDELELEM() +{ + return emit_JSOP_DELELEM(); +} + +bool +BaselineCompiler::emit_JSOP_IN() +{ + frame.popRegsAndSync(2); + + ICIn_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETGNAME() +{ + if (script->hasNonSyntacticScope()) + return emit_JSOP_GETNAME(); + + RootedPropertyName name(cx, script->getName(pc)); + + // These names are non-configurable on the global and cannot be shadowed. + if (name == cx->names().undefined) { + frame.push(UndefinedValue()); + return true; + } + if (name == cx->names().NaN) { + frame.push(cx->runtime()->NaNValue); + return true; + } + if (name == cx->names().Infinity) { + frame.push(cx->runtime()->positiveInfinityValue); + return true; + } + + frame.syncStack(0); + + masm.movePtr(ImmGCPtr(&script->global().lexicalEnvironment()), R0.scratchReg()); + + // Call IC. + ICGetName_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_BINDGNAME() +{ + if (!script->hasNonSyntacticScope()) { + // We can bind name to the global lexical scope if the binding already + // exists, is initialized, and is writable (i.e., an initialized + // 'let') at compile time. 
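+ // NOTE (illustrative example): for
+ //   let x = 1;               // initialized, writable global lexical
+ //   function f() { x = 2; }  // JSOP_BINDGNAME in f can push the global
+ //                            // lexical environment as a constant
+ // whereas an uninitialized or const binding, or a name that only exists as
+ // a configurable global property, falls through to the BINDNAME IC below.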
+ RootedPropertyName name(cx, script->getName(pc)); + Rooted env(cx, &script->global().lexicalEnvironment()); + if (Shape* shape = env->lookup(cx, name)) { + if (shape->writable() && + !env->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL)) + { + frame.push(ObjectValue(*env)); + return true; + } + } else if (Shape* shape = script->global().lookup(cx, name)) { + // If the property does not currently exist on the global lexical + // scope, we can bind name to the global object if the property + // exists on the global and is non-configurable, as then it cannot + // be shadowed. + if (!shape->configurable()) { + frame.push(ObjectValue(script->global())); + return true; + } + } + + // Otherwise we have to use the dynamic scope chain. + } + + return emit_JSOP_BINDNAME(); +} + +typedef JSObject* (*BindVarFn)(JSContext*, HandleObject); +static const VMFunction BindVarInfo = FunctionInfo(jit::BindVar, "BindVar"); + +bool +BaselineCompiler::emit_JSOP_BINDVAR() +{ + frame.syncStack(0); + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + prepareVMCall(); + pushArg(R0.scratchReg()); + + if (!callVM(BindVarInfo)) + return false; + + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SETPROP() +{ + // Keep lhs in R0, rhs in R1. + frame.popRegsAndSync(2); + + // Call IC. + ICSetProp_Fallback::Compiler compiler(cx); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + // The IC will return the RHS value in R0, mark it as pushed value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRICTSETPROP() +{ + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_SETNAME() +{ + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_STRICTSETNAME() +{ + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_SETGNAME() +{ + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_STRICTSETGNAME() +{ + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_GETPROP() +{ + // Keep object in R0. + frame.popRegsAndSync(1); + + // Call IC. + ICGetProp_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CALLPROP() +{ + return emit_JSOP_GETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_LENGTH() +{ + return emit_JSOP_GETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_GETXPROP() +{ + return emit_JSOP_GETPROP(); +} + +typedef bool (*DeletePropertyFn)(JSContext*, HandleValue, HandlePropertyName, bool*); +static const VMFunction DeletePropertyStrictInfo = + FunctionInfo(DeletePropertyJit, "DeletePropertyStrict"); +static const VMFunction DeletePropertyNonStrictInfo = + FunctionInfo(DeletePropertyJit, "DeletePropertyNonStrict"); + +bool +BaselineCompiler::emit_JSOP_DELPROP() +{ + // Keep value on the stack for the decompiler. + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + prepareVMCall(); + + pushArg(ImmGCPtr(script->getName(pc))); + pushArg(R0); + + bool strict = JSOp(*pc) == JSOP_STRICTDELPROP; + if (!callVM(strict ? 
DeletePropertyStrictInfo : DeletePropertyNonStrictInfo)) + return false; + + masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R1); + frame.pop(); + frame.push(R1); + return true; +} + +bool +BaselineCompiler::emit_JSOP_STRICTDELPROP() +{ + return emit_JSOP_DELPROP(); +} + +void +BaselineCompiler::getEnvironmentCoordinateObject(Register reg) +{ + EnvironmentCoordinate ec(pc); + + masm.loadPtr(frame.addressOfEnvironmentChain(), reg); + for (unsigned i = ec.hops(); i; i--) + masm.extractObject(Address(reg, EnvironmentObject::offsetOfEnclosingEnvironment()), reg); +} + +Address +BaselineCompiler::getEnvironmentCoordinateAddressFromObject(Register objReg, Register reg) +{ + EnvironmentCoordinate ec(pc); + Shape* shape = EnvironmentCoordinateToEnvironmentShape(script, pc); + + Address addr; + if (shape->numFixedSlots() <= ec.slot()) { + masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), reg); + return Address(reg, (ec.slot() - shape->numFixedSlots()) * sizeof(Value)); + } + + return Address(objReg, NativeObject::getFixedSlotOffset(ec.slot())); +} + +Address +BaselineCompiler::getEnvironmentCoordinateAddress(Register reg) +{ + getEnvironmentCoordinateObject(reg); + return getEnvironmentCoordinateAddressFromObject(reg, reg); +} + +bool +BaselineCompiler::emit_JSOP_GETALIASEDVAR() +{ + frame.syncStack(0); + + Address address = getEnvironmentCoordinateAddress(R0.scratchReg()); + masm.loadValue(address, R0); + + if (ionCompileable_) { + // No need to monitor types if we know Ion can't compile this script. + ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline, + (ICMonitoredFallbackStub*) nullptr); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + } + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SETALIASEDVAR() +{ + JSScript* outerScript = EnvironmentCoordinateFunctionScript(script, pc); + if (outerScript && outerScript->treatAsRunOnce()) { + // Type updates for this operation might need to be tracked, so treat + // this as a SETPROP. + + // Load rhs into R1. + frame.syncStack(1); + frame.popValue(R1); + + // Load and box lhs into R0. + getEnvironmentCoordinateObject(R2.scratchReg()); + masm.tagValue(JSVAL_TYPE_OBJECT, R2.scratchReg(), R0); + + // Call SETPROP IC. + ICSetProp_Fallback::Compiler compiler(cx); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + // The IC will return the RHS value in R0, mark it as pushed value. + frame.push(R0); + return true; + } + + // Keep rvalue in R0. + frame.popRegsAndSync(1); + Register objReg = R2.scratchReg(); + + getEnvironmentCoordinateObject(objReg); + Address address = getEnvironmentCoordinateAddressFromObject(objReg, R1.scratchReg()); + masm.patchableCallPreBarrier(address, MIRType::Value); + masm.storeValue(R0, address); + frame.push(R0); + + // Only R0 is live at this point. + // Scope coordinate object is already in R2.scratchReg(). + Register temp = R1.scratchReg(); + + Label skipBarrier; + masm.branchPtrInNurseryChunk(Assembler::Equal, objReg, temp, &skipBarrier); + masm.branchValueIsNurseryObject(Assembler::NotEqual, R0, temp, &skipBarrier); + + masm.call(&postBarrierSlot_); // Won't clobber R0 + + masm.bind(&skipBarrier); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETNAME() +{ + frame.syncStack(0); + + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + // Call IC. + ICGetName_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. 
+ frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_BINDNAME() +{ + frame.syncStack(0); + + if (*pc == JSOP_BINDGNAME && !script->hasNonSyntacticScope()) + masm.movePtr(ImmGCPtr(&script->global().lexicalEnvironment()), R0.scratchReg()); + else + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + // Call IC. + ICBindName_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +typedef bool (*DeleteNameFn)(JSContext*, HandlePropertyName, HandleObject, + MutableHandleValue); +static const VMFunction DeleteNameInfo = + FunctionInfo(DeleteNameOperation, "DeleteNameOperation"); + +bool +BaselineCompiler::emit_JSOP_DELNAME() +{ + frame.syncStack(0); + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + prepareVMCall(); + + pushArg(R0.scratchReg()); + pushArg(ImmGCPtr(script->getName(pc))); + + if (!callVM(DeleteNameInfo)) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETIMPORT() +{ + ModuleEnvironmentObject* env = GetModuleEnvironmentForScript(script); + MOZ_ASSERT(env); + + ModuleEnvironmentObject* targetEnv; + Shape* shape; + MOZ_ALWAYS_TRUE(env->lookupImport(NameToId(script->getName(pc)), &targetEnv, &shape)); + + EnsureTrackPropertyTypes(cx, targetEnv, shape->propid()); + + frame.syncStack(0); + + uint32_t slot = shape->slot(); + Register scratch = R0.scratchReg(); + masm.movePtr(ImmGCPtr(targetEnv), scratch); + if (slot < targetEnv->numFixedSlots()) { + masm.loadValue(Address(scratch, NativeObject::getFixedSlotOffset(slot)), R0); + } else { + masm.loadPtr(Address(scratch, NativeObject::offsetOfSlots()), scratch); + masm.loadValue(Address(scratch, (slot - targetEnv->numFixedSlots()) * sizeof(Value)), R0); + } + + // Imports are initialized by this point except in rare circumstances, so + // don't emit a check unless we have to. + if (targetEnv->getSlot(shape->slot()).isMagic(JS_UNINITIALIZED_LEXICAL)) + if (!emitUninitializedLexicalCheck(R0)) + return false; + + if (ionCompileable_) { + // No need to monitor types if we know Ion can't compile this script. 
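+ // NOTE (elaboration): the TypeMonitor fallback records the loaded value's
+ // type in the script's type information so Ion can specialize on it later;
+ // if the script can never be Ion-compiled that bookkeeping would be pure
+ // overhead, hence the ionCompileable_ guard above.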
+ ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline, + (ICMonitoredFallbackStub*) nullptr); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + } + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETINTRINSIC() +{ + frame.syncStack(0); + + ICGetIntrinsic_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +typedef bool (*DefVarFn)(JSContext*, HandlePropertyName, unsigned, HandleObject); +static const VMFunction DefVarInfo = FunctionInfo(DefVar, "DefVar"); + +bool +BaselineCompiler::emit_JSOP_DEFVAR() +{ + frame.syncStack(0); + + unsigned attrs = JSPROP_ENUMERATE; + if (!script->isForEval()) + attrs |= JSPROP_PERMANENT; + MOZ_ASSERT(attrs <= UINT32_MAX); + + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + prepareVMCall(); + + pushArg(R0.scratchReg()); + pushArg(Imm32(attrs)); + pushArg(ImmGCPtr(script->getName(pc))); + + return callVM(DefVarInfo); +} + +typedef bool (*DefLexicalFn)(JSContext*, HandlePropertyName, unsigned, HandleObject); +static const VMFunction DefLexicalInfo = FunctionInfo(DefLexical, "DefLexical"); + +bool +BaselineCompiler::emit_JSOP_DEFCONST() +{ + return emit_JSOP_DEFLET(); +} + +bool +BaselineCompiler::emit_JSOP_DEFLET() +{ + frame.syncStack(0); + + unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT; + if (*pc == JSOP_DEFCONST) + attrs |= JSPROP_READONLY; + MOZ_ASSERT(attrs <= UINT32_MAX); + + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + prepareVMCall(); + + pushArg(R0.scratchReg()); + pushArg(Imm32(attrs)); + pushArg(ImmGCPtr(script->getName(pc))); + + return callVM(DefLexicalInfo); +} + +typedef bool (*DefFunOperationFn)(JSContext*, HandleScript, HandleObject, HandleFunction); +static const VMFunction DefFunOperationInfo = + FunctionInfo(DefFunOperation, "DefFunOperation"); + +bool +BaselineCompiler::emit_JSOP_DEFFUN() +{ + frame.popRegsAndSync(1); + masm.unboxObject(R0, R0.scratchReg()); + masm.loadPtr(frame.addressOfEnvironmentChain(), R1.scratchReg()); + + prepareVMCall(); + + pushArg(R0.scratchReg()); + pushArg(R1.scratchReg()); + pushArg(ImmGCPtr(script)); + + return callVM(DefFunOperationInfo); +} + +typedef bool (*InitPropGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandlePropertyName, + HandleObject); +static const VMFunction InitPropGetterSetterInfo = + FunctionInfo(InitGetterSetterOperation, + "InitPropGetterSetterOperation"); + +bool +BaselineCompiler::emitInitPropGetterSetter() +{ + MOZ_ASSERT(JSOp(*pc) == JSOP_INITPROP_GETTER || + JSOp(*pc) == JSOP_INITHIDDENPROP_GETTER || + JSOp(*pc) == JSOP_INITPROP_SETTER || + JSOp(*pc) == JSOP_INITHIDDENPROP_SETTER); + + // Keep values on the stack for the decompiler. 
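+ // NOTE: "for the decompiler" means the operands are only synced, not popped,
+ // so that if the VM call below throws, the expression decompiler can still
+ // find them in their stack slots when it reconstructs the offending
+ // expression for the error message; they are popped only after the call
+ // succeeds.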
+ frame.syncStack(0); + + prepareVMCall(); + + masm.extractObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg()); + masm.extractObject(frame.addressOfStackValue(frame.peek(-2)), R1.scratchReg()); + + pushArg(R0.scratchReg()); + pushArg(ImmGCPtr(script->getName(pc))); + pushArg(R1.scratchReg()); + pushArg(ImmPtr(pc)); + + if (!callVM(InitPropGetterSetterInfo)) + return false; + + frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITPROP_GETTER() +{ + return emitInitPropGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENPROP_GETTER() +{ + return emitInitPropGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITPROP_SETTER() +{ + return emitInitPropGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENPROP_SETTER() +{ + return emitInitPropGetterSetter(); +} + +typedef bool (*InitElemGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandleValue, + HandleObject); +static const VMFunction InitElemGetterSetterInfo = + FunctionInfo(InitGetterSetterOperation, + "InitElemGetterSetterOperation"); + +bool +BaselineCompiler::emitInitElemGetterSetter() +{ + MOZ_ASSERT(JSOp(*pc) == JSOP_INITELEM_GETTER || + JSOp(*pc) == JSOP_INITHIDDENELEM_GETTER || + JSOp(*pc) == JSOP_INITELEM_SETTER || + JSOp(*pc) == JSOP_INITHIDDENELEM_SETTER); + + // Load index and value in R0 and R1, but keep values on the stack for the + // decompiler. + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0); + masm.extractObject(frame.addressOfStackValue(frame.peek(-1)), R1.scratchReg()); + + prepareVMCall(); + + pushArg(R1.scratchReg()); + pushArg(R0); + masm.extractObject(frame.addressOfStackValue(frame.peek(-3)), R0.scratchReg()); + pushArg(R0.scratchReg()); + pushArg(ImmPtr(pc)); + + if (!callVM(InitElemGetterSetterInfo)) + return false; + + frame.popn(2); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITELEM_GETTER() +{ + return emitInitElemGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENELEM_GETTER() +{ + return emitInitElemGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITELEM_SETTER() +{ + return emitInitElemGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITHIDDENELEM_SETTER() +{ + return emitInitElemGetterSetter(); +} + +bool +BaselineCompiler::emit_JSOP_INITELEM_INC() +{ + // Keep the object and rhs on the stack. + frame.syncStack(0); + + // Load object in R0, index in R1. + masm.loadValue(frame.addressOfStackValue(frame.peek(-3)), R0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R1); + + // Call IC. + ICSetElem_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Pop the rhs + frame.pop(); + + // Increment index + Address indexAddr = frame.addressOfStackValue(frame.peek(-1)); + masm.incrementInt32Value(indexAddr); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETLOCAL() +{ + frame.pushLocal(GET_LOCALNO(pc)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SETLOCAL() +{ + // Ensure no other StackValue refers to the old value, for instance i + (i = 3). + // This also allows us to use R0 as scratch below. + frame.syncStack(1); + + uint32_t local = GET_LOCALNO(pc); + storeValue(frame.peek(-1), frame.addressOfLocal(local), R0); + return true; +} + +bool +BaselineCompiler::emitFormalArgAccess(uint32_t arg, bool get) +{ + // Fast path: the script does not use |arguments| or formals don't + // alias the arguments object. 
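+ // NOTE (illustration): formals alias the arguments object in non-strict
+ // scripts that use |arguments|, e.g.
+ //   function f(a) { arguments[0] = 3; return a; }  // returns 3
+ // In that case GETARG/SETARG must go through the ArgumentsObject data
+ // vector (the slow path below); otherwise the formal lives in a fixed
+ // frame slot and can be accessed directly.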
+ if (!script->argumentsAliasesFormals()) { + if (get) { + frame.pushArg(arg); + } else { + // See the comment in emit_JSOP_SETLOCAL. + frame.syncStack(1); + storeValue(frame.peek(-1), frame.addressOfArg(arg), R0); + } + + return true; + } + + // Sync so that we can use R0. + frame.syncStack(0); + + // If the script is known to have an arguments object, we can just use it. + // Else, we *may* have an arguments object (because we can't invalidate + // when needsArgsObj becomes |true|), so we have to test HAS_ARGS_OBJ. + Label done; + if (!script->needsArgsObj()) { + Label hasArgsObj; + masm.branchTest32(Assembler::NonZero, frame.addressOfFlags(), + Imm32(BaselineFrame::HAS_ARGS_OBJ), &hasArgsObj); + if (get) + masm.loadValue(frame.addressOfArg(arg), R0); + else + storeValue(frame.peek(-1), frame.addressOfArg(arg), R0); + masm.jump(&done); + masm.bind(&hasArgsObj); + } + + // Load the arguments object data vector. + Register reg = R2.scratchReg(); + masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()), reg); + masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg); + + // Load/store the argument. + Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value)); + if (get) { + masm.loadValue(argAddr, R0); + frame.push(R0); + } else { + masm.patchableCallPreBarrier(argAddr, MIRType::Value); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + masm.storeValue(R0, argAddr); + + MOZ_ASSERT(frame.numUnsyncedSlots() == 0); + + Register temp = R1.scratchReg(); + + // Reload the arguments object + Register reg = R2.scratchReg(); + masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()), reg); + + Label skipBarrier; + + masm.branchPtrInNurseryChunk(Assembler::Equal, reg, temp, &skipBarrier); + masm.branchValueIsNurseryObject(Assembler::NotEqual, R0, temp, &skipBarrier); + + masm.call(&postBarrierSlot_); + + masm.bind(&skipBarrier); + } + + masm.bind(&done); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GETARG() +{ + uint32_t arg = GET_ARGNO(pc); + return emitFormalArgAccess(arg, /* get = */ true); +} + +bool +BaselineCompiler::emit_JSOP_SETARG() +{ + // Ionmonkey can't inline functions with SETARG with magic arguments. + if (!script->argsObjAliasesFormals() && script->argumentsAliasesFormals()) + script->setUninlineable(); + + modifiesArguments_ = true; + + uint32_t arg = GET_ARGNO(pc); + return emitFormalArgAccess(arg, /* get = */ false); +} + +bool +BaselineCompiler::emit_JSOP_NEWTARGET() +{ + if (script->isForEval()) { + frame.pushEvalNewTarget(); + return true; + } + + MOZ_ASSERT(function()); + frame.syncStack(0); + + if (function()->isArrow()) { + // Arrow functions store their |new.target| value in an + // extended slot. 
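+ // NOTE: arrow functions never have their own |new.target|; the value is
+ // captured from the enclosing frame when the arrow function object is
+ // created (see JSOP_LAMBDA_ARROW above) and stashed in a FunctionExtended
+ // slot, which is what gets reloaded here.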
+ Register scratch = R0.scratchReg(); + masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), scratch); + masm.loadValue(Address(scratch, FunctionExtended::offsetOfArrowNewTargetSlot()), R0); + frame.push(R0); + return true; + } + + // if (isConstructing()) push(argv[Max(numActualArgs, numFormalArgs)]) + Label notConstructing, done; + masm.branchTestPtr(Assembler::Zero, frame.addressOfCalleeToken(), + Imm32(CalleeToken_FunctionConstructing), ¬Constructing); + + Register argvLen = R0.scratchReg(); + + Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()); + masm.loadPtr(actualArgs, argvLen); + + Label useNFormals; + + masm.branchPtr(Assembler::Below, argvLen, Imm32(function()->nargs()), + &useNFormals); + + { + BaseValueIndex newTarget(BaselineFrameReg, argvLen, BaselineFrame::offsetOfArg(0)); + masm.loadValue(newTarget, R0); + masm.jump(&done); + } + + masm.bind(&useNFormals); + + { + Address newTarget(BaselineFrameReg, + BaselineFrame::offsetOfArg(0) + (function()->nargs() * sizeof(Value))); + masm.loadValue(newTarget, R0); + masm.jump(&done); + } + + // else push(undefined) + masm.bind(¬Constructing); + masm.moveValue(UndefinedValue(), R0); + + masm.bind(&done); + frame.push(R0); + + return true; +} + +typedef bool (*ThrowRuntimeLexicalErrorFn)(JSContext* cx, unsigned); +static const VMFunction ThrowRuntimeLexicalErrorInfo = + FunctionInfo(jit::ThrowRuntimeLexicalError, + "ThrowRuntimeLexicalError"); + +bool +BaselineCompiler::emitThrowConstAssignment() +{ + prepareVMCall(); + pushArg(Imm32(JSMSG_BAD_CONST_ASSIGN)); + return callVM(ThrowRuntimeLexicalErrorInfo); +} + +bool +BaselineCompiler::emit_JSOP_THROWSETCONST() +{ + return emitThrowConstAssignment(); +} + +bool +BaselineCompiler::emit_JSOP_THROWSETALIASEDCONST() +{ + return emitThrowConstAssignment(); +} + +bool +BaselineCompiler::emit_JSOP_THROWSETCALLEE() +{ + return emitThrowConstAssignment(); +} + +bool +BaselineCompiler::emitUninitializedLexicalCheck(const ValueOperand& val) +{ + Label done; + masm.branchTestMagicValue(Assembler::NotEqual, val, JS_UNINITIALIZED_LEXICAL, &done); + + prepareVMCall(); + pushArg(Imm32(JSMSG_UNINITIALIZED_LEXICAL)); + if (!callVM(ThrowRuntimeLexicalErrorInfo)) + return false; + + masm.bind(&done); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CHECKLEXICAL() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfLocal(GET_LOCALNO(pc)), R0); + return emitUninitializedLexicalCheck(R0); +} + +bool +BaselineCompiler::emit_JSOP_INITLEXICAL() +{ + return emit_JSOP_SETLOCAL(); +} + +bool +BaselineCompiler::emit_JSOP_INITGLEXICAL() +{ + frame.popRegsAndSync(1); + frame.push(ObjectValue(script->global().lexicalEnvironment())); + frame.push(R0); + return emit_JSOP_SETPROP(); +} + +bool +BaselineCompiler::emit_JSOP_CHECKALIASEDLEXICAL() +{ + frame.syncStack(0); + masm.loadValue(getEnvironmentCoordinateAddress(R0.scratchReg()), R0); + return emitUninitializedLexicalCheck(R0); +} + +bool +BaselineCompiler::emit_JSOP_INITALIASEDLEXICAL() +{ + return emit_JSOP_SETALIASEDVAR(); +} + +bool +BaselineCompiler::emit_JSOP_UNINITIALIZED() +{ + frame.push(MagicValue(JS_UNINITIALIZED_LEXICAL)); + return true; +} + +bool +BaselineCompiler::emitCall() +{ + MOZ_ASSERT(IsCallPC(pc)); + + bool construct = JSOp(*pc) == JSOP_NEW || JSOp(*pc) == JSOP_SUPERCALL; + uint32_t argc = GET_ARGC(pc); + + frame.syncStack(0); + masm.move32(Imm32(argc), R0.scratchReg()); + + // Call IC + ICCall_Fallback::Compiler stubCompiler(cx, /* isConstructing = */ construct, + /* isSpread = */ false); + if 
(!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Update FrameInfo. + frame.popn(2 + argc + construct); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emitSpreadCall() +{ + MOZ_ASSERT(IsCallPC(pc)); + + frame.syncStack(0); + masm.move32(Imm32(1), R0.scratchReg()); + + // Call IC + bool construct = JSOp(*pc) == JSOP_SPREADNEW || JSOp(*pc) == JSOP_SPREADSUPERCALL; + ICCall_Fallback::Compiler stubCompiler(cx, /* isConstructing = */ construct, + /* isSpread = */ true); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + // Update FrameInfo. + frame.popn(3 + construct); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CALL() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_CALLITER() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_NEW() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_SUPERCALL() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_FUNCALL() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_FUNAPPLY() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_EVAL() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_STRICTEVAL() +{ + return emitCall(); +} + +bool +BaselineCompiler::emit_JSOP_SPREADCALL() +{ + return emitSpreadCall(); +} + +bool +BaselineCompiler::emit_JSOP_SPREADNEW() +{ + return emitSpreadCall(); +} + +bool +BaselineCompiler::emit_JSOP_SPREADSUPERCALL() +{ + return emitSpreadCall(); +} + +bool +BaselineCompiler::emit_JSOP_SPREADEVAL() +{ + return emitSpreadCall(); +} + +bool +BaselineCompiler::emit_JSOP_STRICTSPREADEVAL() +{ + return emitSpreadCall(); +} + +typedef bool (*OptimizeSpreadCallFn)(JSContext*, HandleValue, bool*); +static const VMFunction OptimizeSpreadCallInfo = + FunctionInfo(OptimizeSpreadCall, "OptimizeSpreadCall"); + +bool +BaselineCompiler::emit_JSOP_OPTIMIZE_SPREADCALL() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + prepareVMCall(); + pushArg(R0); + + if (!callVM(OptimizeSpreadCallInfo)) + return false; + + masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, ReturnReg, R0); + frame.push(R0); + return true; +} + +typedef bool (*ImplicitThisFn)(JSContext*, HandleObject, HandlePropertyName, + MutableHandleValue); +static const VMFunction ImplicitThisInfo = + FunctionInfo(ImplicitThisOperation, "ImplicitThisOperation"); + +bool +BaselineCompiler::emit_JSOP_IMPLICITTHIS() +{ + frame.syncStack(0); + masm.loadPtr(frame.addressOfEnvironmentChain(), R0.scratchReg()); + + prepareVMCall(); + + pushArg(ImmGCPtr(script->getName(pc))); + pushArg(R0.scratchReg()); + + if (!callVM(ImplicitThisInfo)) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_GIMPLICITTHIS() +{ + if (!script->hasNonSyntacticScope()) { + frame.push(UndefinedValue()); + return true; + } + + return emit_JSOP_IMPLICITTHIS(); +} + +bool +BaselineCompiler::emit_JSOP_INSTANCEOF() +{ + frame.popRegsAndSync(2); + + ICInstanceOf_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_TYPEOF() +{ + frame.popRegsAndSync(1); + + ICTypeOf_Fallback::Compiler stubCompiler(cx); + if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_TYPEOFEXPR() +{ + return emit_JSOP_TYPEOF(); +} + +typedef bool (*ThrowMsgFn)(JSContext*, const unsigned); +static const 
VMFunction ThrowMsgInfo = + FunctionInfo(js::ThrowMsgOperation, "ThrowMsgOperation"); + +bool +BaselineCompiler::emit_JSOP_THROWMSG() +{ + prepareVMCall(); + pushArg(Imm32(GET_UINT16(pc))); + return callVM(ThrowMsgInfo); +} + +typedef bool (*ThrowFn)(JSContext*, HandleValue); +static const VMFunction ThrowInfo = FunctionInfo(js::Throw, "Throw"); + +bool +BaselineCompiler::emit_JSOP_THROW() +{ + // Keep value to throw in R0. + frame.popRegsAndSync(1); + + prepareVMCall(); + pushArg(R0); + + return callVM(ThrowInfo); +} + +typedef bool (*ThrowingFn)(JSContext*, HandleValue); +static const VMFunction ThrowingInfo = + FunctionInfo(js::ThrowingOperation, "ThrowingOperation"); + +bool +BaselineCompiler::emit_JSOP_THROWING() +{ + // Keep value to throw in R0. + frame.popRegsAndSync(1); + + prepareVMCall(); + pushArg(R0); + + return callVM(ThrowingInfo); +} + +bool +BaselineCompiler::emit_JSOP_TRY() +{ + if (!emit_JSOP_JUMPTARGET()) + return false; + + // Ionmonkey can't inline function with JSOP_TRY. + script->setUninlineable(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_FINALLY() +{ + // JSOP_FINALLY has a def count of 2, but these values are already on the + // stack (they're pushed by JSOP_GOSUB). Update the compiler's stack state. + frame.setStackDepth(frame.stackDepth() + 2); + + // To match the interpreter, emit an interrupt check at the start of the + // finally block. + return emitInterruptCheck(); +} + +bool +BaselineCompiler::emit_JSOP_GOSUB() +{ + // Push |false| so that RETSUB knows the value on top of the + // stack is not an exception but the offset to the op following + // this GOSUB. + frame.push(BooleanValue(false)); + + int32_t nextOffset = script->pcToOffset(GetNextPc(pc)); + frame.push(Int32Value(nextOffset)); + + // Jump to the finally block. + frame.syncStack(0); + jsbytecode* target = pc + GET_JUMP_OFFSET(pc); + masm.jump(labelOf(target)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_RETSUB() +{ + frame.popRegsAndSync(2); + + ICRetSub_Fallback::Compiler stubCompiler(cx); + return emitOpIC(stubCompiler.getStub(&stubSpace_)); +} + +typedef bool (*PushLexicalEnvFn)(JSContext*, BaselineFrame*, Handle); +static const VMFunction PushLexicalEnvInfo = + FunctionInfo(jit::PushLexicalEnv, "PushLexicalEnv"); + +bool +BaselineCompiler::emit_JSOP_PUSHLEXICALENV() +{ + LexicalScope& scope = script->getScope(pc)->as(); + + // Call a stub to push the block on the block chain. 
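+ // NOTE (assumption about the VM helper): "block chain" is older
+ // terminology -- jit::PushLexicalEnv creates a fresh LexicalEnvironmentObject
+ // for |scope| and makes it the frame's current environment chain head,
+ // mirroring what the interpreter does when it enters a lexical block.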
+ prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + pushArg(ImmGCPtr(&scope)); + pushArg(R0.scratchReg()); + + return callVM(PushLexicalEnvInfo); +} + +typedef bool (*PopLexicalEnvFn)(JSContext*, BaselineFrame*); +static const VMFunction PopLexicalEnvInfo = + FunctionInfo(jit::PopLexicalEnv, "PopLexicalEnv"); + +typedef bool (*DebugLeaveThenPopLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*); +static const VMFunction DebugLeaveThenPopLexicalEnvInfo = + FunctionInfo(jit::DebugLeaveThenPopLexicalEnv, + "DebugLeaveThenPopLexicalEnv"); + +bool +BaselineCompiler::emit_JSOP_POPLEXICALENV() +{ + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + if (compileDebugInstrumentation_) { + pushArg(ImmPtr(pc)); + pushArg(R0.scratchReg()); + return callVM(DebugLeaveThenPopLexicalEnvInfo); + } + + pushArg(R0.scratchReg()); + return callVM(PopLexicalEnvInfo); +} + +typedef bool (*FreshenLexicalEnvFn)(JSContext*, BaselineFrame*); +static const VMFunction FreshenLexicalEnvInfo = + FunctionInfo(jit::FreshenLexicalEnv, "FreshenLexicalEnv"); + +typedef bool (*DebugLeaveThenFreshenLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*); +static const VMFunction DebugLeaveThenFreshenLexicalEnvInfo = + FunctionInfo(jit::DebugLeaveThenFreshenLexicalEnv, + "DebugLeaveThenFreshenLexicalEnv"); + +bool +BaselineCompiler::emit_JSOP_FRESHENLEXICALENV() +{ + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + if (compileDebugInstrumentation_) { + pushArg(ImmPtr(pc)); + pushArg(R0.scratchReg()); + return callVM(DebugLeaveThenFreshenLexicalEnvInfo); + } + + pushArg(R0.scratchReg()); + return callVM(FreshenLexicalEnvInfo); +} + + +typedef bool (*RecreateLexicalEnvFn)(JSContext*, BaselineFrame*); +static const VMFunction RecreateLexicalEnvInfo = + FunctionInfo(jit::RecreateLexicalEnv, "RecreateLexicalEnv"); + +typedef bool (*DebugLeaveThenRecreateLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*); +static const VMFunction DebugLeaveThenRecreateLexicalEnvInfo = + FunctionInfo(jit::DebugLeaveThenRecreateLexicalEnv, + "DebugLeaveThenRecreateLexicalEnv"); + +bool +BaselineCompiler::emit_JSOP_RECREATELEXICALENV() +{ + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + if (compileDebugInstrumentation_) { + pushArg(ImmPtr(pc)); + pushArg(R0.scratchReg()); + return callVM(DebugLeaveThenRecreateLexicalEnvInfo); + } + + pushArg(R0.scratchReg()); + return callVM(RecreateLexicalEnvInfo); +} + +typedef bool (*DebugLeaveLexicalEnvFn)(JSContext*, BaselineFrame*, jsbytecode*); +static const VMFunction DebugLeaveLexicalEnvInfo = + FunctionInfo(jit::DebugLeaveLexicalEnv, "DebugLeaveLexicalEnv"); + +bool +BaselineCompiler::emit_JSOP_DEBUGLEAVELEXICALENV() +{ + if (!compileDebugInstrumentation_) + return true; + + prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(ImmPtr(pc)); + pushArg(R0.scratchReg()); + + return callVM(DebugLeaveLexicalEnvInfo); +} + +typedef bool (*PushVarEnvFn)(JSContext*, BaselineFrame*, HandleScope); +static const VMFunction PushVarEnvInfo = + FunctionInfo(jit::PushVarEnv, "PushVarEnv"); + +bool +BaselineCompiler::emit_JSOP_PUSHVARENV() +{ + prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(ImmGCPtr(script->getScope(pc))); + pushArg(R0.scratchReg()); + + return callVM(PushVarEnvInfo); +} + +typedef bool (*PopVarEnvFn)(JSContext*, BaselineFrame*); +static const VMFunction PopVarEnvInfo = + 
FunctionInfo(jit::PopVarEnv, "PopVarEnv"); + +bool +BaselineCompiler::emit_JSOP_POPVARENV() +{ + prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(R0.scratchReg()); + + return callVM(PopVarEnvInfo); +} + +typedef bool (*EnterWithFn)(JSContext*, BaselineFrame*, HandleValue, Handle); +static const VMFunction EnterWithInfo = + FunctionInfo(jit::EnterWith, "EnterWith"); + +bool +BaselineCompiler::emit_JSOP_ENTERWITH() +{ + WithScope& withScope = script->getScope(pc)->as(); + + // Pop "with" object to R0. + frame.popRegsAndSync(1); + + // Call a stub to push the object onto the scope chain. + prepareVMCall(); + masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg()); + + pushArg(ImmGCPtr(&withScope)); + pushArg(R0); + pushArg(R1.scratchReg()); + + return callVM(EnterWithInfo); +} + +typedef bool (*LeaveWithFn)(JSContext*, BaselineFrame*); +static const VMFunction LeaveWithInfo = + FunctionInfo(jit::LeaveWith, "LeaveWith"); + +bool +BaselineCompiler::emit_JSOP_LEAVEWITH() +{ + // Call a stub to pop the with object from the scope chain. + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(R0.scratchReg()); + + return callVM(LeaveWithInfo); +} + +typedef bool (*GetAndClearExceptionFn)(JSContext*, MutableHandleValue); +static const VMFunction GetAndClearExceptionInfo = + FunctionInfo(GetAndClearException, "GetAndClearException"); + +bool +BaselineCompiler::emit_JSOP_EXCEPTION() +{ + prepareVMCall(); + + if (!callVM(GetAndClearExceptionInfo)) + return false; + + frame.push(R0); + return true; +} + +typedef bool (*OnDebuggerStatementFn)(JSContext*, BaselineFrame*, jsbytecode* pc, bool*); +static const VMFunction OnDebuggerStatementInfo = + FunctionInfo(jit::OnDebuggerStatement, "OnDebuggerStatement"); + +bool +BaselineCompiler::emit_JSOP_DEBUGGER() +{ + prepareVMCall(); + pushArg(ImmPtr(pc)); + + frame.assertSyncedStack(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(R0.scratchReg()); + + if (!callVM(OnDebuggerStatementInfo)) + return false; + + // If the stub returns |true|, return the frame's return value. + Label done; + masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done); + { + masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand); + masm.jump(&return_); + } + masm.bind(&done); + return true; +} + +typedef bool (*DebugEpilogueFn)(JSContext*, BaselineFrame*, jsbytecode*); +static const VMFunction DebugEpilogueInfo = + FunctionInfo(jit::DebugEpilogueOnBaselineReturn, + "DebugEpilogueOnBaselineReturn"); + +bool +BaselineCompiler::emitReturn() +{ + if (compileDebugInstrumentation_) { + // Move return value into the frame's rval slot. + masm.storeValue(JSReturnOperand, frame.addressOfReturnValue()); + masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags()); + + // Load BaselineFrame pointer in R0. + frame.syncStack(0); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + prepareVMCall(); + pushArg(ImmPtr(pc)); + pushArg(R0.scratchReg()); + if (!callVM(DebugEpilogueInfo)) + return false; + + // Fix up the fake ICEntry appended by callVM for on-stack recompilation. + icEntries_.back().setFakeKind(ICEntry::Kind_DebugEpilogue); + + masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand); + } + + // Only emit the jump if this JSOP_RETRVAL is not the last instruction. + // Not needed for last instruction, because last instruction flows + // into return label. 
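// The VMFunction declarations above pair a function-pointer typedef with a
// FunctionInfo<Fn> wrapper, where the template argument names the typedef so
// the call trampoline knows the callee's arity and signature. A standalone
// sketch of that idea with invented types, not the real jit::VMFunction
// machinery:

#include <cstdio>

struct VMFunctionDesc {
    void (*address)();      // type-erased entry point
    const char* name;       // printable name, as in the strings above
    unsigned explicitArgs;  // deduced from the typedef's signature
};

template <typename Fn> struct Arity;
template <typename R, typename... Args>
struct Arity<R (*)(Args...)> {
    static constexpr unsigned value = sizeof...(Args);
};

template <typename Fn>
VMFunctionDesc FunctionInfoSketch(Fn fn, const char* name) {
    return {reinterpret_cast<void (*)()>(fn), name, Arity<Fn>::value};
}

// Example callee with the shape of the typedefs above (context + frame).
static bool LeaveWithSketch(void* cx, void* frame) { return cx && frame; }

typedef bool (*LeaveWithSketchFn)(void*, void*);
static const VMFunctionDesc LeaveWithSketchInfo =
    FunctionInfoSketch<LeaveWithSketchFn>(LeaveWithSketch, "LeaveWith");

int main() {
    std::printf("%s: %u explicit args\n", LeaveWithSketchInfo.name,
                LeaveWithSketchInfo.explicitArgs);
    return 0;
}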
+ if (pc + GetBytecodeLength(pc) < script->codeEnd()) + masm.jump(&return_); + + return true; +} + +bool +BaselineCompiler::emit_JSOP_RETURN() +{ + MOZ_ASSERT(frame.stackDepth() == 1); + + frame.popValue(JSReturnOperand); + return emitReturn(); +} + +void +BaselineCompiler::emitLoadReturnValue(ValueOperand val) +{ + Label done, noRval; + masm.branchTest32(Assembler::Zero, frame.addressOfFlags(), + Imm32(BaselineFrame::HAS_RVAL), &noRval); + masm.loadValue(frame.addressOfReturnValue(), val); + masm.jump(&done); + + masm.bind(&noRval); + masm.moveValue(UndefinedValue(), val); + + masm.bind(&done); +} + +bool +BaselineCompiler::emit_JSOP_RETRVAL() +{ + MOZ_ASSERT(frame.stackDepth() == 0); + + masm.moveValue(UndefinedValue(), JSReturnOperand); + + if (!script->noScriptRval()) { + // Return the value in the return value slot, if any. + Label done; + Address flags = frame.addressOfFlags(); + masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done); + masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand); + masm.bind(&done); + } + + return emitReturn(); +} + +typedef bool (*ToIdFn)(JSContext*, HandleScript, jsbytecode*, HandleValue, MutableHandleValue); +static const VMFunction ToIdInfo = FunctionInfo(js::ToIdOperation, "ToIdOperation"); + +bool +BaselineCompiler::emit_JSOP_TOID() +{ + // Load index in R0, but keep values on the stack for the decompiler. + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + // No-op if index is int32. + Label done; + masm.branchTestInt32(Assembler::Equal, R0, &done); + + prepareVMCall(); + + pushArg(R0); + pushArg(ImmPtr(pc)); + pushArg(ImmGCPtr(script)); + + if (!callVM(ToIdInfo)) + return false; + + masm.bind(&done); + frame.pop(); // Pop index. + frame.push(R0); + return true; +} + +typedef JSObject* (*ToAsyncFn)(JSContext*, HandleFunction); +static const VMFunction ToAsyncInfo = FunctionInfo(js::WrapAsyncFunction, "ToAsync"); + +bool +BaselineCompiler::emit_JSOP_TOASYNC() +{ + frame.syncStack(0); + masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg()); + + prepareVMCall(); + pushArg(R0.scratchReg()); + + if (!callVM(ToAsyncInfo)) + return false; + + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.pop(); + frame.push(R0); + return true; +} + +typedef bool (*ThrowObjectCoercibleFn)(JSContext*, HandleValue); +static const VMFunction ThrowObjectCoercibleInfo = + FunctionInfo(ThrowObjectCoercible, "ThrowObjectCoercible"); + +bool +BaselineCompiler::emit_JSOP_CHECKOBJCOERCIBLE() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + Label fail, done; + + masm.branchTestUndefined(Assembler::Equal, R0, &fail); + masm.branchTestNull(Assembler::NotEqual, R0, &done); + + masm.bind(&fail); + prepareVMCall(); + + pushArg(R0); + + if (!callVM(ThrowObjectCoercibleInfo)) + return false; + + masm.bind(&done); + return true; +} + +typedef JSString* (*ToStringFn)(JSContext*, HandleValue); +static const VMFunction ToStringInfo = FunctionInfo(ToStringSlow, "ToStringSlow"); + +bool +BaselineCompiler::emit_JSOP_TOSTRING() +{ + // Keep top stack value in R0. + frame.popRegsAndSync(1); + + // Inline path for string. + Label done; + masm.branchTestString(Assembler::Equal, R0, &done); + + prepareVMCall(); + + pushArg(R0); + + // Call ToStringSlow which doesn't handle string inputs. 
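// TOID, CHECKOBJCOERCIBLE and TOSTRING above share one shape: test the
// value's type tag inline and skip the expensive VM call on the common case.
// A standalone sketch of that fast-path/slow-path split, using plain C++
// stand-ins rather than the masm branchTest* helpers:

#include <cassert>
#include <string>
#include <variant>

using Value = std::variant<double, std::string>;

// Slow path: the equivalent of calling out to a VM helper like ToStringSlow.
static std::string toStringSlow(const Value& v) {
    return std::to_string(std::get<double>(v));
}

static std::string toString(const Value& v) {
    // Fast path: already a string, nothing to do (branchTestString + jump).
    if (auto* s = std::get_if<std::string>(&v))
        return *s;
    return toStringSlow(v);
}

int main() {
    assert(toString(Value{std::string("hi")}) == "hi");
    assert(!toString(Value{3.0}).empty());
    return 0;
}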
+ if (!callVM(ToStringInfo)) + return false; + + masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, R0); + + masm.bind(&done); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_TABLESWITCH() +{ + frame.popRegsAndSync(1); + + // Call IC. + ICTableSwitch::Compiler compiler(cx, pc); + return emitOpIC(compiler.getStub(&stubSpace_)); +} + +bool +BaselineCompiler::emit_JSOP_ITER() +{ + frame.popRegsAndSync(1); + + ICIteratorNew_Fallback::Compiler compiler(cx); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_MOREITER() +{ + frame.syncStack(0); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + ICIteratorMore_Fallback::Compiler compiler(cx); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_ISNOITER() +{ + frame.syncStack(0); + + Label isMagic, done; + masm.branchTestMagic(Assembler::Equal, frame.addressOfStackValue(frame.peek(-1)), + &isMagic); + masm.moveValue(BooleanValue(false), R0); + masm.jump(&done); + + masm.bind(&isMagic); + masm.moveValue(BooleanValue(true), R0); + + masm.bind(&done); + frame.push(R0, JSVAL_TYPE_BOOLEAN); + return true; +} + +bool +BaselineCompiler::emit_JSOP_ENDITER() +{ + if (!emit_JSOP_JUMPTARGET()) + return false; + frame.popRegsAndSync(1); + + ICIteratorClose_Fallback::Compiler compiler(cx); + return emitOpIC(compiler.getStub(&stubSpace_)); +} + +bool +BaselineCompiler::emit_JSOP_GETRVAL() +{ + frame.syncStack(0); + + emitLoadReturnValue(R0); + + frame.push(R0); + return true; +} + +bool +BaselineCompiler::emit_JSOP_SETRVAL() +{ + // Store to the frame's return value slot. + storeValue(frame.peek(-1), frame.addressOfReturnValue(), R2); + masm.or32(Imm32(BaselineFrame::HAS_RVAL), frame.addressOfFlags()); + frame.pop(); + return true; +} + +bool +BaselineCompiler::emit_JSOP_CALLEE() +{ + MOZ_ASSERT(function()); + frame.syncStack(0); + masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), R0.scratchReg()); + masm.tagValue(JSVAL_TYPE_OBJECT, R0.scratchReg(), R0); + frame.push(R0); + return true; +} + +typedef bool (*NewArgumentsObjectFn)(JSContext*, BaselineFrame*, MutableHandleValue); +static const VMFunction NewArgumentsObjectInfo = + FunctionInfo(jit::NewArgumentsObject, "NewArgumentsObject"); + +bool +BaselineCompiler::emit_JSOP_ARGUMENTS() +{ + frame.syncStack(0); + + Label done; + if (!script->argumentsHasVarBinding() || !script->needsArgsObj()) { + // We assume the script does not need an arguments object. However, this + // assumption can be invalidated later, see argumentsOptimizationFailed + // in JSScript. Because we can't invalidate baseline JIT code, we set a + // flag on BaselineScript when that happens and guard on it here. + masm.moveValue(MagicValue(JS_OPTIMIZED_ARGUMENTS), R0); + + // Load script->baseline. + Register scratch = R1.scratchReg(); + masm.movePtr(ImmGCPtr(script), scratch); + masm.loadPtr(Address(scratch, JSScript::offsetOfBaselineScript()), scratch); + + // If we don't need an arguments object, skip the VM call. 
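// MOREITER leaves either the next property value or a magic "no more
// properties" sentinel on the stack, and ISNOITER turns that sentinel into a
// boolean for the loop condition. A sketch of that sentinel-based protocol,
// with an ordinary C++ iterator standing in for the iterator object and
// std::nullopt playing the role of the magic value:

#include <cassert>
#include <optional>
#include <vector>

static std::optional<int> moreIter(std::vector<int>::const_iterator& it,
                                   std::vector<int>::const_iterator end) {
    if (it == end)
        return std::nullopt;  // the "no more properties" sentinel
    return *it++;
}

int main() {
    std::vector<int> props{1, 2, 3};
    auto it = props.cbegin();
    int sum = 0;
    for (;;) {
        std::optional<int> v = moreIter(it, props.cend());
        if (!v)          // ISNOITER: sentinel found, leave the loop
            break;
        sum += *v;
    }
    assert(sum == 6);
    return 0;
}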
+ masm.branchTest32(Assembler::Zero, Address(scratch, BaselineScript::offsetOfFlags()), + Imm32(BaselineScript::NEEDS_ARGS_OBJ), &done); + } + + prepareVMCall(); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + pushArg(R0.scratchReg()); + + if (!callVM(NewArgumentsObjectInfo)) + return false; + + masm.bind(&done); + frame.push(R0); + return true; +} + +typedef bool (*RunOnceScriptPrologueFn)(JSContext*, HandleScript); +static const VMFunction RunOnceScriptPrologueInfo = + FunctionInfo(js::RunOnceScriptPrologue, "RunOnceScriptPrologue"); + +bool +BaselineCompiler::emit_JSOP_RUNONCE() +{ + frame.syncStack(0); + + prepareVMCall(); + + masm.movePtr(ImmGCPtr(script), R0.scratchReg()); + pushArg(R0.scratchReg()); + + return callVM(RunOnceScriptPrologueInfo); +} + +bool +BaselineCompiler::emit_JSOP_REST() +{ + frame.syncStack(0); + + JSObject* templateObject = + ObjectGroup::newArrayObject(cx, nullptr, 0, TenuredObject, + ObjectGroup::NewArrayKind::UnknownIndex); + if (!templateObject) + return false; + + // Call IC. + ICRest_Fallback::Compiler compiler(cx, &templateObject->as()); + if (!emitOpIC(compiler.getStub(&stubSpace_))) + return false; + + // Mark R0 as pushed stack value. + frame.push(R0); + return true; +} + +typedef JSObject* (*CreateGeneratorFn)(JSContext*, BaselineFrame*); +static const VMFunction CreateGeneratorInfo = + FunctionInfo(jit::CreateGenerator, "CreateGenerator"); + +bool +BaselineCompiler::emit_JSOP_GENERATOR() +{ + MOZ_ASSERT(frame.stackDepth() == 0); + + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + prepareVMCall(); + pushArg(R0.scratchReg()); + if (!callVM(CreateGeneratorInfo)) + return false; + + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.push(R0); + return true; +} + +bool +BaselineCompiler::addYieldOffset() +{ + MOZ_ASSERT(*pc == JSOP_INITIALYIELD || *pc == JSOP_YIELD); + + uint32_t yieldIndex = GET_UINT24(pc); + + while (yieldIndex >= yieldOffsets_.length()) { + if (!yieldOffsets_.append(0)) + return false; + } + + static_assert(JSOP_INITIALYIELD_LENGTH == JSOP_YIELD_LENGTH, + "code below assumes INITIALYIELD and YIELD have same length"); + yieldOffsets_[yieldIndex] = script->pcToOffset(pc + JSOP_YIELD_LENGTH); + return true; +} + +bool +BaselineCompiler::emit_JSOP_INITIALYIELD() +{ + if (!addYieldOffset()) + return false; + + frame.syncStack(0); + MOZ_ASSERT(frame.stackDepth() == 1); + + Register genObj = R2.scratchReg(); + masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), genObj); + + MOZ_ASSERT(GET_UINT24(pc) == 0); + masm.storeValue(Int32Value(0), Address(genObj, GeneratorObject::offsetOfYieldIndexSlot())); + + Register envObj = R0.scratchReg(); + Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot()); + masm.loadPtr(frame.addressOfEnvironmentChain(), envObj); + masm.patchableCallPreBarrier(envChainSlot, MIRType::Value); + masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot); + + Register temp = R1.scratchReg(); + Label skipBarrier; + masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier); + masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier); + masm.push(genObj); + MOZ_ASSERT(genObj == R2.scratchReg()); + masm.call(&postBarrierSlot_); + masm.pop(genObj); + masm.bind(&skipBarrier); + + masm.tagValue(JSVAL_TYPE_OBJECT, genObj, JSReturnOperand); + return emitReturn(); +} + +typedef bool (*NormalSuspendFn)(JSContext*, HandleObject, BaselineFrame*, jsbytecode*, uint32_t); +static const VMFunction NormalSuspendInfo = + 
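// addYieldOffset (above, in this hunk) grows a per-script table indexed by
// the yield's GET_UINT24 index and records the bytecode offset just past the
// yield, which JSOP_RESUME later maps to a native resume address. A sketch
// of that grow-and-record step on a plain vector:

#include <cassert>
#include <cstdint>
#include <vector>

static bool addYieldOffsetSketch(std::vector<uint32_t>& yieldOffsets,
                                 uint32_t yieldIndex, uint32_t offsetAfterYield) {
    // Grow with zero placeholders until the slot for this yield exists.
    while (yieldIndex >= yieldOffsets.size())
        yieldOffsets.push_back(0);
    yieldOffsets[yieldIndex] = offsetAfterYield;
    return true;
}

int main() {
    std::vector<uint32_t> offsets;
    addYieldOffsetSketch(offsets, 2, 57);   // yields can be recorded out of order
    assert(offsets.size() == 3 && offsets[2] == 57);
    return 0;
}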
FunctionInfo(jit::NormalSuspend, "NormalSuspend"); + +bool +BaselineCompiler::emit_JSOP_YIELD() +{ + if (!addYieldOffset()) + return false; + + // Store generator in R0. + frame.popRegsAndSync(1); + + Register genObj = R2.scratchReg(); + masm.unboxObject(R0, genObj); + + MOZ_ASSERT(frame.stackDepth() >= 1); + + if (frame.stackDepth() == 1 && !script->isLegacyGenerator()) { + // If the expression stack is empty, we can inline the YIELD. Don't do + // this for legacy generators: we have to throw an exception if the + // generator is in the closing state, see GeneratorObject::suspend. + + masm.storeValue(Int32Value(GET_UINT24(pc)), + Address(genObj, GeneratorObject::offsetOfYieldIndexSlot())); + + Register envObj = R0.scratchReg(); + Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot()); + masm.loadPtr(frame.addressOfEnvironmentChain(), envObj); + masm.patchableCallPreBarrier(envChainSlot, MIRType::Value); + masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot); + + Register temp = R1.scratchReg(); + Label skipBarrier; + masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier); + masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier); + MOZ_ASSERT(genObj == R2.scratchReg()); + masm.call(&postBarrierSlot_); + masm.bind(&skipBarrier); + } else { + masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg()); + + prepareVMCall(); + pushArg(Imm32(frame.stackDepth())); + pushArg(ImmPtr(pc)); + pushArg(R1.scratchReg()); + pushArg(genObj); + + if (!callVM(NormalSuspendInfo)) + return false; + } + + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), JSReturnOperand); + return emitReturn(); +} + +typedef bool (*DebugAfterYieldFn)(JSContext*, BaselineFrame*); +static const VMFunction DebugAfterYieldInfo = + FunctionInfo(jit::DebugAfterYield, "DebugAfterYield"); + +bool +BaselineCompiler::emit_JSOP_DEBUGAFTERYIELD() +{ + if (!compileDebugInstrumentation_) + return true; + + frame.assertSyncedStack(); + masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + prepareVMCall(); + pushArg(R0.scratchReg()); + return callVM(DebugAfterYieldInfo); +} + +typedef bool (*FinalSuspendFn)(JSContext*, HandleObject, BaselineFrame*, jsbytecode*); +static const VMFunction FinalSuspendInfo = + FunctionInfo(jit::FinalSuspend, "FinalSuspend"); + +bool +BaselineCompiler::emit_JSOP_FINALYIELDRVAL() +{ + // Store generator in R0, BaselineFrame pointer in R1. 
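// emit_JSOP_YIELD above only inlines the suspend when the expression stack is
// empty (depth 1: just the generator) and the script is not a legacy
// generator; otherwise it calls NormalSuspend so the VM can copy the live
// expression stack into the generator object. A sketch of that decision:

#include <cassert>

enum class SuspendPath { Inline, VMCall };

static SuspendPath chooseSuspendPath(unsigned stackDepth, bool isLegacyGenerator) {
    // Depth 1 means only the generator object itself is on the stack, so
    // there is nothing extra to save; legacy generators still need the VM
    // call because the closing-state check can throw.
    if (stackDepth == 1 && !isLegacyGenerator)
        return SuspendPath::Inline;
    return SuspendPath::VMCall;
}

int main() {
    assert(chooseSuspendPath(1, false) == SuspendPath::Inline);
    assert(chooseSuspendPath(3, false) == SuspendPath::VMCall);
    assert(chooseSuspendPath(1, true) == SuspendPath::VMCall);
    return 0;
}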
+    frame.popRegsAndSync(1);
+    masm.unboxObject(R0, R0.scratchReg());
+    masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
+
+    prepareVMCall();
+    pushArg(ImmPtr(pc));
+    pushArg(R1.scratchReg());
+    pushArg(R0.scratchReg());
+
+    if (!callVM(FinalSuspendInfo))
+        return false;
+
+    masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
+    return emitReturn();
+}
+
+typedef bool (*InterpretResumeFn)(JSContext*, HandleObject, HandleValue, HandlePropertyName,
+                                  MutableHandleValue);
+static const VMFunction InterpretResumeInfo =
+    FunctionInfo<InterpretResumeFn>(jit::InterpretResume, "InterpretResume");
+
+typedef bool (*GeneratorThrowFn)(JSContext*, BaselineFrame*, Handle<GeneratorObject*>,
+                                 HandleValue, uint32_t);
+static const VMFunction GeneratorThrowInfo =
+    FunctionInfo<GeneratorThrowFn>(jit::GeneratorThrowOrClose, "GeneratorThrowOrClose", TailCall);
+
+bool
+BaselineCompiler::emit_JSOP_RESUME()
+{
+    GeneratorObject::ResumeKind resumeKind = GeneratorObject::getResumeKind(pc);
+
+    frame.syncStack(0);
+    masm.checkStackAlignment();
+
+    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+    regs.take(BaselineFrameReg);
+
+    // Load generator object.
+    Register genObj = regs.takeAny();
+    masm.unboxObject(frame.addressOfStackValue(frame.peek(-2)), genObj);
+
+    // Load callee.
+    Register callee = regs.takeAny();
+    masm.unboxObject(Address(genObj, GeneratorObject::offsetOfCalleeSlot()), callee);
+
+    // Load the script. Note that we don't relazify generator scripts, so it's
+    // guaranteed to be non-lazy.
+    Register scratch1 = regs.takeAny();
+    masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), scratch1);
+
+    // Load the BaselineScript or call a stub if we don't have one.
+    Label interpret;
+    masm.loadPtr(Address(scratch1, JSScript::offsetOfBaselineScript()), scratch1);
+    masm.branchPtr(Assembler::BelowOrEqual, scratch1, ImmPtr(BASELINE_DISABLED_SCRIPT), &interpret);
+
+#ifdef JS_TRACE_LOGGING
+    if (!emitTraceLoggerResume(scratch1, regs))
+        return false;
+#endif
+
+    Register constructing = regs.takeAny();
+    ValueOperand newTarget = regs.takeAnyValue();
+    masm.loadValue(Address(genObj, GeneratorObject::offsetOfNewTargetSlot()), newTarget);
+    masm.move32(Imm32(0), constructing);
+    {
+        Label notConstructing;
+        masm.branchTestObject(Assembler::NotEqual, newTarget, &notConstructing);
+        masm.pushValue(newTarget);
+        masm.move32(Imm32(CalleeToken_FunctionConstructing), constructing);
+        masm.bind(&notConstructing);
+    }
+    regs.add(newTarget);
+
+    // Push |undefined| for all formals.
+    Register scratch2 = regs.takeAny();
+    Label loop, loopDone;
+    masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch2);
+    masm.bind(&loop);
+    masm.branchTest32(Assembler::Zero, scratch2, scratch2, &loopDone);
+    {
+        masm.pushValue(UndefinedValue());
+        masm.sub32(Imm32(1), scratch2);
+        masm.jump(&loop);
+    }
+    masm.bind(&loopDone);
+
+    // Push |undefined| for |this|.
+    masm.pushValue(UndefinedValue());
+
+    // Update BaselineFrame frameSize field and create the frame descriptor.
+    masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset),
+                                 scratch2);
+    masm.subStackPtrFrom(scratch2);
+    masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+    masm.makeFrameDescriptor(scratch2, JitFrame_BaselineJS, JitFrameLayout::Size());
+
+    masm.Push(Imm32(0)); // actual argc
+
+    // Duplicate PushCalleeToken with a variable instead.
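// The orPtr just below folds the CalleeToken_FunctionConstructing tag into
// the callee pointer: a CalleeToken is a JSFunction* with flag bits OR'ed
// into its low (alignment) bits. A sketch of that pointer-tagging idea with a
// plain struct and an assumed tag value, not the real CalleeToken encoding:

#include <cassert>
#include <cstdint>

struct FakeFunction { int dummy; };

static const uintptr_t kConstructingTag = 0x1;   // assumed tag value for the sketch

static uintptr_t makeCalleeToken(FakeFunction* fun, bool constructing) {
    uintptr_t bits = reinterpret_cast<uintptr_t>(fun);
    return constructing ? (bits | kConstructingTag) : bits;
}

static FakeFunction* calleeFromToken(uintptr_t token) {
    return reinterpret_cast<FakeFunction*>(token & ~kConstructingTag);
}

int main() {
    alignas(8) static FakeFunction fun{0};
    uintptr_t token = makeCalleeToken(&fun, /*constructing=*/true);
    assert(token & kConstructingTag);
    assert(calleeFromToken(token) == &fun);
    return 0;
}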
+ masm.orPtr(constructing, callee); + masm.Push(callee); + masm.Push(scratch2); // frame descriptor + + regs.add(callee); + regs.add(constructing); + + // Load the return value. + ValueOperand retVal = regs.takeAnyValue(); + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), retVal); + + // Push a fake return address on the stack. We will resume here when the + // generator returns. + Label genStart, returnTarget; +#ifdef JS_USE_LINK_REGISTER + masm.call(&genStart); +#else + masm.callAndPushReturnAddress(&genStart); +#endif + + // Add an IC entry so the return offset -> pc mapping works. + if (!appendICEntry(ICEntry::Kind_Op, masm.currentOffset())) + return false; + + masm.jump(&returnTarget); + masm.bind(&genStart); +#ifdef JS_USE_LINK_REGISTER + masm.pushReturnAddress(); +#endif + + // If profiler instrumentation is on, update lastProfilingFrame on + // current JitActivation + { + Register scratchReg = scratch2; + Label skip; + AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled()); + masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skip); + masm.loadPtr(AbsoluteAddress(cx->runtime()->addressOfProfilingActivation()), scratchReg); + masm.storePtr(masm.getStackPointer(), + Address(scratchReg, JitActivation::offsetOfLastProfilingFrame())); + masm.bind(&skip); + } + + // Construct BaselineFrame. + masm.push(BaselineFrameReg); + masm.moveStackPtrTo(BaselineFrameReg); + masm.subFromStackPtr(Imm32(BaselineFrame::Size())); + masm.checkStackAlignment(); + + // Store flags and env chain. + masm.store32(Imm32(BaselineFrame::HAS_INITIAL_ENV), frame.addressOfFlags()); + masm.unboxObject(Address(genObj, GeneratorObject::offsetOfEnvironmentChainSlot()), scratch2); + masm.storePtr(scratch2, frame.addressOfEnvironmentChain()); + + // Store the arguments object if there is one. + Label noArgsObj; + masm.unboxObject(Address(genObj, GeneratorObject::offsetOfArgsObjSlot()), scratch2); + masm.branchTestPtr(Assembler::Zero, scratch2, scratch2, &noArgsObj); + { + masm.storePtr(scratch2, frame.addressOfArgsObj()); + masm.or32(Imm32(BaselineFrame::HAS_ARGS_OBJ), frame.addressOfFlags()); + } + masm.bind(&noArgsObj); + + // Push expression slots if needed. + Label noExprStack; + Address exprStackSlot(genObj, GeneratorObject::offsetOfExpressionStackSlot()); + masm.branchTestNull(Assembler::Equal, exprStackSlot, &noExprStack); + { + masm.unboxObject(exprStackSlot, scratch2); + + Register initLength = regs.takeAny(); + masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2); + masm.load32(Address(scratch2, ObjectElements::offsetOfInitializedLength()), initLength); + + Label loop, loopDone; + masm.bind(&loop); + masm.branchTest32(Assembler::Zero, initLength, initLength, &loopDone); + { + masm.pushValue(Address(scratch2, 0)); + masm.addPtr(Imm32(sizeof(Value)), scratch2); + masm.sub32(Imm32(1), initLength); + masm.jump(&loop); + } + masm.bind(&loopDone); + + masm.patchableCallPreBarrier(exprStackSlot, MIRType::Value); + masm.storeValue(NullValue(), exprStackSlot); + regs.add(initLength); + } + + masm.bind(&noExprStack); + masm.pushValue(retVal); + + if (resumeKind == GeneratorObject::NEXT) { + // Determine the resume address based on the yieldIndex and the + // yieldIndex -> native table in the BaselineScript. 
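// The loads that follow implement the yieldIndex -> native-code mapping: the
// BaselineScript stores a table of native offsets, one per yield, and RESUME
// indexes it with the yieldIndex saved in the generator object. A sketch of
// that lookup over plain arrays, with invented field names:

#include <cassert>
#include <cstdint>
#include <vector>

struct FakeBaselineScript {
    const uint8_t* codeBase;              // start of the jitcode
    std::vector<uint32_t> yieldEntries;   // native offset for each yield index
};

static const uint8_t* resumeAddress(const FakeBaselineScript& script, uint32_t yieldIndex) {
    // Equivalent of load32(yieldEntriesOffset) + addPtr + indexed loadPtr.
    return script.codeBase + script.yieldEntries.at(yieldIndex);
}

int main() {
    static const uint8_t fakeCode[64] = {};
    FakeBaselineScript script{fakeCode, {4, 16, 40}};
    assert(resumeAddress(script, 1) == fakeCode + 16);
    return 0;
}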
+ masm.load32(Address(scratch1, BaselineScript::offsetOfYieldEntriesOffset()), scratch2); + masm.addPtr(scratch2, scratch1); + masm.unboxInt32(Address(genObj, GeneratorObject::offsetOfYieldIndexSlot()), scratch2); + masm.loadPtr(BaseIndex(scratch1, scratch2, ScaleFromElemWidth(sizeof(uintptr_t))), scratch1); + + // Mark as running and jump to the generator's JIT code. + masm.storeValue(Int32Value(GeneratorObject::YIELD_INDEX_RUNNING), + Address(genObj, GeneratorObject::offsetOfYieldIndexSlot())); + masm.jump(scratch1); + } else { + MOZ_ASSERT(resumeKind == GeneratorObject::THROW || resumeKind == GeneratorObject::CLOSE); + + // Update the frame's frameSize field. + masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset), + scratch2); + masm.movePtr(scratch2, scratch1); + masm.subStackPtrFrom(scratch2); + masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); + masm.loadBaselineFramePtr(BaselineFrameReg, scratch2); + + prepareVMCall(); + pushArg(Imm32(resumeKind)); + pushArg(retVal); + pushArg(genObj); + pushArg(scratch2); + + JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(GeneratorThrowInfo); + if (!code) + return false; + + // Create the frame descriptor. + masm.subStackPtrFrom(scratch1); + masm.makeFrameDescriptor(scratch1, JitFrame_BaselineJS, ExitFrameLayout::Size()); + + // Push the frame descriptor and a dummy return address (it doesn't + // matter what we push here, frame iterators will use the frame pc + // set in jit::GeneratorThrowOrClose). + masm.push(scratch1); + + // On ARM64, the callee will push the return address. +#ifndef JS_CODEGEN_ARM64 + masm.push(ImmWord(0)); +#endif + masm.jump(code); + } + + // If the generator script has no JIT code, call into the VM. + masm.bind(&interpret); + + prepareVMCall(); + if (resumeKind == GeneratorObject::NEXT) { + pushArg(ImmGCPtr(cx->names().next)); + } else if (resumeKind == GeneratorObject::THROW) { + pushArg(ImmGCPtr(cx->names().throw_)); + } else { + MOZ_ASSERT(resumeKind == GeneratorObject::CLOSE); + pushArg(ImmGCPtr(cx->names().close)); + } + + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), retVal); + pushArg(retVal); + pushArg(genObj); + + if (!callVM(InterpretResumeInfo)) + return false; + + // After the generator returns, we restore the stack pointer, push the + // return value and we're done. 
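// When the generator script has no baseline code, RESUME above falls back to
// InterpretResume and passes the resume kind as a property name (next, throw
// or close). A sketch of that three-way selection:

#include <cassert>
#include <cstring>

enum class ResumeKind { Next, Throw, Close };

static const char* resumeKindName(ResumeKind kind) {
    switch (kind) {
      case ResumeKind::Next:  return "next";
      case ResumeKind::Throw: return "throw";
      case ResumeKind::Close: return "close";
    }
    return "next";  // unreachable; keeps the compiler happy
}

int main() {
    assert(std::strcmp(resumeKindName(ResumeKind::Throw), "throw") == 0);
    return 0;
}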
+ masm.bind(&returnTarget); + masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), masm.getStackPointer()); + frame.popn(2); + frame.push(R0); + return true; +} + +typedef bool (*CheckSelfHostedFn)(JSContext*, HandleValue); +static const VMFunction CheckSelfHostedInfo = + FunctionInfo(js::Debug_CheckSelfHosted, "Debug_CheckSelfHosted"); + +bool +BaselineCompiler::emit_JSOP_DEBUGCHECKSELFHOSTED() +{ +#ifdef DEBUG + frame.syncStack(0); + + masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0); + + prepareVMCall(); + pushArg(R0); + if (!callVM(CheckSelfHostedInfo)) + return false; +#endif + return true; + +} + +bool +BaselineCompiler::emit_JSOP_IS_CONSTRUCTING() +{ + frame.push(MagicValue(JS_IS_CONSTRUCTING)); + return true; +} + +bool +BaselineCompiler::emit_JSOP_JUMPTARGET() +{ + if (!script->hasScriptCounts()) + return true; + PCCounts* counts = script->maybeGetPCCounts(pc); + uint64_t* counterAddr = &counts->numExec(); + masm.inc64(AbsoluteAddress(counterAddr)); + return true; +} diff --git a/js/src/jit/BaselineCompiler.h b/js/src/jit/BaselineCompiler.h new file mode 100644 index 000000000..9adf65c27 --- /dev/null +++ b/js/src/jit/BaselineCompiler.h @@ -0,0 +1,357 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_BaselineCompiler_h +#define jit_BaselineCompiler_h + +#include "jit/FixedList.h" +#if defined(JS_CODEGEN_X86) +# include "jit/x86/BaselineCompiler-x86.h" +#elif defined(JS_CODEGEN_X64) +# include "jit/x64/BaselineCompiler-x64.h" +#elif defined(JS_CODEGEN_ARM) +# include "jit/arm/BaselineCompiler-arm.h" +#elif defined(JS_CODEGEN_ARM64) +# include "jit/arm64/BaselineCompiler-arm64.h" +#elif defined(JS_CODEGEN_MIPS32) +# include "jit/mips32/BaselineCompiler-mips32.h" +#elif defined(JS_CODEGEN_MIPS64) +# include "jit/mips64/BaselineCompiler-mips64.h" +#elif defined(JS_CODEGEN_NONE) +# include "jit/none/BaselineCompiler-none.h" +#else +# error "Unknown architecture!" 
+#endif + +namespace js { +namespace jit { + +#define OPCODE_LIST(_) \ + _(JSOP_NOP) \ + _(JSOP_NOP_DESTRUCTURING) \ + _(JSOP_LABEL) \ + _(JSOP_POP) \ + _(JSOP_POPN) \ + _(JSOP_DUPAT) \ + _(JSOP_ENTERWITH) \ + _(JSOP_LEAVEWITH) \ + _(JSOP_DUP) \ + _(JSOP_DUP2) \ + _(JSOP_SWAP) \ + _(JSOP_PICK) \ + _(JSOP_GOTO) \ + _(JSOP_IFEQ) \ + _(JSOP_IFNE) \ + _(JSOP_AND) \ + _(JSOP_OR) \ + _(JSOP_NOT) \ + _(JSOP_POS) \ + _(JSOP_LOOPHEAD) \ + _(JSOP_LOOPENTRY) \ + _(JSOP_VOID) \ + _(JSOP_UNDEFINED) \ + _(JSOP_HOLE) \ + _(JSOP_NULL) \ + _(JSOP_TRUE) \ + _(JSOP_FALSE) \ + _(JSOP_ZERO) \ + _(JSOP_ONE) \ + _(JSOP_INT8) \ + _(JSOP_INT32) \ + _(JSOP_UINT16) \ + _(JSOP_UINT24) \ + _(JSOP_DOUBLE) \ + _(JSOP_STRING) \ + _(JSOP_SYMBOL) \ + _(JSOP_OBJECT) \ + _(JSOP_CALLSITEOBJ) \ + _(JSOP_REGEXP) \ + _(JSOP_LAMBDA) \ + _(JSOP_LAMBDA_ARROW) \ + _(JSOP_BITOR) \ + _(JSOP_BITXOR) \ + _(JSOP_BITAND) \ + _(JSOP_LSH) \ + _(JSOP_RSH) \ + _(JSOP_URSH) \ + _(JSOP_ADD) \ + _(JSOP_SUB) \ + _(JSOP_MUL) \ + _(JSOP_DIV) \ + _(JSOP_MOD) \ + _(JSOP_POW) \ + _(JSOP_LT) \ + _(JSOP_LE) \ + _(JSOP_GT) \ + _(JSOP_GE) \ + _(JSOP_EQ) \ + _(JSOP_NE) \ + _(JSOP_STRICTEQ) \ + _(JSOP_STRICTNE) \ + _(JSOP_CONDSWITCH) \ + _(JSOP_CASE) \ + _(JSOP_DEFAULT) \ + _(JSOP_LINENO) \ + _(JSOP_BITNOT) \ + _(JSOP_NEG) \ + _(JSOP_NEWARRAY) \ + _(JSOP_SPREADCALLARRAY) \ + _(JSOP_NEWARRAY_COPYONWRITE) \ + _(JSOP_INITELEM_ARRAY) \ + _(JSOP_NEWOBJECT) \ + _(JSOP_NEWINIT) \ + _(JSOP_INITELEM) \ + _(JSOP_INITELEM_GETTER) \ + _(JSOP_INITELEM_SETTER) \ + _(JSOP_INITELEM_INC) \ + _(JSOP_MUTATEPROTO) \ + _(JSOP_INITPROP) \ + _(JSOP_INITLOCKEDPROP) \ + _(JSOP_INITHIDDENPROP) \ + _(JSOP_INITPROP_GETTER) \ + _(JSOP_INITPROP_SETTER) \ + _(JSOP_ARRAYPUSH) \ + _(JSOP_GETELEM) \ + _(JSOP_SETELEM) \ + _(JSOP_STRICTSETELEM) \ + _(JSOP_CALLELEM) \ + _(JSOP_DELELEM) \ + _(JSOP_STRICTDELELEM) \ + _(JSOP_IN) \ + _(JSOP_GETGNAME) \ + _(JSOP_BINDGNAME) \ + _(JSOP_SETGNAME) \ + _(JSOP_STRICTSETGNAME) \ + _(JSOP_SETNAME) \ + _(JSOP_STRICTSETNAME) \ + _(JSOP_GETPROP) \ + _(JSOP_SETPROP) \ + _(JSOP_STRICTSETPROP) \ + _(JSOP_CALLPROP) \ + _(JSOP_DELPROP) \ + _(JSOP_STRICTDELPROP) \ + _(JSOP_LENGTH) \ + _(JSOP_GETXPROP) \ + _(JSOP_GETALIASEDVAR) \ + _(JSOP_SETALIASEDVAR) \ + _(JSOP_GETNAME) \ + _(JSOP_BINDNAME) \ + _(JSOP_DELNAME) \ + _(JSOP_GETIMPORT) \ + _(JSOP_GETINTRINSIC) \ + _(JSOP_BINDVAR) \ + _(JSOP_DEFVAR) \ + _(JSOP_DEFCONST) \ + _(JSOP_DEFLET) \ + _(JSOP_DEFFUN) \ + _(JSOP_GETLOCAL) \ + _(JSOP_SETLOCAL) \ + _(JSOP_GETARG) \ + _(JSOP_SETARG) \ + _(JSOP_CHECKLEXICAL) \ + _(JSOP_INITLEXICAL) \ + _(JSOP_INITGLEXICAL) \ + _(JSOP_CHECKALIASEDLEXICAL) \ + _(JSOP_INITALIASEDLEXICAL) \ + _(JSOP_UNINITIALIZED) \ + _(JSOP_CALL) \ + _(JSOP_CALLITER) \ + _(JSOP_FUNCALL) \ + _(JSOP_FUNAPPLY) \ + _(JSOP_NEW) \ + _(JSOP_EVAL) \ + _(JSOP_STRICTEVAL) \ + _(JSOP_SPREADCALL) \ + _(JSOP_SPREADNEW) \ + _(JSOP_SPREADEVAL) \ + _(JSOP_STRICTSPREADEVAL) \ + _(JSOP_OPTIMIZE_SPREADCALL)\ + _(JSOP_IMPLICITTHIS) \ + _(JSOP_GIMPLICITTHIS) \ + _(JSOP_INSTANCEOF) \ + _(JSOP_TYPEOF) \ + _(JSOP_TYPEOFEXPR) \ + _(JSOP_THROWMSG) \ + _(JSOP_THROW) \ + _(JSOP_THROWING) \ + _(JSOP_TRY) \ + _(JSOP_FINALLY) \ + _(JSOP_GOSUB) \ + _(JSOP_RETSUB) \ + _(JSOP_PUSHLEXICALENV) \ + _(JSOP_POPLEXICALENV) \ + _(JSOP_FRESHENLEXICALENV) \ + _(JSOP_RECREATELEXICALENV) \ + _(JSOP_DEBUGLEAVELEXICALENV) \ + _(JSOP_PUSHVARENV) \ + _(JSOP_POPVARENV) \ + _(JSOP_EXCEPTION) \ + _(JSOP_DEBUGGER) \ + _(JSOP_ARGUMENTS) \ + _(JSOP_RUNONCE) \ + _(JSOP_REST) \ + _(JSOP_TOASYNC) \ + _(JSOP_TOID) \ + _(JSOP_TOSTRING) \ + _(JSOP_TABLESWITCH) \ 
+ _(JSOP_ITER) \ + _(JSOP_MOREITER) \ + _(JSOP_ISNOITER) \ + _(JSOP_ENDITER) \ + _(JSOP_GENERATOR) \ + _(JSOP_INITIALYIELD) \ + _(JSOP_YIELD) \ + _(JSOP_DEBUGAFTERYIELD) \ + _(JSOP_FINALYIELDRVAL) \ + _(JSOP_RESUME) \ + _(JSOP_CALLEE) \ + _(JSOP_GETRVAL) \ + _(JSOP_SETRVAL) \ + _(JSOP_RETRVAL) \ + _(JSOP_RETURN) \ + _(JSOP_FUNCTIONTHIS) \ + _(JSOP_GLOBALTHIS) \ + _(JSOP_CHECKISOBJ) \ + _(JSOP_CHECKTHIS) \ + _(JSOP_CHECKRETURN) \ + _(JSOP_NEWTARGET) \ + _(JSOP_SUPERCALL) \ + _(JSOP_SPREADSUPERCALL) \ + _(JSOP_THROWSETCONST) \ + _(JSOP_THROWSETALIASEDCONST) \ + _(JSOP_THROWSETCALLEE) \ + _(JSOP_INITHIDDENPROP_GETTER) \ + _(JSOP_INITHIDDENPROP_SETTER) \ + _(JSOP_INITHIDDENELEM) \ + _(JSOP_INITHIDDENELEM_GETTER) \ + _(JSOP_INITHIDDENELEM_SETTER) \ + _(JSOP_CHECKOBJCOERCIBLE) \ + _(JSOP_DEBUGCHECKSELFHOSTED) \ + _(JSOP_JUMPTARGET) \ + _(JSOP_IS_CONSTRUCTING) + +class BaselineCompiler : public BaselineCompilerSpecific +{ + FixedList