/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CodeGenerator.h"

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/SizePrintfMacros.h"

#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"
#include "jsprf.h"
#include "jsstr.h"

#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "gc/Nursery.h"
#include "gc/StoreBuffer-inl.h"
#include "irregexp/NativeRegExpMacroAssembler.h"
#include "jit/AtomicOperations.h"
#include "jit/BaselineCompiler.h"
#include "jit/IonBuilder.h"
#include "jit/IonCaches.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitcodeMap.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
#include "jit/RangeAnalysis.h"
#include "jit/SharedICHelpers.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/MatchPairs.h"
#include "vm/RegExpObject.h"
#include "vm/RegExpStatics.h"
#include "vm/TraceLogging.h"
#include "vm/Unicode.h"

#include "jsboolinlines.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/Interpreter-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
using JS::GenericNaN;

namespace js {
namespace jit {

// This out-of-line cache is used to do a double dispatch including it-self and
// the wrapped IonCache.
class OutOfLineUpdateCache :
  public OutOfLineCodeBase<CodeGenerator>,
  public IonCacheVisitor
{
  private:
    LInstruction* lir_;
    size_t cacheIndex_;
    RepatchLabel entry_;

  public:
    OutOfLineUpdateCache(LInstruction* lir, size_t cacheIndex)
      : lir_(lir),
        cacheIndex_(cacheIndex)
    { }

    void bind(MacroAssembler* masm) {
        // The binding of the initial jump is done in
        // CodeGenerator::visitOutOfLineCache.
    }

    size_t getCacheIndex() const {
        return cacheIndex_;
    }
    LInstruction* lir() const {
        return lir_;
    }
    RepatchLabel& entry() {
        return entry_;
    }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineCache(this);
    }

    // ICs' visit functions delegating the work to the CodeGen visit funtions.
#define VISIT_CACHE_FUNCTION(op)                                        \
    void visit##op##IC(CodeGenerator* codegen) {                        \
        CodeGenerator::DataPtr<op##IC> ic(codegen, getCacheIndex());    \
        codegen->visit##op##IC(this, ic);                               \
    }

    IONCACHE_KIND_LIST(VISIT_CACHE_FUNCTION)
#undef VISIT_CACHE_FUNCTION
};

// This function is declared here because it needs to instantiate an
// OutOfLineUpdateCache, but we want to keep it visible inside the
// CodeGeneratorShared such as we can specialize inline caches in function of
// the architecture.
void
CodeGeneratorShared::addCache(LInstruction* lir, size_t cacheIndex)
{
    if (cacheIndex == SIZE_MAX) {
        masm.setOOM();
        return;
    }

    DataPtr<IonCache> cache(this, cacheIndex);
    MInstruction* mir = lir->mirRaw()->toInstruction();
    if (mir->resumePoint())
        cache->setScriptedLocation(mir->block()->info().script(),
                                   mir->resumePoint()->pc());
    else
        cache->setIdempotent();

    OutOfLineUpdateCache* ool = new(alloc()) OutOfLineUpdateCache(lir, cacheIndex);
    addOutOfLineCode(ool, mir);

    cache->emitInitialJump(masm, ool->entry());
    masm.bind(ool->rejoin());
}

void
CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache* ool)
{
    DataPtr<IonCache> cache(this, ool->getCacheIndex());

    // Register the location of the OOL path in the IC.
    cache->setFallbackLabel(masm.labelForPatch());
    masm.bind(&ool->entry());

    // Dispatch to ICs' accept functions.
    cache->accept(this, ool);
}

StringObject*
MNewStringObject::templateObj() const
{
    return &templateObj_->as<StringObject>();
}

CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
  : CodeGeneratorSpecific(gen, graph, masm)
  , ionScriptLabels_(gen->alloc())
  , scriptCounts_(nullptr)
  , simdRefreshTemplatesDuringLink_(0)
{
}

CodeGenerator::~CodeGenerator()
{
    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numSymbolicAccesses() == 0);
    js_delete(scriptCounts_);
}

typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
static const VMFunction StringToNumberInfo =
    FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");

void
CodeGenerator::visitValueToInt32(LValueToInt32* lir)
{
    ValueOperand operand = ToValue(lir, LValueToInt32::Input);
    Register output = ToRegister(lir->output());
    FloatRegister temp = ToFloatRegister(lir->tempFloat());

    MDefinition* input;
    if (lir->mode() == LValueToInt32::NORMAL)
        input = lir->mirNormal()->input();
    else
        input = lir->mirTruncate()->input();

    Label fails;
    if (lir->mode() == LValueToInt32::TRUNCATE) {
        OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

        // We can only handle strings in truncation contexts, like bitwise
        // operations.
        Label* stringEntry;
        Label* stringRejoin;
        Register stringReg;
        if (input->mightBeType(MIRType::String)) {
            stringReg = ToRegister(lir->temp());
            OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg),
                                                 StoreFloatRegisterTo(temp));
            stringEntry = oolString->entry();
            stringRejoin = oolString->rejoin();
        } else {
            stringReg = InvalidReg;
            stringEntry = nullptr;
            stringRejoin = nullptr;
        }

        masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin, oolDouble->entry(),
                                  stringReg, temp, output, &fails);
        masm.bind(oolDouble->rejoin());
    } else {
        masm.convertValueToInt32(operand, input, temp, output, &fails,
                                 lir->mirNormal()->canBeNegativeZero(),
                                 lir->mirNormal()->conversion());
    }

    bailoutFrom(&fails, lir->snapshot());
}

void
CodeGenerator::visitValueToDouble(LValueToDouble* lir)
{
    MToDouble* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToDouble::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Register tag = masm.splitTagForTest(operand);

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
        masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
        masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
        hasBoolean = true;
        hasUndefined = true;
        if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
            masm.branchTestNull(Assembler::Equal, tag, &isNull);
            hasNull = true;
        }
    }

    bailout(lir->snapshot());

    if (hasNull) {
        masm.bind(&isNull);
        masm.loadConstantDouble(0.0, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        masm.bind(&isUndefined);
        masm.loadConstantDouble(GenericNaN(), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToDouble(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToDouble(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    masm.unboxDouble(operand, output);
    masm.bind(&done);
}

void
CodeGenerator::visitValueToFloat32(LValueToFloat32* lir)
{
    MToFloat32* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Register tag = masm.splitTagForTest(operand);

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
        masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
        masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
        hasBoolean = true;
        hasUndefined = true;
        if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
            masm.branchTestNull(Assembler::Equal, tag, &isNull);
            hasNull = true;
        }
    }

    bailout(lir->snapshot());

    if (hasNull) {
        masm.bind(&isNull);
        masm.loadConstantFloat32(0.0f, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        masm.bind(&isUndefined);
        masm.loadConstantFloat32(float(GenericNaN()), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToFloat32(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToFloat32(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    // ARM and MIPS may not have a double register available if we've
    // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    masm.unboxDouble(operand, ScratchDoubleReg);
    masm.convertDoubleToFloat32(ScratchDoubleReg, output);
#else
    masm.unboxDouble(operand, output);
    masm.convertDoubleToFloat32(output, output);
#endif
    masm.bind(&done);
}

void
CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir)
{
    masm.convertInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir)
{
    masm.convertFloat32ToDouble(ToFloatRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir)
{
    masm.convertDoubleToFloat32(ToFloatRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir)
{
    masm.convertInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
}

void
CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir)
{
    Label fail;
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    masm.convertDoubleToInt32(input, output, &fail, lir->mir()->canBeNegativeZero());
    bailoutFrom(&fail, lir->snapshot());
}

void
CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir)
{
    Label fail;
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    masm.convertFloat32ToInt32(input, output, &fail, lir->mir()->canBeNegativeZero());
    bailoutFrom(&fail, lir->snapshot());
}

void
CodeGenerator::emitOOLTestObject(Register objreg,
                                 Label* ifEmulatesUndefined,
                                 Label* ifDoesntEmulateUndefined,
                                 Register scratch)
{
    saveVolatile(scratch);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(objreg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
    masm.storeCallBoolResult(scratch);
    restoreVolatile(scratch);

    masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
    masm.jump(ifDoesntEmulateUndefined);
}

// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy.  (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.)  We check truthiness inline except
// when we're testing it on a proxy (or if TI guarantees us that the specified
// object will never emulate |undefined|), in which case out-of-line code will
// call EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator>
{
    Register objreg_;
    Register scratch_;

    Label* ifEmulatesUndefined_;
    Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
    bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

  public:
    OutOfLineTestObject()
#ifdef DEBUG
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr)
#endif
    { }

    void accept(CodeGenerator* codegen) final override {
        MOZ_ASSERT(initialized());
        codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_, ifDoesntEmulateUndefined_,
                                   scratch_);
    }

    // Specify the register where the object to be tested is found, labels to
    // jump to if the object is truthy or falsy, and a scratch register for
    // use in the out-of-line path.
    void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                            Label* ifDoesntEmulateUndefined, Register scratch)
    {
        MOZ_ASSERT(!initialized());
        MOZ_ASSERT(ifEmulatesUndefined);
        objreg_ = objreg;
        scratch_ = scratch;
        ifEmulatesUndefined_ = ifEmulatesUndefined;
        ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
    }
};

// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code.  The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject
{
    Label label1_;
    Label label2_;

  public:
    OutOfLineTestObjectWithLabels() { }

    Label* label1() { return &label1_; }
    Label* label2() { return &label2_; }
};

void
CodeGenerator::testObjectEmulatesUndefinedKernel(Register objreg,
                                                 Label* ifEmulatesUndefined,
                                                 Label* ifDoesntEmulateUndefined,
                                                 Register scratch, OutOfLineTestObject* ool)
{
    ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch);

    // Perform a fast-path check of the object's class flags if the object's
    // not a proxy.  Let out-of-line code handle the slow cases that require
    // saving registers, making a function call, and restoring registers.
    masm.branchTestObjectTruthy(false, objreg, scratch, ool->entry(), ifEmulatesUndefined);
}

void
CodeGenerator::branchTestObjectEmulatesUndefined(Register objreg,
                                                 Label* ifEmulatesUndefined,
                                                 Label* ifDoesntEmulateUndefined,
                                                 Register scratch, OutOfLineTestObject* ool)
{
    MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
               "ifDoesntEmulateUndefined will be bound to the fallthrough path");

    testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                                      scratch, ool);
    masm.bind(ifDoesntEmulateUndefined);
}

void
CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                           Label* ifEmulatesUndefined,
                                           Label* ifDoesntEmulateUndefined,
                                           Register scratch, OutOfLineTestObject* ool)
{
    testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                                      scratch, ool);
    masm.jump(ifDoesntEmulateUndefined);
}

void
CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
                                     const LDefinition* scratch1, const LDefinition* scratch2,
                                     FloatRegister fr,
                                     Label* ifTruthy, Label* ifFalsy,
                                     OutOfLineTestObject* ool,
                                     MDefinition* valueMIR)
{
    // Count the number of possible type tags we might have, so we'll know when
    // we've checked them all and hence can avoid emitting a tag check for the
    // last one.  In particular, whenever tagCount is 1 that means we've tried
    // all but one of them already so we know exactly what's left based on the
    // mightBe* booleans.
    bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined);
    bool mightBeNull = valueMIR->mightBeType(MIRType::Null);
    bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean);
    bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32);
    bool mightBeObject = valueMIR->mightBeType(MIRType::Object);
    bool mightBeString = valueMIR->mightBeType(MIRType::String);
    bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol);
    bool mightBeDouble = valueMIR->mightBeType(MIRType::Double);
    int tagCount = int(mightBeUndefined) + int(mightBeNull) +
        int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
        int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble);

    MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0);

    // If we know we're null or undefined, we're definitely falsy, no
    // need to even check the tag.
    if (int(mightBeNull) + int(mightBeUndefined) == tagCount) {
        masm.jump(ifFalsy);
        return;
    }

    Register tag = masm.splitTagForTest(value);

    if (mightBeUndefined) {
        MOZ_ASSERT(tagCount > 1);
        masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
        --tagCount;
    }

    if (mightBeNull) {
        MOZ_ASSERT(tagCount > 1);
        masm.branchTestNull(Assembler::Equal, tag, ifFalsy);
        --tagCount;
    }

    if (mightBeBoolean) {
        MOZ_ASSERT(tagCount != 0);
        Label notBoolean;
        if (tagCount != 1)
            masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
        masm.branchTestBooleanTruthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notBoolean);
        --tagCount;
    }

    if (mightBeInt32) {
        MOZ_ASSERT(tagCount != 0);
        Label notInt32;
        if (tagCount != 1)
            masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
        masm.branchTestInt32Truthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notInt32);
        --tagCount;
    }

    if (mightBeObject) {
        MOZ_ASSERT(tagCount != 0);
        if (ool) {
            Label notObject;

            if (tagCount != 1)
                masm.branchTestObject(Assembler::NotEqual, tag, &notObject);

            Register objreg = masm.extractObject(value, ToRegister(scratch1));
            testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool);

            masm.bind(&notObject);
        } else {
            if (tagCount != 1)
                masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
            // Else just fall through to truthiness.
        }
        --tagCount;
    } else {
        MOZ_ASSERT(!ool,
                   "We better not have an unused OOL path, since the code generator will try to "
                   "generate code for it but we never set up its labels, which will cause null "
                   "derefs of those labels.");
    }

    if (mightBeString) {
        // Test if a string is non-empty.
        MOZ_ASSERT(tagCount != 0);
        Label notString;
        if (tagCount != 1)
            masm.branchTestString(Assembler::NotEqual, tag, &notString);
        masm.branchTestStringTruthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notString);
        --tagCount;
    }

    if (mightBeSymbol) {
        // All symbols are truthy.
        MOZ_ASSERT(tagCount != 0);
        if (tagCount != 1)
            masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
        // Else fall through to ifTruthy.
        --tagCount;
    }

    if (mightBeDouble) {
        MOZ_ASSERT(tagCount == 1);
        // If we reach here the value is a double.
        masm.unboxDouble(value, fr);
        masm.branchTestDoubleTruthy(false, fr, ifFalsy);
        --tagCount;
    }

    MOZ_ASSERT(tagCount == 0);

    // Fall through for truthy.
}

void
CodeGenerator::testValueTruthy(const ValueOperand& value,
                               const LDefinition* scratch1, const LDefinition* scratch2,
                               FloatRegister fr,
                               Label* ifTruthy, Label* ifFalsy,
                               OutOfLineTestObject* ool,
                               MDefinition* valueMIR)
{
    testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool, valueMIR);
    masm.jump(ifTruthy);
}

void
CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
{
    MIRType inputType = lir->mir()->input()->type();
    MOZ_ASSERT(inputType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
               "If the object couldn't emulate undefined, this should have been folded.");

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
    Register input = ToRegister(lir->input());

    if (lir->mir()->operandMightEmulateUndefined()) {
        if (inputType == MIRType::ObjectOrNull)
            masm.branchTestPtr(Assembler::Zero, input, input, falsy);

        OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());

        testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()), ool);
    } else {
        MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
        testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(), lir->ifFalsy());
    }
}

void
CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir)
{
    OutOfLineTestObject* ool = nullptr;
    MDefinition* input = lir->mir()->input();
    // Unfortunately, it's possible that someone (e.g. phi elimination) switched
    // out our input after we did cacheOperandMightEmulateUndefined.  So we
    // might think it can emulate undefined _and_ know that it can't be an
    // object.
    if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType::Object)) {
        ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());
    }

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

    testValueTruthy(ToValue(lir, LTestVAndBranch::Input),
                    lir->temp1(), lir->temp2(),
                    ToFloatRegister(lir->tempFloat()),
                    truthy, falsy, ool, input);
}

void
CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir)
{
    MFunctionDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Label* lastLabel;
    size_t casesWithFallback;

    // Determine if the last case is fallback or an ordinary case.
    if (!mir->hasFallback()) {
        MOZ_ASSERT(mir->numCases() > 0);
        casesWithFallback = mir->numCases();
        lastLabel = skipTrivialBlocks(mir->getCaseBlock(mir->numCases() - 1))->lir()->label();
    } else {
        casesWithFallback = mir->numCases() + 1;
        lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
    }

    // Compare function pointers, except for the last case.
    for (size_t i = 0; i < casesWithFallback - 1; i++) {
        MOZ_ASSERT(i < mir->numCases());
        LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
        if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
            masm.branchPtr(Assembler::Equal, Address(input, JSObject::offsetOfGroup()),
                           ImmGCPtr(funcGroup), target->label());
        } else {
            JSFunction* func = mir->getCase(i);
            masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
        }
    }

    // Jump to the last case.
    masm.jump(lastLabel);
}

void
CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir)
{
    MObjectGroupDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());

    // Load the incoming ObjectGroup in temp.
    masm.loadPtr(Address(input, JSObject::offsetOfGroup()), temp);

    // Compare ObjectGroups.
    MacroAssembler::BranchGCPtr lastBranch;
    LBlock* lastBlock = nullptr;
    InlinePropertyTable* propTable = mir->propTable();
    for (size_t i = 0; i < mir->numCases(); i++) {
        JSFunction* func = mir->getCase(i);
        LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();

        DebugOnly<bool> found = false;
        for (size_t j = 0; j < propTable->numEntries(); j++) {
            if (propTable->getFunction(j) != func)
                continue;

            if (lastBranch.isInitialized())
                lastBranch.emit(masm);

            ObjectGroup* group = propTable->getObjectGroup(j);
            lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(group),
                                                     target->label());
            lastBlock = target;
            found = true;
        }
        MOZ_ASSERT(found);
    }

    // Jump to fallback block if we have an unknown ObjectGroup. If there's no
    // fallback block, we should have handled all cases.

    if (!mir->hasFallback()) {
        MOZ_ASSERT(lastBranch.isInitialized());
#ifdef DEBUG
        Label ok;
        lastBranch.relink(&ok);
        lastBranch.emit(masm);
        masm.assumeUnreachable("Unexpected ObjectGroup");
        masm.bind(&ok);
#endif
        if (!isNextBlock(lastBlock))
            masm.jump(lastBlock->label());
        return;
    }

    LBlock* fallback = skipTrivialBlocks(mir->getFallback())->lir();
    if (!lastBranch.isInitialized()) {
        if (!isNextBlock(fallback))
            masm.jump(fallback->label());
        return;
    }

    lastBranch.invertCondition();
    lastBranch.relink(fallback->label());
    lastBranch.emit(masm);

    if (!isNextBlock(lastBlock))
        masm.jump(lastBlock->label());
}

void
CodeGenerator::visitBooleanToString(LBooleanToString* lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());
    const JSAtomState& names = GetJitContext()->runtime->names();
    Label true_, done;

    masm.branchTest32(Assembler::NonZero, input, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);

    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);

    masm.bind(&done);
}

void
CodeGenerator::emitIntToString(Register input, Register output, Label* ool)
{
    masm.branch32(Assembler::AboveOrEqual, input, Imm32(StaticStrings::INT_STATIC_LIMIT), ool);

    // Fast path for small integers.
    masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().intStaticTable), output);
    masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}

typedef JSFlatString* (*IntToStringFn)(ExclusiveContext*, int);
static const VMFunction IntToStringInfo =
    FunctionInfo<IntToStringFn>(Int32ToString<CanGC>, "Int32ToString");

void
CodeGenerator::visitIntToString(LIntToString* lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    emitIntToString(input, output, ool->entry());

    masm.bind(ool->rejoin());
}

typedef JSString* (*DoubleToStringFn)(ExclusiveContext*, double);
static const VMFunction DoubleToStringInfo =
    FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>, "NumberToString");

void
CodeGenerator::visitDoubleToString(LDoubleToString* lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register temp = ToRegister(lir->tempInt());
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    // Try double to integer conversion and run integer to string code.
    masm.convertDoubleToInt32(input, temp, ool->entry(), true);
    emitIntToString(temp, output, ool->entry());

    masm.bind(ool->rejoin());
}

typedef JSString* (*PrimitiveToStringFn)(JSContext*, HandleValue);
static const VMFunction PrimitiveToStringInfo =
    FunctionInfo<PrimitiveToStringFn>(ToStringSlow<CanGC>, "ToStringSlow");

void
CodeGenerator::visitValueToString(LValueToString* lir)
{
    ValueOperand input = ToValue(lir, LValueToString::Input);
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    Label done;
    Register tag = masm.splitTagForTest(input);
    const JSAtomState& names = GetJitContext()->runtime->names();

    // String
    if (lir->mir()->input()->mightBeType(MIRType::String)) {
        Label notString;
        masm.branchTestString(Assembler::NotEqual, tag, &notString);
        masm.unboxString(input, output);
        masm.jump(&done);
        masm.bind(&notString);
    }

    // Integer
    if (lir->mir()->input()->mightBeType(MIRType::Int32)) {
        Label notInteger;
        masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
        Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
        unboxed = masm.extractInt32(input, unboxed);
        emitIntToString(unboxed, output, ool->entry());
        masm.jump(&done);
        masm.bind(&notInteger);
    }

    // Double
    if (lir->mir()->input()->mightBeType(MIRType::Double)) {
        // Note: no fastpath. Need two extra registers and can only convert doubles
        // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
        masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
    }

    // Undefined
    if (lir->mir()->input()->mightBeType(MIRType::Undefined)) {
        Label notUndefined;
        masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
        masm.movePtr(ImmGCPtr(names.undefined), output);
        masm.jump(&done);
        masm.bind(&notUndefined);
    }

    // Null
    if (lir->mir()->input()->mightBeType(MIRType::Null)) {
        Label notNull;
        masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
        masm.movePtr(ImmGCPtr(names.null), output);
        masm.jump(&done);
        masm.bind(&notNull);
    }

    // Boolean
    if (lir->mir()->input()->mightBeType(MIRType::Boolean)) {
        Label notBoolean, true_;
        masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
        masm.branchTestBooleanTruthy(true, input, &true_);
        masm.movePtr(ImmGCPtr(names.false_), output);
        masm.jump(&done);
        masm.bind(&true_);
        masm.movePtr(ImmGCPtr(names.true_), output);
        masm.jump(&done);
        masm.bind(&notBoolean);
    }

    // Object
    if (lir->mir()->input()->mightBeType(MIRType::Object)) {
        // Bail.
        MOZ_ASSERT(lir->mir()->fallible());
        Label bail;
        masm.branchTestObject(Assembler::Equal, tag, &bail);
        bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->input()->mightBeType(MIRType::Symbol))
        masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());

#ifdef DEBUG
    masm.assumeUnreachable("Unexpected type for MValueToString.");
#endif

    masm.bind(&done);
    masm.bind(ool->rejoin());
}

typedef JSObject* (*ToObjectFn)(JSContext*, HandleValue, bool);
static const VMFunction ToObjectInfo =
    FunctionInfo<ToObjectFn>(ToObjectSlow, "ToObjectSlow");

void
CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir)
{
    ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input);
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)),
                                   StoreRegisterTo(output));

    Label done;
    masm.branchTestObject(Assembler::Equal, input, &done);
    masm.branchTestNull(Assembler::NotEqual, input, ool->entry());

    masm.bind(&done);
    masm.unboxNonDouble(input, output);

    masm.bind(ool->rejoin());
}

typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, JSObject*);
static const VMFunction CloneRegExpObjectInfo =
    FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject, "CloneRegExpObject");

void
CodeGenerator::visitRegExp(LRegExp* lir)
{
    pushArg(ImmGCPtr(lir->mir()->source()));
    callVM(CloneRegExpObjectInfo, lir);
}

// Amount of space to reserve on the stack when executing RegExps inline.
static const size_t RegExpReservedStack = sizeof(irregexp::InputOutputData)
                                        + sizeof(MatchPairs)
                                        + RegExpObject::MaxPairCount * sizeof(MatchPair);

static size_t
RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset)
{
    return inputOutputDataStartOffset + sizeof(irregexp::InputOutputData) + sizeof(MatchPairs);
}

static Address
RegExpPairCountAddress(MacroAssembler& masm, size_t inputOutputDataStartOffset)
{
    return Address(masm.getStackPointer(), inputOutputDataStartOffset
                                           + sizeof(irregexp::InputOutputData)
                                           + MatchPairs::offsetOfPairCount());
}

// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough,
// otherwise jump to notFound or failure.
static bool PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Register input, Register lastIndex, Register temp1, Register temp2, Register temp3, size_t inputOutputDataStartOffset, RegExpShared::CompilationMode mode, Label* notFound, Label* failure) { size_t matchPairsStartOffset = inputOutputDataStartOffset + sizeof(irregexp::InputOutputData); size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset); Address inputStartAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputStart)); Address inputEndAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputEnd)); Address matchesPointerAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, matches)); Address startIndexAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, startIndex)); Address endIndexAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, endIndex)); Address matchResultAddress(masm.getStackPointer(), inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, result)); Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset); Address pairsPointerAddress(masm.getStackPointer(), matchPairsStartOffset + MatchPairs::offsetOfPairs()); Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset); RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global()); if (!res) return false; #ifdef JS_USE_LINK_REGISTER if (mode != RegExpShared::MatchOnly) masm.pushReturnAddress(); #endif if (mode == RegExpShared::Normal) { // First, fill in a skeletal MatchPairs instance on the stack. This will be // passed to the OOL stub in the caller if we aren't able to execute the // RegExp inline, and that stub needs to be able to determine whether the // execution finished successfully. 
masm.store32(Imm32(1), pairCountAddress); masm.store32(Imm32(-1), pairsVectorAddress); masm.computeEffectiveAddress(pairsVectorAddress, temp1); masm.storePtr(temp1, pairsPointerAddress); } // Check for a linear input string. masm.branchIfRopeOrExternal(input, temp1, failure); // Get the RegExpShared for the RegExp. masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp1); masm.branchPtr(Assembler::Equal, temp1, ImmWord(0), failure); // ES6 21.2.2.2 step 2. // See RegExp.cpp ExecuteRegExp for more detail. { Label done; masm.branchTest32(Assembler::Zero, Address(temp1, RegExpShared::offsetOfFlags()), Imm32(UnicodeFlag), &done); // If input is latin1, there should not be surrogate pair. masm.branchLatin1String(input, &done); // Check if |lastIndex > 0 && lastIndex < input->length()|. // lastIndex should already have no sign here. masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done); masm.loadStringLength(input, temp2); masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done); // Check if input[lastIndex] is trail surrogate. masm.loadStringChars(input, temp2); masm.computeEffectiveAddress(BaseIndex(temp2, lastIndex, TimesTwo), temp3); masm.load16ZeroExtend(Address(temp3, 0), temp3); masm.branch32(Assembler::Below, temp3, Imm32(unicode::TrailSurrogateMin), &done); masm.branch32(Assembler::Above, temp3, Imm32(unicode::TrailSurrogateMax), &done); // Check if input[lastIndex-1] is lead surrogate. masm.move32(lastIndex, temp3); masm.sub32(Imm32(1), temp3); masm.computeEffectiveAddress(BaseIndex(temp2, temp3, TimesTwo), temp3); masm.load16ZeroExtend(Address(temp3, 0), temp3); masm.branch32(Assembler::Below, temp3, Imm32(unicode::LeadSurrogateMin), &done); masm.branch32(Assembler::Above, temp3, Imm32(unicode::LeadSurrogateMax), &done); // Move lastIndex to lead surrogate. masm.subPtr(Imm32(1), lastIndex); masm.bind(&done); } if (mode == RegExpShared::Normal) { // Don't handle RegExps with excessive parens. 
masm.load32(Address(temp1, RegExpShared::offsetOfParenCount()), temp2); masm.branch32(Assembler::AboveOrEqual, temp2, Imm32(RegExpObject::MaxPairCount), failure); // Fill in the paren count in the MatchPairs on the stack. masm.add32(Imm32(1), temp2); masm.store32(temp2, pairCountAddress); } // Load the code pointer for the type of input string we have, and compute // the input start/end pointers in the InputOutputData. Register codePointer = temp1; { masm.loadStringChars(input, temp2); masm.storePtr(temp2, inputStartAddress); masm.loadStringLength(input, temp3); Label isLatin1, done; masm.branchLatin1String(input, &isLatin1); { masm.lshiftPtr(Imm32(1), temp3); masm.loadPtr(Address(temp1, RegExpShared::offsetOfTwoByteJitCode(mode)), codePointer); } masm.jump(&done); { masm.bind(&isLatin1); masm.loadPtr(Address(temp1, RegExpShared::offsetOfLatin1JitCode(mode)), codePointer); } masm.bind(&done); masm.addPtr(temp3, temp2); masm.storePtr(temp2, inputEndAddress); } // Check the RegExpShared has been compiled for this type of input. masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure); masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer); // Finish filling in the InputOutputData instance on the stack. if (mode == RegExpShared::Normal) { masm.computeEffectiveAddress(Address(masm.getStackPointer(), matchPairsStartOffset), temp2); masm.storePtr(temp2, matchesPointerAddress); } else { // Use InputOutputData.endIndex itself for output. masm.computeEffectiveAddress(endIndexAddress, temp2); masm.storePtr(temp2, endIndexAddress); } masm.storePtr(lastIndex, startIndexAddress); masm.store32(Imm32(0), matchResultAddress); // Save any volatile inputs. LiveGeneralRegisterSet volatileRegs; if (lastIndex.volatile_()) volatileRegs.add(lastIndex); if (input.volatile_()) volatileRegs.add(input); if (regexp.volatile_()) volatileRegs.add(regexp); // Execute the RegExp. 
masm.computeEffectiveAddress(Address(masm.getStackPointer(), inputOutputDataStartOffset), temp2); masm.PushRegsInMask(volatileRegs); masm.setupUnalignedABICall(temp3); masm.passABIArg(temp2); masm.callWithABI(codePointer); masm.PopRegsInMask(volatileRegs); Label success; masm.branch32(Assembler::Equal, matchResultAddress, Imm32(RegExpRunStatus_Success_NotFound), notFound); masm.branch32(Assembler::Equal, matchResultAddress, Imm32(RegExpRunStatus_Error), failure); // Lazily update the RegExpStatics. masm.movePtr(ImmPtr(res), temp1); Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput()); Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput()); Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource()); Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex()); masm.patchableCallPreBarrier(pendingInputAddress, MIRType::String); masm.patchableCallPreBarrier(matchesInputAddress, MIRType::String); masm.patchableCallPreBarrier(lazySourceAddress, MIRType::String); masm.storePtr(input, pendingInputAddress); masm.storePtr(input, matchesInputAddress); masm.storePtr(lastIndex, Address(temp1, RegExpStatics::offsetOfLazyIndex())); masm.store32(Imm32(1), Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation())); masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp2); masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3); masm.storePtr(temp3, lazySourceAddress); masm.load32(Address(temp2, RegExpShared::offsetOfFlags()), temp3); masm.store32(temp3, Address(temp1, RegExpStatics::offsetOfLazyFlags())); if (mode == RegExpShared::MatchOnly) { // endIndex is passed via temp3. 
masm.load32(endIndexAddress, temp3); } return true; } static void CopyStringChars(MacroAssembler& masm, Register to, Register from, Register len, Register byteOpScratch, size_t fromWidth, size_t toWidth); class CreateDependentString { Register string_; Register temp_; Label* failure_; enum class FallbackKind : uint8_t { InlineString, FatInlineString, NotInlineString, Count }; mozilla::EnumeratedArray fallbacks_, joins_; public: // Generate code that creates DependentString. // Caller should call generateFallback after masm.ret(), to generate // fallback path. void generate(MacroAssembler& masm, const JSAtomState& names, bool latin1, Register string, Register base, Register temp1, Register temp2, BaseIndex startIndexAddress, BaseIndex limitIndexAddress, Label* failure); // Generate fallback path for creating DependentString. void generateFallback(MacroAssembler& masm, LiveRegisterSet regsToSave); }; void CreateDependentString::generate(MacroAssembler& masm, const JSAtomState& names, bool latin1, Register string, Register base, Register temp1, Register temp2, BaseIndex startIndexAddress, BaseIndex limitIndexAddress, Label* failure) { string_ = string; temp_ = temp2; failure_ = failure; // Compute the string length. masm.load32(startIndexAddress, temp2); masm.load32(limitIndexAddress, temp1); masm.sub32(temp2, temp1); Label done, nonEmpty; // Zero length matches use the empty string. masm.branchTest32(Assembler::NonZero, temp1, temp1, &nonEmpty); masm.movePtr(ImmGCPtr(names.empty), string); masm.jump(&done); masm.bind(&nonEmpty); Label notInline; int32_t maxInlineLength = latin1 ? (int32_t) JSFatInlineString::MAX_LENGTH_LATIN1 : (int32_t) JSFatInlineString::MAX_LENGTH_TWO_BYTE; masm.branch32(Assembler::Above, temp1, Imm32(maxInlineLength), ¬Inline); { // Make a thin or fat inline string. Label stringAllocated, fatInline; int32_t maxThinInlineLength = latin1 ? 
(int32_t) JSThinInlineString::MAX_LENGTH_LATIN1 : (int32_t) JSThinInlineString::MAX_LENGTH_TWO_BYTE; masm.branch32(Assembler::Above, temp1, Imm32(maxThinInlineLength), &fatInline); int32_t thinFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_THIN_INLINE_FLAGS; masm.newGCString(string, temp2, &fallbacks_[FallbackKind::InlineString]); masm.bind(&joins_[FallbackKind::InlineString]); masm.store32(Imm32(thinFlags), Address(string, JSString::offsetOfFlags())); masm.jump(&stringAllocated); masm.bind(&fatInline); int32_t fatFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_FAT_INLINE_FLAGS; masm.newGCFatInlineString(string, temp2, &fallbacks_[FallbackKind::FatInlineString]); masm.bind(&joins_[FallbackKind::FatInlineString]); masm.store32(Imm32(fatFlags), Address(string, JSString::offsetOfFlags())); masm.bind(&stringAllocated); masm.store32(temp1, Address(string, JSString::offsetOfLength())); masm.push(string); masm.push(base); // Adjust the start index address for the above pushes. MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer()); BaseIndex newStartIndexAddress = startIndexAddress; newStartIndexAddress.offset += 2 * sizeof(void*); // Load chars pointer for the new string. masm.addPtr(ImmWord(JSInlineString::offsetOfInlineStorage()), string); // Load the source characters pointer. masm.loadStringChars(base, base); masm.load32(newStartIndexAddress, temp2); if (latin1) masm.addPtr(temp2, base); else masm.computeEffectiveAddress(BaseIndex(base, temp2, TimesTwo), base); CopyStringChars(masm, string, base, temp1, temp2, latin1 ? 1 : 2, latin1 ? 1 : 2); // Null-terminate. if (latin1) masm.store8(Imm32(0), Address(string, 0)); else masm.store16(Imm32(0), Address(string, 0)); masm.pop(base); masm.pop(string); } masm.jump(&done); masm.bind(¬Inline); { // Make a dependent string. int32_t flags = (latin1 ? 
JSString::LATIN1_CHARS_BIT : 0) | JSString::DEPENDENT_FLAGS; masm.newGCString(string, temp2, &fallbacks_[FallbackKind::NotInlineString]); masm.bind(&joins_[FallbackKind::NotInlineString]); masm.store32(Imm32(flags), Address(string, JSString::offsetOfFlags())); masm.store32(temp1, Address(string, JSString::offsetOfLength())); masm.loadPtr(Address(base, JSString::offsetOfNonInlineChars()), temp1); masm.load32(startIndexAddress, temp2); if (latin1) masm.addPtr(temp2, temp1); else masm.computeEffectiveAddress(BaseIndex(temp1, temp2, TimesTwo), temp1); masm.storePtr(temp1, Address(string, JSString::offsetOfNonInlineChars())); masm.storePtr(base, Address(string, JSDependentString::offsetOfBase())); // Follow any base pointer if the input is itself a dependent string. // Watch for undepended strings, which have a base pointer but don't // actually share their characters with it. Label noBase; masm.branchTest32(Assembler::Zero, Address(base, JSString::offsetOfFlags()), Imm32(JSString::HAS_BASE_BIT), &noBase); masm.branchTest32(Assembler::NonZero, Address(base, JSString::offsetOfFlags()), Imm32(JSString::FLAT_BIT), &noBase); masm.loadPtr(Address(base, JSDependentString::offsetOfBase()), temp1); masm.storePtr(temp1, Address(string, JSDependentString::offsetOfBase())); masm.bind(&noBase); } masm.bind(&done); } static void* AllocateString(JSContext* cx) { return js::Allocate(cx); } static void* AllocateFatInlineString(JSContext* cx) { return js::Allocate(cx); } void CreateDependentString::generateFallback(MacroAssembler& masm, LiveRegisterSet regsToSave) { regsToSave.take(string_); regsToSave.take(temp_); for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) { masm.bind(&fallbacks_[kind]); masm.PushRegsInMask(regsToSave); masm.setupUnalignedABICall(string_); masm.loadJSContext(string_); masm.passABIArg(string_); masm.callWithABI(kind == FallbackKind::FatInlineString ? 
JS_FUNC_TO_DATA_PTR(void*, AllocateFatInlineString) : JS_FUNC_TO_DATA_PTR(void*, AllocateString)); masm.storeCallPointerResult(string_); masm.PopRegsInMask(regsToSave); masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_); masm.jump(&joins_[kind]); } } static void* CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots) { return js::Allocate(cx, kind, nDynamicSlots, gc::DefaultHeap, &ArrayObject::class_); } static void CreateMatchResultFallback(MacroAssembler& masm, LiveRegisterSet regsToSave, Register object, Register temp2, Register temp5, ArrayObject* templateObj, Label* fail) { MOZ_ASSERT(templateObj->group()->clasp() == &ArrayObject::class_); regsToSave.take(object); regsToSave.take(temp2); regsToSave.take(temp5); masm.PushRegsInMask(regsToSave); masm.setupUnalignedABICall(object); masm.loadJSContext(object); masm.passABIArg(object); masm.move32(Imm32(int32_t(templateObj->asTenured().getAllocKind())), temp2); masm.passABIArg(temp2); masm.move32(Imm32(int32_t(templateObj->as().numDynamicSlots())), temp5); masm.passABIArg(temp5); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, CreateMatchResultFallbackFunc)); masm.storeCallPointerResult(object); masm.PopRegsInMask(regsToSave); masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail); masm.initGCThing(object, temp2, templateObj, true, false); } JitCode* JitCompartment::generateRegExpMatcherStub(JSContext* cx) { Register regexp = RegExpMatcherRegExpReg; Register input = RegExpMatcherStringReg; Register lastIndex = RegExpMatcherLastIndexReg; ValueOperand result = JSReturnOperand; // We are free to clobber all registers, as LRegExpMatcher is a call instruction. AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); regs.take(regexp); regs.take(lastIndex); // temp5 is used in single byte instructions when creating dependent // strings, and has restrictions on which register it can be on some // platforms. 
Register temp5; { AllocatableGeneralRegisterSet oregs = regs; do { temp5 = oregs.takeAny(); } while (!MacroAssembler::canUseInSingleByteInstruction(temp5)); regs.take(temp5); } Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); Register temp3 = regs.takeAny(); Register maybeTemp4 = InvalidReg; if (!regs.empty()) { // There are not enough registers on x86. maybeTemp4 = regs.takeAny(); } ArrayObject* templateObject = cx->compartment()->regExps.getOrCreateMatchResultTemplateObject(cx); if (!templateObject) return nullptr; // The template object should have enough space for the maximum number of // pairs this stub can handle. MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount == gc::GetGCKindSlots(templateObject->asTenured().getAllocKind())); MacroAssembler masm(cx); // The InputOutputData is placed above the return address on the stack. size_t inputOutputDataStartOffset = sizeof(void*); Label notFound, oolEntry; if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp5, inputOutputDataStartOffset, RegExpShared::Normal, ¬Found, &oolEntry)) { return nullptr; } // Construct the result. Register object = temp1; Label matchResultFallback, matchResultJoin; masm.createGCObject(object, temp2, templateObject, gc::DefaultHeap, &matchResultFallback); masm.bind(&matchResultJoin); // Initialize slots of result object. masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2); masm.storeValue(templateObject->getSlot(0), Address(temp2, 0)); masm.storeValue(templateObject->getSlot(1), Address(temp2, sizeof(Value))); size_t elementsOffset = NativeObject::offsetOfFixedElements(); #ifdef DEBUG // Assert the initial value of initializedLength and length to make sure // restoration on failure case works. 
{ Label initLengthOK, lengthOK; masm.branch32(Assembler::Equal, Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()), Imm32(templateObject->getDenseInitializedLength()), &initLengthOK); masm.assumeUnreachable("Initial value of the match object's initializedLength does not match to restoration."); masm.bind(&initLengthOK); masm.branch32(Assembler::Equal, Address(object, elementsOffset + ObjectElements::offsetOfLength()), Imm32(templateObject->length()), &lengthOK); masm.assumeUnreachable("Initial value of The match object's length does not match to restoration."); masm.bind(&lengthOK); } #endif Register matchIndex = temp2; masm.move32(Imm32(0), matchIndex); size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset); Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset); Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset); BaseIndex stringAddress(object, matchIndex, TimesEight, elementsOffset); JS_STATIC_ASSERT(sizeof(MatchPair) == 8); BaseIndex stringIndexAddress(masm.getStackPointer(), matchIndex, TimesEight, pairsVectorStartOffset + offsetof(MatchPair, start)); BaseIndex stringLimitAddress(masm.getStackPointer(), matchIndex, TimesEight, pairsVectorStartOffset + offsetof(MatchPair, limit)); // Loop to construct the match strings. There are two different loops, // depending on whether the input is latin1. CreateDependentString depStr[2]; { Label isLatin1, done; masm.branchLatin1String(input, &isLatin1); Label* failure = &oolEntry; Register temp4 = (maybeTemp4 == InvalidReg) ? lastIndex : maybeTemp4; Label failureRestore; if (maybeTemp4 == InvalidReg) { failure = &failureRestore; // Save lastIndex value to temporary space. 
masm.store32(lastIndex, Address(object, elementsOffset + ObjectElements::offsetOfLength())); } for (int isLatin = 0; isLatin <= 1; isLatin++) { if (isLatin) masm.bind(&isLatin1); Label matchLoop; masm.bind(&matchLoop); Label isUndefined, storeDone; masm.branch32(Assembler::LessThan, stringIndexAddress, Imm32(0), &isUndefined); depStr[isLatin].generate(masm, cx->names(), isLatin, temp3, input, temp4, temp5, stringIndexAddress, stringLimitAddress, failure); masm.storeValue(JSVAL_TYPE_STRING, temp3, stringAddress); masm.jump(&storeDone); masm.bind(&isUndefined); masm.storeValue(UndefinedValue(), stringAddress); masm.bind(&storeDone); masm.add32(Imm32(1), matchIndex); masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex, &done); masm.jump(&matchLoop); } if (maybeTemp4 == InvalidReg) { // Restore lastIndex value from temporary space, both for success // and failure cases. masm.load32(Address(object, elementsOffset + ObjectElements::offsetOfLength()), lastIndex); masm.jump(&done); masm.bind(&failureRestore); masm.load32(Address(object, elementsOffset + ObjectElements::offsetOfLength()), lastIndex); // Restore the match object for failure case. masm.store32(Imm32(templateObject->getDenseInitializedLength()), Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength())); masm.store32(Imm32(templateObject->length()), Address(object, elementsOffset + ObjectElements::offsetOfLength())); masm.jump(&oolEntry); } masm.bind(&done); } // Fill in the rest of the output object. 
masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength())); masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfLength())); masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2); MOZ_ASSERT(templateObject->numFixedSlots() == 0); MOZ_ASSERT(templateObject->lookupPure(cx->names().index)->slot() == 0); MOZ_ASSERT(templateObject->lookupPure(cx->names().input)->slot() == 1); masm.load32(pairsVectorAddress, temp3); masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0)); masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value))); // All done! masm.tagValue(JSVAL_TYPE_OBJECT, object, result); masm.ret(); masm.bind(¬Found); masm.moveValue(NullValue(), result); masm.ret(); // Fallback paths for CreateDependentString and createGCObject. // Need to save all registers in use when they were called. LiveRegisterSet regsToSave(RegisterSet::Volatile()); regsToSave.addUnchecked(regexp); regsToSave.addUnchecked(input); regsToSave.addUnchecked(lastIndex); regsToSave.addUnchecked(temp1); regsToSave.addUnchecked(temp2); regsToSave.addUnchecked(temp3); if (maybeTemp4 != InvalidReg) regsToSave.addUnchecked(maybeTemp4); regsToSave.addUnchecked(temp5); for (int isLatin = 0; isLatin <= 1; isLatin++) depStr[isLatin].generateFallback(masm, regsToSave); masm.bind(&matchResultFallback); CreateMatchResultFallback(masm, regsToSave, object, temp2, temp5, templateObject, &oolEntry); masm.jump(&matchResultJoin); // Use an undefined value to signal to the caller that the OOL stub needs to be called. 
masm.bind(&oolEntry); masm.moveValue(UndefinedValue(), result); masm.ret(); Linker linker(masm); AutoFlushICache afc("RegExpMatcherStub"); JitCode* code = linker.newCode(cx, OTHER_CODE); if (!code) return nullptr; #ifdef JS_ION_PERF writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub"); #endif if (cx->zone()->needsIncrementalBarrier()) code->togglePreBarriers(true, DontReprotect); return code; } class OutOfLineRegExpMatcher : public OutOfLineCodeBase { LRegExpMatcher* lir_; public: explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineRegExpMatcher(this); } LRegExpMatcher* lir() const { return lir_; } }; typedef bool (*RegExpMatcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input, int32_t lastIndex, MatchPairs* pairs, MutableHandleValue output); static const VMFunction RegExpMatcherRawInfo = FunctionInfo(RegExpMatcherRaw, "RegExpMatcherRaw"); void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) { LRegExpMatcher* lir = ool->lir(); Register lastIndex = ToRegister(lir->lastIndex()); Register input = ToRegister(lir->string()); Register regexp = ToRegister(lir->regexp()); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(lastIndex); regs.take(input); regs.take(regexp); Register temp = regs.takeAny(); masm.computeEffectiveAddress(Address(masm.getStackPointer(), sizeof(irregexp::InputOutputData)), temp); pushArg(temp); pushArg(lastIndex); pushArg(input); pushArg(regexp); // We are not using oolCallVM because we are in a Call, and that live // registers are already saved by the the register allocator. 
callVM(RegExpMatcherRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg); MOZ_ASSERT(GetValueOutput(lir) == JSReturnOperand); #if defined(JS_NUNBOX32) MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Data); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Data); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Data); #elif defined(JS_PUNBOX64) MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg); #endif masm.reserveStack(RegExpReservedStack); OutOfLineRegExpMatcher* ool = new(alloc()) OutOfLineRegExpMatcher(lir); addOutOfLineCode(ool, lir->mir()); JitCode* regExpMatcherStub = gen->compartment->jitCompartment()->regExpMatcherStubNoBarrier(); masm.call(regExpMatcherStub); masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry()); masm.bind(ool->rejoin()); masm.freeStack(RegExpReservedStack); } static const int32_t RegExpSearcherResultNotFound = -1; static const int32_t RegExpSearcherResultFailed = -2; JitCode* JitCompartment::generateRegExpSearcherStub(JSContext* cx) { Register regexp = RegExpTesterRegExpReg; Register input = RegExpTesterStringReg; Register lastIndex = RegExpTesterLastIndexReg; Register result = ReturnReg; // We are free to clobber all registers, as LRegExpSearcher is a call instruction. 
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); regs.take(regexp); regs.take(lastIndex); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); Register temp3 = regs.takeAny(); MacroAssembler masm(cx); // The InputOutputData is placed above the return address on the stack. size_t inputOutputDataStartOffset = sizeof(void*); Label notFound, oolEntry; if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp3, inputOutputDataStartOffset, RegExpShared::Normal, ¬Found, &oolEntry)) { return nullptr; } size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset); Address stringIndexAddress(masm.getStackPointer(), pairsVectorStartOffset + offsetof(MatchPair, start)); Address stringLimitAddress(masm.getStackPointer(), pairsVectorStartOffset + offsetof(MatchPair, limit)); masm.load32(stringIndexAddress, result); masm.load32(stringLimitAddress, input); masm.lshiftPtr(Imm32(15), input); masm.or32(input, result); masm.ret(); masm.bind(¬Found); masm.move32(Imm32(RegExpSearcherResultNotFound), result); masm.ret(); masm.bind(&oolEntry); masm.move32(Imm32(RegExpSearcherResultFailed), result); masm.ret(); Linker linker(masm); AutoFlushICache afc("RegExpSearcherStub"); JitCode* code = linker.newCode(cx, OTHER_CODE); if (!code) return nullptr; #ifdef JS_ION_PERF writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub"); #endif if (cx->zone()->needsIncrementalBarrier()) code->togglePreBarriers(true, DontReprotect); return code; } class OutOfLineRegExpSearcher : public OutOfLineCodeBase { LRegExpSearcher* lir_; public: explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineRegExpSearcher(this); } LRegExpSearcher* lir() const { return lir_; } }; typedef bool (*RegExpSearcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input, int32_t lastIndex, MatchPairs* pairs, int32_t* result); static const VMFunction 
RegExpSearcherRawInfo = FunctionInfo(RegExpSearcherRaw, "RegExpSearcherRaw"); void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) { LRegExpSearcher* lir = ool->lir(); Register lastIndex = ToRegister(lir->lastIndex()); Register input = ToRegister(lir->string()); Register regexp = ToRegister(lir->regexp()); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(lastIndex); regs.take(input); regs.take(regexp); Register temp = regs.takeAny(); masm.computeEffectiveAddress(Address(masm.getStackPointer(), sizeof(irregexp::InputOutputData)), temp); pushArg(temp); pushArg(lastIndex); pushArg(input); pushArg(regexp); // We are not using oolCallVM because we are in a Call, and that live // registers are already saved by the the register allocator. callVM(RegExpSearcherRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg); MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg); MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg); MOZ_ASSERT(RegExpTesterStringReg != ReturnReg); MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg); masm.reserveStack(RegExpReservedStack); OutOfLineRegExpSearcher* ool = new(alloc()) OutOfLineRegExpSearcher(lir); addOutOfLineCode(ool, lir->mir()); JitCode* regExpSearcherStub = gen->compartment->jitCompartment()->regExpSearcherStubNoBarrier(); masm.call(regExpSearcherStub); masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed), ool->entry()); masm.bind(ool->rejoin()); masm.freeStack(RegExpReservedStack); } static const int32_t RegExpTesterResultNotFound = -1; static const int32_t RegExpTesterResultFailed = -2; JitCode* JitCompartment::generateRegExpTesterStub(JSContext* cx) { Register regexp = RegExpTesterRegExpReg; Register input = 
RegExpTesterStringReg; Register lastIndex = RegExpTesterLastIndexReg; Register result = ReturnReg; MacroAssembler masm(cx); #ifdef JS_USE_LINK_REGISTER masm.pushReturnAddress(); #endif // We are free to clobber all registers, as LRegExpTester is a call instruction. AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); regs.take(regexp); regs.take(lastIndex); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); Register temp3 = regs.takeAny(); masm.reserveStack(sizeof(irregexp::InputOutputData)); Label notFound, oolEntry; if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp3, 0, RegExpShared::MatchOnly, ¬Found, &oolEntry)) { return nullptr; } Label done; // temp3 contains endIndex. masm.move32(temp3, result); masm.jump(&done); masm.bind(¬Found); masm.move32(Imm32(RegExpTesterResultNotFound), result); masm.jump(&done); masm.bind(&oolEntry); masm.move32(Imm32(RegExpTesterResultFailed), result); masm.bind(&done); masm.freeStack(sizeof(irregexp::InputOutputData)); masm.ret(); Linker linker(masm); AutoFlushICache afc("RegExpTesterStub"); JitCode* code = linker.newCode(cx, OTHER_CODE); if (!code) return nullptr; #ifdef JS_ION_PERF writePerfSpewerJitCodeProfile(code, "RegExpTesterStub"); #endif if (cx->zone()->needsIncrementalBarrier()) code->togglePreBarriers(true, DontReprotect); return code; } class OutOfLineRegExpTester : public OutOfLineCodeBase { LRegExpTester* lir_; public: explicit OutOfLineRegExpTester(LRegExpTester* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineRegExpTester(this); } LRegExpTester* lir() const { return lir_; } }; typedef bool (*RegExpTesterRawFn)(JSContext* cx, HandleObject regexp, HandleString input, int32_t lastIndex, int32_t* result); static const VMFunction RegExpTesterRawInfo = FunctionInfo(RegExpTesterRaw, "RegExpTesterRaw"); void CodeGenerator::visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool) { LRegExpTester* lir = ool->lir(); 
Register lastIndex = ToRegister(lir->lastIndex()); Register input = ToRegister(lir->string()); Register regexp = ToRegister(lir->regexp()); pushArg(lastIndex); pushArg(input); pushArg(regexp); // We are not using oolCallVM because we are in a Call, and that live // registers are already saved by the the register allocator. callVM(RegExpTesterRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpTester(LRegExpTester* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg); MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg); MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg); MOZ_ASSERT(RegExpTesterStringReg != ReturnReg); MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg); OutOfLineRegExpTester* ool = new(alloc()) OutOfLineRegExpTester(lir); addOutOfLineCode(ool, lir->mir()); JitCode* regExpTesterStub = gen->compartment->jitCompartment()->regExpTesterStubNoBarrier(); masm.call(regExpTesterStub); masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTesterResultFailed), ool->entry()); masm.bind(ool->rejoin()); } class OutOfLineRegExpPrototypeOptimizable : public OutOfLineCodeBase { LRegExpPrototypeOptimizable* ins_; public: explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins) : ins_(ins) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineRegExpPrototypeOptimizable(this); } LRegExpPrototypeOptimizable* ins() const { return ins_; } }; void CodeGenerator::visitRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins) { Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); Register temp = ToRegister(ins->temp()); OutOfLineRegExpPrototypeOptimizable* ool = new(alloc()) OutOfLineRegExpPrototypeOptimizable(ins); addOutOfLineCode(ool, ins->mir()); masm.loadJSContext(temp); masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), 
temp); size_t offset = JSCompartment::offsetOfRegExps() + RegExpCompartment::offsetOfOptimizableRegExpPrototypeShape(); masm.loadPtr(Address(temp, offset), temp); masm.loadPtr(Address(object, ShapedObject::offsetOfShape()), output); masm.branchPtr(Assembler::NotEqual, output, temp, ool->entry()); masm.move32(Imm32(0x1), output); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(OutOfLineRegExpPrototypeOptimizable* ool) { LRegExpPrototypeOptimizable* ins = ool->ins(); Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); saveVolatile(output); masm.setupUnalignedABICall(output); masm.loadJSContext(output); masm.passABIArg(output); masm.passABIArg(object); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpPrototypeOptimizableRaw)); masm.storeCallBoolResult(output); restoreVolatile(output); masm.jump(ool->rejoin()); } class OutOfLineRegExpInstanceOptimizable : public OutOfLineCodeBase { LRegExpInstanceOptimizable* ins_; public: explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins) : ins_(ins) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineRegExpInstanceOptimizable(this); } LRegExpInstanceOptimizable* ins() const { return ins_; } }; void CodeGenerator::visitRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins) { Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); Register temp = ToRegister(ins->temp()); OutOfLineRegExpInstanceOptimizable* ool = new(alloc()) OutOfLineRegExpInstanceOptimizable(ins); addOutOfLineCode(ool, ins->mir()); masm.loadJSContext(temp); masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), temp); size_t offset = JSCompartment::offsetOfRegExps() + RegExpCompartment::offsetOfOptimizableRegExpInstanceShape(); masm.loadPtr(Address(temp, offset), temp); masm.loadPtr(Address(object, ShapedObject::offsetOfShape()), output); masm.branchPtr(Assembler::NotEqual, output, temp, ool->entry()); 
masm.move32(Imm32(0x1), output); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(OutOfLineRegExpInstanceOptimizable* ool) { LRegExpInstanceOptimizable* ins = ool->ins(); Register object = ToRegister(ins->object()); Register proto = ToRegister(ins->proto()); Register output = ToRegister(ins->output()); saveVolatile(output); masm.setupUnalignedABICall(output); masm.loadJSContext(output); masm.passABIArg(output); masm.passABIArg(object); masm.passABIArg(proto); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpInstanceOptimizableRaw)); masm.storeCallBoolResult(output); restoreVolatile(output); masm.jump(ool->rejoin()); } static void FindFirstDollarIndex(MacroAssembler& masm, Register str, Register len, Register chars, Register temp, Register output, bool isLatin1) { masm.loadStringChars(str, chars); masm.move32(Imm32(0), output); Label start, done; masm.bind(&start); if (isLatin1) masm.load8ZeroExtend(BaseIndex(chars, output, TimesOne), temp); else masm.load16ZeroExtend(BaseIndex(chars, output, TimesTwo), temp); masm.branch32(Assembler::Equal, temp, Imm32('$'), &done); masm.add32(Imm32(1), output); masm.branch32(Assembler::NotEqual, output, len, &start); masm.move32(Imm32(-1), output); masm.bind(&done); } typedef bool (*GetFirstDollarIndexRawFn)(JSContext*, HandleString, int32_t*); static const VMFunction GetFirstDollarIndexRawInfo = FunctionInfo(GetFirstDollarIndexRaw, "GetFirstDollarIndexRaw"); void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) { Register str = ToRegister(ins->str()); Register output = ToRegister(ins->output()); Register temp0 = ToRegister(ins->temp0()); Register temp1 = ToRegister(ins->temp1()); Register len = ToRegister(ins->temp2()); OutOfLineCode* ool = oolCallVM(GetFirstDollarIndexRawInfo, ins, ArgList(str), StoreRegisterTo(output)); masm.branchIfRope(str, ool->entry()); masm.loadStringLength(str, len); Label isLatin1, done; masm.branchLatin1String(str, &isLatin1); { 
FindFirstDollarIndex(masm, str, len, temp0, temp1, output, /* isLatin1 = */ false); } masm.jump(&done); { masm.bind(&isLatin1); FindFirstDollarIndex(masm, str, len, temp0, temp1, output, /* isLatin1 = */ true); } masm.bind(&done); masm.bind(ool->rejoin()); } typedef JSString* (*StringReplaceFn)(JSContext*, HandleString, HandleString, HandleString); static const VMFunction StringFlatReplaceInfo = FunctionInfo(js::str_flat_replace_string, "str_flat_replace_string"); static const VMFunction StringReplaceInfo = FunctionInfo(StringReplace, "StringReplace"); void CodeGenerator::visitStringReplace(LStringReplace* lir) { if (lir->replacement()->isConstant()) pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString())); else pushArg(ToRegister(lir->replacement())); if (lir->pattern()->isConstant()) pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString())); else pushArg(ToRegister(lir->pattern())); if (lir->string()->isConstant()) pushArg(ImmGCPtr(lir->string()->toConstant()->toString())); else pushArg(ToRegister(lir->string())); if (lir->mir()->isFlatReplacement()) callVM(StringFlatReplaceInfo, lir); else callVM(StringReplaceInfo, lir); } void CodeGenerator::emitSharedStub(ICStub::Kind kind, LInstruction* lir) { JSScript* script = lir->mirRaw()->block()->info().script(); jsbytecode* pc = lir->mirRaw()->toInstruction()->resumePoint()->pc(); #ifdef JS_USE_LINK_REGISTER // Some architectures don't push the return address on the stack but // use the link register. In that case the stack isn't aligned. Push // to make sure we are aligned. masm.Push(Imm32(0)); #endif // Create descriptor signifying end of Ion frame. uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS, JitStubFrameLayout::Size()); masm.Push(Imm32(descriptor)); // Call into the stubcode. 
CodeOffset patchOffset; IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script); EmitCallIC(&patchOffset, masm); entry.setReturnOffset(CodeOffset(masm.currentOffset())); SharedStub sharedStub(kind, entry, patchOffset); masm.propagateOOM(sharedStubs_.append(sharedStub)); // Fix up upon return. uint32_t callOffset = masm.currentOffset(); #ifdef JS_USE_LINK_REGISTER masm.freeStack(sizeof(intptr_t) * 2); #else masm.freeStack(sizeof(intptr_t)); #endif markSafepointAt(callOffset, lir); } void CodeGenerator::visitBinarySharedStub(LBinarySharedStub* lir) { JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc()); switch (jsop) { case JSOP_ADD: case JSOP_SUB: case JSOP_MUL: case JSOP_DIV: case JSOP_MOD: case JSOP_POW: emitSharedStub(ICStub::Kind::BinaryArith_Fallback, lir); break; case JSOP_LT: case JSOP_LE: case JSOP_GT: case JSOP_GE: case JSOP_EQ: case JSOP_NE: case JSOP_STRICTEQ: case JSOP_STRICTNE: emitSharedStub(ICStub::Kind::Compare_Fallback, lir); break; default: MOZ_CRASH("Unsupported jsop in shared stubs."); } } void CodeGenerator::visitUnarySharedStub(LUnarySharedStub* lir) { JSOp jsop = JSOp(*lir->mir()->resumePoint()->pc()); switch (jsop) { case JSOP_BITNOT: case JSOP_NEG: emitSharedStub(ICStub::Kind::UnaryArith_Fallback, lir); break; case JSOP_CALLPROP: case JSOP_GETPROP: case JSOP_LENGTH: emitSharedStub(ICStub::Kind::GetProp_Fallback, lir); break; default: MOZ_CRASH("Unsupported jsop in shared stubs."); } } void CodeGenerator::visitNullarySharedStub(LNullarySharedStub* lir) { jsbytecode* pc = lir->mir()->resumePoint()->pc(); JSOp jsop = JSOp(*pc); switch (jsop) { case JSOP_NEWARRAY: { uint32_t length = GET_UINT32(pc); MOZ_ASSERT(length <= INT32_MAX, "the bytecode emitter must fail to compile code that would " "produce JSOP_NEWARRAY with a length exceeding int32_t range"); // Pass length in R0. 
masm.move32(Imm32(AssertedCast(length)), R0.scratchReg()); emitSharedStub(ICStub::Kind::NewArray_Fallback, lir); break; } case JSOP_NEWOBJECT: emitSharedStub(ICStub::Kind::NewObject_Fallback, lir); break; case JSOP_NEWINIT: { JSProtoKey key = JSProtoKey(GET_UINT8(pc)); if (key == JSProto_Array) { masm.move32(Imm32(0), R0.scratchReg()); emitSharedStub(ICStub::Kind::NewArray_Fallback, lir); } else { emitSharedStub(ICStub::Kind::NewObject_Fallback, lir); } break; } default: MOZ_CRASH("Unsupported jsop in shared stubs."); } } typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject); static const VMFunction LambdaInfo = FunctionInfo(js::Lambda, "Lambda"); void CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton* lir) { pushArg(ToRegister(lir->environmentChain())); pushArg(ImmGCPtr(lir->mir()->info().fun)); callVM(LambdaInfo, lir); } void CodeGenerator::visitLambda(LLambda* lir) { Register envChain = ToRegister(lir->environmentChain()); Register output = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); const LambdaFunctionInfo& info = lir->mir()->info(); OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, ArgList(ImmGCPtr(info.fun), envChain), StoreRegisterTo(output)); MOZ_ASSERT(!info.singletonType); masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry()); emitLambdaInit(output, envChain, info); if (info.flags & JSFunction::EXTENDED) { MOZ_ASSERT(info.fun->allowSuperProperty() || info.fun->isSelfHostedBuiltin() || info.fun->isAsync()); static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized"); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(0))); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1))); } masm.bind(ool->rejoin()); } class OutOfLineLambdaArrow : public OutOfLineCodeBase { public: LLambdaArrow* lir; Label entryNoPop_; explicit OutOfLineLambdaArrow(LLambdaArrow* lir) : lir(lir) { } void 
accept(CodeGenerator* codegen) { codegen->visitOutOfLineLambdaArrow(this); } Label* entryNoPop() { return &entryNoPop_; } }; typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue); static const VMFunction LambdaArrowInfo = FunctionInfo(js::LambdaArrow, "LambdaArrow"); void CodeGenerator::visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool) { Register envChain = ToRegister(ool->lir->environmentChain()); ValueOperand newTarget = ToValue(ool->lir, LLambdaArrow::NewTargetValue); Register output = ToRegister(ool->lir->output()); const LambdaFunctionInfo& info = ool->lir->mir()->info(); // When we get here, we may need to restore part of the newTarget, // which has been conscripted into service as a temp register. masm.pop(newTarget.scratchReg()); masm.bind(ool->entryNoPop()); saveLive(ool->lir); pushArg(newTarget); pushArg(envChain); pushArg(ImmGCPtr(info.fun)); callVM(LambdaArrowInfo, ool->lir); StoreRegisterTo(output).generate(this); restoreLiveIgnore(ool->lir, StoreRegisterTo(output).clobbered()); masm.jump(ool->rejoin()); } void CodeGenerator::visitLambdaArrow(LLambdaArrow* lir) { Register envChain = ToRegister(lir->environmentChain()); ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue); Register output = ToRegister(lir->output()); const LambdaFunctionInfo& info = lir->mir()->info(); OutOfLineLambdaArrow* ool = new (alloc()) OutOfLineLambdaArrow(lir); addOutOfLineCode(ool, lir->mir()); MOZ_ASSERT(!info.useSingletonForClone); if (info.singletonType) { // If the function has a singleton type, this instruction will only be // executed once so we don't bother inlining it. masm.jump(ool->entryNoPop()); masm.bind(ool->rejoin()); return; } // There's not enough registers on x86 with the profiler enabled to request // a temp. Instead, spill part of one of the values, being prepared to // restore it if necessary on the out of line path. 
Register tempReg = newTarget.scratchReg(); masm.push(newTarget.scratchReg()); masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry()); masm.pop(newTarget.scratchReg()); emitLambdaInit(output, envChain, info); // Initialize extended slots. Lexical |this| is stored in the first one. MOZ_ASSERT(info.flags & JSFunction::EXTENDED); static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized"); static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0, "|new.target| must be stored in first slot"); masm.storeValue(newTarget, Address(output, FunctionExtended::offsetOfExtendedSlot(0))); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1))); masm.bind(ool->rejoin()); } void CodeGenerator::emitLambdaInit(Register output, Register envChain, const LambdaFunctionInfo& info) { // Initialize nargs and flags. We do this with a single uint32 to avoid // 16-bit writes. union { struct S { uint16_t nargs; uint16_t flags; } s; uint32_t word; } u; u.s.nargs = info.nargs; u.s.flags = info.flags; MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2); masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs())); masm.storePtr(ImmGCPtr(info.scriptOrLazyScript), Address(output, JSFunction::offsetOfNativeOrScript())); masm.storePtr(envChain, Address(output, JSFunction::offsetOfEnvironment())); masm.storePtr(ImmGCPtr(info.fun->displayAtom()), Address(output, JSFunction::offsetOfAtom())); } typedef bool (*SetFunNameFn)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind); static const VMFunction SetFunNameInfo = FunctionInfo(js::SetFunctionNameIfNoOwnName, "SetFunName"); void CodeGenerator::visitSetFunName(LSetFunName* lir) { pushArg(Imm32(lir->mir()->prefixKind())); pushArg(ToValue(lir, LSetFunName::NameValue)); pushArg(ToRegister(lir->fun())); callVM(SetFunNameInfo, lir); } void CodeGenerator::visitOsiPoint(LOsiPoint* lir) { // Note: markOsiPoint ensures enough 
space exists between the last // LOsiPoint and this one to patch adjacent call instructions. MOZ_ASSERT(masm.framePushed() == frameSize()); uint32_t osiCallPointOffset = markOsiPoint(lir); LSafepoint* safepoint = lir->associatedSafepoint(); MOZ_ASSERT(!safepoint->osiCallPointOffset()); safepoint->setOsiCallPointOffset(osiCallPointOffset); #ifdef DEBUG // There should be no movegroups or other instructions between // an instruction and its OsiPoint. This is necessary because // we use the OsiPoint's snapshot from within VM calls. for (LInstructionReverseIterator iter(current->rbegin(lir)); iter != current->rend(); iter++) { if (*iter == lir) continue; MOZ_ASSERT(!iter->isMoveGroup()); MOZ_ASSERT(iter->safepoint() == safepoint); break; } #endif #ifdef CHECK_OSIPOINT_REGISTERS if (shouldVerifyOsiPointRegs(safepoint)) verifyOsiPointRegs(safepoint); #endif } void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); } // Out-of-line path to execute any move groups between the start of a loop // header and its interrupt check, then invoke the interrupt handler. class OutOfLineInterruptCheckImplicit : public OutOfLineCodeBase { public: LBlock* block; LInterruptCheck* lir; OutOfLineInterruptCheckImplicit(LBlock* block, LInterruptCheck* lir) : block(block), lir(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineInterruptCheckImplicit(this); } }; typedef bool (*InterruptCheckFn)(JSContext*); static const VMFunction InterruptCheckInfo = FunctionInfo(InterruptCheck, "InterruptCheck"); void CodeGenerator::visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ool) { #ifdef CHECK_OSIPOINT_REGISTERS // This is path is entered from the patched back-edge of the loop. This // means that the JitAtivation flags used for checking the validity of the // OSI points are not reseted by the path generated by generateBody, so we // have to reset it here. 
resetOsiPointRegs(ool->lir->safepoint()); #endif LInstructionIterator iter = ool->block->begin(); for (; iter != ool->block->end(); iter++) { if (iter->isMoveGroup()) { // Replay this move group that preceds the interrupt check at the // start of the loop header. Any incoming jumps here will be from // the backedge and will skip over the move group emitted inline. visitMoveGroup(iter->toMoveGroup()); } else { break; } } MOZ_ASSERT(*iter == ool->lir); saveLive(ool->lir); callVM(InterruptCheckInfo, ool->lir); restoreLive(ool->lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitTableSwitch(LTableSwitch* ins) { MTableSwitch* mir = ins->mir(); Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label(); const LAllocation* temp; if (mir->getOperand(0)->type() != MIRType::Int32) { temp = ins->tempInt()->output(); // The input is a double, so try and convert it to an integer. // If it does not fit in an integer, take the default case. masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp), defaultcase, false); } else { temp = ins->index(); } emitTableSwitchDispatch(mir, ToRegister(temp), ToRegisterOrInvalid(ins->tempPointer())); } void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) { MTableSwitch* mir = ins->mir(); Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label(); Register index = ToRegister(ins->tempInt()); ValueOperand value = ToValue(ins, LTableSwitchV::InputValue); Register tag = masm.extractTag(value, index); masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase); Label unboxInt, isInt; masm.branchTestInt32(Assembler::Equal, tag, &unboxInt); { FloatRegister floatIndex = ToFloatRegister(ins->tempFloat()); masm.unboxDouble(value, floatIndex); masm.convertDoubleToInt32(floatIndex, index, defaultcase, false); masm.jump(&isInt); } masm.bind(&unboxInt); masm.unboxInt32(value, index); masm.bind(&isInt); emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer())); } typedef JSObject* 
(*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind); static const VMFunction DeepCloneObjectLiteralInfo = FunctionInfo(DeepCloneObjectLiteral, "DeepCloneObjectLiteral"); void CodeGenerator::visitCloneLiteral(LCloneLiteral* lir) { pushArg(ImmWord(TenuredObject)); pushArg(ToRegister(lir->getObjectLiteral())); callVM(DeepCloneObjectLiteralInfo, lir); } void CodeGenerator::visitParameter(LParameter* lir) { } void CodeGenerator::visitCallee(LCallee* lir) { Register callee = ToRegister(lir->output()); Address ptr(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken()); masm.loadFunctionFromCalleeToken(ptr, callee); } void CodeGenerator::visitIsConstructing(LIsConstructing* lir) { Register output = ToRegister(lir->output()); Address calleeToken(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken()); masm.loadPtr(calleeToken, output); // We must be inside a function. MOZ_ASSERT(current->mir()->info().script()->functionNonDelazifying()); // The low bit indicates whether this call is constructing, just clear the // other bits. static_assert(CalleeToken_Function == 0x0, "CalleeTokenTag value should match"); static_assert(CalleeToken_FunctionConstructing == 0x1, "CalleeTokenTag value should match"); masm.andPtr(Imm32(0x1), output); } void CodeGenerator::visitStart(LStart* lir) { } void CodeGenerator::visitReturn(LReturn* lir) { #if defined(JS_NUNBOX32) DebugOnly type = lir->getOperand(TYPE_INDEX); DebugOnly payload = lir->getOperand(PAYLOAD_INDEX); MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type); MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data); #elif defined(JS_PUNBOX64) DebugOnly result = lir->getOperand(0); MOZ_ASSERT(ToRegister(result) == JSReturnReg); #endif // Don't emit a jump to the return label if this is the last block. 
if (current->mir() != *gen->graph().poBegin()) masm.jump(&returnLabel_); } void CodeGenerator::visitOsrEntry(LOsrEntry* lir) { Register temp = ToRegister(lir->temp()); // Remember the OSR entry offset into the code buffer. masm.flushBuffer(); setOsrEntryOffset(masm.size()); #ifdef JS_TRACE_LOGGING emitTracelogStopEvent(TraceLogger_Baseline); emitTracelogStartEvent(TraceLogger_IonMonkey); #endif // If profiling, save the current frame pointer to a per-thread global field. if (isProfilerInstrumentationEnabled()) masm.profilerEnterFrame(masm.getStackPointer(), temp); // Allocate the full frame for this function // Note we have a new entry here. So we reset MacroAssembler::framePushed() // to 0, before reserving the stack. MOZ_ASSERT(masm.framePushed() == frameSize()); masm.setFramePushed(0); // Ensure that the Ion frames is properly aligned. masm.assertStackAlignment(JitStackAlignment, 0); masm.reserveStack(frameSize()); } void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) { const LAllocation* frame = lir->getOperand(0); const LDefinition* object = lir->getDef(0); const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfEnvironmentChain(); masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object)); } void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) { const LAllocation* frame = lir->getOperand(0); const LDefinition* object = lir->getDef(0); const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj(); masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object)); } void CodeGenerator::visitOsrValue(LOsrValue* value) { const LAllocation* frame = value->getOperand(0); const ValueOperand out = ToOutValue(value); const ptrdiff_t frameOffset = value->mir()->frameOffset(); masm.loadValue(Address(ToRegister(frame), frameOffset), out); } void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) { const LAllocation* frame = lir->getOperand(0); const ValueOperand out = ToOutValue(lir); Address 
flags = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags()); Address retval = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue()); masm.moveValue(UndefinedValue(), out); Label done; masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done); masm.loadValue(retval, out); masm.bind(&done); } void CodeGenerator::visitStackArgT(LStackArgT* lir) { const LAllocation* arg = lir->getArgument(); MIRType argType = lir->type(); uint32_t argslot = lir->argslot(); MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount()); int32_t stack_offset = StackOffsetOfPassedArg(argslot); Address dest(masm.getStackPointer(), stack_offset); if (arg->isFloatReg()) masm.storeDouble(ToFloatRegister(arg), dest); else if (arg->isRegister()) masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest); else masm.storeValue(arg->toConstant()->toJSValue(), dest); } void CodeGenerator::visitStackArgV(LStackArgV* lir) { ValueOperand val = ToValue(lir, 0); uint32_t argslot = lir->argslot(); MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount()); int32_t stack_offset = StackOffsetOfPassedArg(argslot); masm.storeValue(val, Address(masm.getStackPointer(), stack_offset)); } void CodeGenerator::visitMoveGroup(LMoveGroup* group) { if (!group->numMoves()) return; MoveResolver& resolver = masm.moveResolver(); for (size_t i = 0; i < group->numMoves(); i++) { const LMove& move = group->getMove(i); LAllocation from = move.from(); LAllocation to = move.to(); LDefinition::Type type = move.type(); // No bogus moves. 
MOZ_ASSERT(from != to); MOZ_ASSERT(!from.isConstant()); MoveOp::Type moveType; switch (type) { case LDefinition::OBJECT: case LDefinition::SLOTS: #ifdef JS_NUNBOX32 case LDefinition::TYPE: case LDefinition::PAYLOAD: #else case LDefinition::BOX: #endif case LDefinition::GENERAL: moveType = MoveOp::GENERAL; break; case LDefinition::INT32: moveType = MoveOp::INT32; break; case LDefinition::FLOAT32: moveType = MoveOp::FLOAT32; break; case LDefinition::DOUBLE: moveType = MoveOp::DOUBLE; break; case LDefinition::SIMD128INT: moveType = MoveOp::SIMD128INT; break; case LDefinition::SIMD128FLOAT: moveType = MoveOp::SIMD128FLOAT; break; default: MOZ_CRASH("Unexpected move type"); } masm.propagateOOM(resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType)); } masm.propagateOOM(resolver.resolve()); if (masm.oom()) return; MoveEmitter emitter(masm); #ifdef JS_CODEGEN_X86 if (group->maybeScratchRegister().isGeneralReg()) emitter.setScratchRegister(group->maybeScratchRegister().toGeneralReg()->reg()); else resolver.sortMemoryToMemoryMoves(); #endif emitter.emit(resolver); emitter.finish(); } void CodeGenerator::visitInteger(LInteger* lir) { masm.move32(Imm32(lir->getValue()), ToRegister(lir->output())); } void CodeGenerator::visitInteger64(LInteger64* lir) { masm.move64(Imm64(lir->getValue()), ToOutRegister64(lir)); } void CodeGenerator::visitPointer(LPointer* lir) { if (lir->kind() == LPointer::GC_THING) masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output())); else masm.movePtr(ImmPtr(lir->ptr()), ToRegister(lir->output())); } void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) { // No-op. 
} void CodeGenerator::visitSlots(LSlots* lir) { Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots()); masm.loadPtr(slots, ToRegister(lir->output())); } void CodeGenerator::visitLoadSlotT(LLoadSlotT* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); AnyRegister result = ToAnyRegister(lir->output()); masm.loadUnboxedValue(Address(base, offset), lir->mir()->type(), result); } void CodeGenerator::visitLoadSlotV(LLoadSlotV* lir) { ValueOperand dest = ToOutValue(lir); Register base = ToRegister(lir->input()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); masm.loadValue(Address(base, offset), dest); } void CodeGenerator::visitStoreSlotT(LStoreSlotT* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); Address dest(base, offset); if (lir->mir()->needsBarrier()) emitPreBarrier(dest); MIRType valueType = lir->mir()->value()->type(); if (valueType == MIRType::ObjectOrNull) { masm.storeObjectOrNull(ToRegister(lir->value()), dest); } else { ConstantOrRegister value; if (lir->value()->isConstant()) value = ConstantOrRegister(lir->value()->toConstant()->toJSValue()); else value = TypedOrValueRegister(valueType, ToAnyRegister(lir->value())); masm.storeUnboxedValue(value, valueType, dest, lir->mir()->slotType()); } } void CodeGenerator::visitStoreSlotV(LStoreSlotV* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(Value); const ValueOperand value = ToValue(lir, LStoreSlotV::Value); if (lir->mir()->needsBarrier()) emitPreBarrier(Address(base, offset)); masm.storeValue(value, Address(base, offset)); } static void GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard, Register obj, Register scratch, Label* miss) { if (guard.group) { masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, miss); } else { masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, miss); } } void 
CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch, const TypedOrValueRegister& output) { MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic(); Label done; for (size_t i = 0; i < mir->numReceivers(); i++) { ReceiverGuard receiver = mir->receiver(i); Label next; masm.comment("GuardReceiver"); GuardReceiver(masm, receiver, obj, scratch, &next); if (receiver.shape) { masm.comment("loadTypedOrValue"); Register target = obj; Shape* shape = mir->shape(i); if (shape->slot() < shape->numFixedSlots()) { // Fixed slot. masm.loadTypedOrValue(Address(target, NativeObject::getFixedSlotOffset(shape->slot())), output); } else { // Dynamic slot. uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value); masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch); masm.loadTypedOrValue(Address(scratch, offset), output); } } if (i == mir->numReceivers() - 1) { bailoutFrom(&next, ins->snapshot()); } else { masm.jump(&done); masm.bind(&next); } } masm.bind(&done); } void CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins) { Register obj = ToRegister(ins->obj()); ValueOperand output = GetValueOutput(ins); emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), output); } void CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins) { Register obj = ToRegister(ins->obj()); TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output())); Register temp = (output.type() == MIRType::Double) ? 
ToRegister(ins->temp()) : output.typedReg().gpr(); emitGetPropertyPolymorphic(ins, obj, temp, output); } template static void EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type) { if (type == JSVAL_TYPE_OBJECT) masm.patchableCallPreBarrier(address, MIRType::Object); else if (type == JSVAL_TYPE_STRING) masm.patchableCallPreBarrier(address, MIRType::String); } void CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch, const ConstantOrRegister& value) { MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic(); Label done; for (size_t i = 0; i < mir->numReceivers(); i++) { ReceiverGuard receiver = mir->receiver(i); Label next; GuardReceiver(masm, receiver, obj, scratch, &next); if (receiver.shape) { Register target = obj; Shape* shape = mir->shape(i); if (shape->slot() < shape->numFixedSlots()) { // Fixed slot. Address addr(target, NativeObject::getFixedSlotOffset(shape->slot())); if (mir->needsBarrier()) emitPreBarrier(addr); masm.storeConstantOrRegister(value, addr); } else { // Dynamic slot. 
masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch); Address addr(scratch, (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value)); if (mir->needsBarrier()) emitPreBarrier(addr); masm.storeConstantOrRegister(value, addr); } } if (i == mir->numReceivers() - 1) { bailoutFrom(&next, ins->snapshot()); } else { masm.jump(&done); masm.bind(&next); } } masm.bind(&done); } void CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins) { Register obj = ToRegister(ins->obj()); Register temp = ToRegister(ins->temp()); ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value); emitSetPropertyPolymorphic(ins, obj, temp, TypedOrValueRegister(value)); } void CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins) { Register obj = ToRegister(ins->obj()); Register temp = ToRegister(ins->temp()); ConstantOrRegister value; if (ins->mir()->value()->isConstant()) value = ConstantOrRegister(ins->mir()->value()->toConstant()->toJSValue()); else value = TypedOrValueRegister(ins->mir()->value()->type(), ToAnyRegister(ins->value())); emitSetPropertyPolymorphic(ins, obj, temp, value); } void CodeGenerator::visitElements(LElements* lir) { Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements()); masm.loadPtr(elements, ToRegister(lir->output())); } typedef bool (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t); static const VMFunction ConvertElementsToDoublesInfo = FunctionInfo(ObjectElements::ConvertElementsToDoubles, "ObjectElements::ConvertElementsToDoubles"); void CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles* lir) { Register elements = ToRegister(lir->elements()); OutOfLineCode* ool = oolCallVM(ConvertElementsToDoublesInfo, lir, ArgList(elements), StoreNothing()); Address convertedAddress(elements, ObjectElements::offsetOfFlags()); Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS); masm.branchTest32(Assembler::Zero, convertedAddress, bit, ool->entry()); 
masm.bind(ool->rejoin()); } void CodeGenerator::visitMaybeToDoubleElement(LMaybeToDoubleElement* lir) { Register elements = ToRegister(lir->elements()); Register value = ToRegister(lir->value()); ValueOperand out = ToOutValue(lir); FloatRegister temp = ToFloatRegister(lir->tempFloat()); Label convert, done; // If the CONVERT_DOUBLE_ELEMENTS flag is set, convert the int32 // value to double. Else, just box it. masm.branchTest32(Assembler::NonZero, Address(elements, ObjectElements::offsetOfFlags()), Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS), &convert); masm.tagValue(JSVAL_TYPE_INT32, value, out); masm.jump(&done); masm.bind(&convert); masm.convertInt32ToDouble(value, temp); masm.boxDouble(temp, out); masm.bind(&done); } typedef bool (*CopyElementsForWriteFn)(ExclusiveContext*, NativeObject*); static const VMFunction CopyElementsForWriteInfo = FunctionInfo(NativeObject::CopyElementsForWrite, "NativeObject::CopyElementsForWrite"); void CodeGenerator::visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir) { Register object = ToRegister(lir->object()); Register temp = ToRegister(lir->temp()); OutOfLineCode* ool = oolCallVM(CopyElementsForWriteInfo, lir, ArgList(object), StoreNothing()); if (lir->mir()->checkNative()) { masm.loadObjClass(object, temp); masm.branchTest32(Assembler::NonZero, Address(temp, Class::offsetOfFlags()), Imm32(Class::NON_NATIVE), ool->rejoin()); } masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp); masm.branchTest32(Assembler::NonZero, Address(temp, ObjectElements::offsetOfFlags()), Imm32(ObjectElements::COPY_ON_WRITE), ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) { Address environment(ToRegister(lir->function()), JSFunction::offsetOfEnvironment()); masm.loadPtr(environment, ToRegister(lir->output())); } void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) { Register input = ToRegister(guard->input()); Register expected = 
ToRegister(guard->expected()); Assembler::Condition cond = guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual; bailoutCmpPtr(cond, input, expected, guard->snapshot()); } void CodeGenerator::visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir) { const MGuardReceiverPolymorphic* mir = lir->mir(); Register obj = ToRegister(lir->object()); Register temp = ToRegister(lir->temp()); Label done; for (size_t i = 0; i < mir->numReceivers(); i++) { const ReceiverGuard& receiver = mir->receiver(i); Label next; GuardReceiver(masm, receiver, obj, temp, &next); if (i == mir->numReceivers() - 1) { bailoutFrom(&next, lir->snapshot()); } else { masm.jump(&done); masm.bind(&next); } } masm.bind(&done); } void CodeGenerator::visitTypeBarrierV(LTypeBarrierV* lir) { ValueOperand operand = ToValue(lir, LTypeBarrierV::Input); Register scratch = ToTempRegisterOrInvalid(lir->temp()); Label miss; masm.guardTypeSet(operand, lir->mir()->resultTypeSet(), lir->mir()->barrierKind(), scratch, &miss); bailoutFrom(&miss, lir->snapshot()); } void CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir) { Register obj = ToRegister(lir->object()); Register scratch = ToTempRegisterOrInvalid(lir->temp()); Label miss, ok; if (lir->mir()->type() == MIRType::ObjectOrNull) { masm.comment("Object or Null"); Label* nullTarget = lir->mir()->resultTypeSet()->mightBeMIRType(MIRType::Null) ? 
&ok : &miss; masm.branchTestPtr(Assembler::Zero, obj, obj, nullTarget); } else { MOZ_ASSERT(lir->mir()->type() == MIRType::Object); MOZ_ASSERT(lir->mir()->barrierKind() != BarrierKind::TypeTagOnly); } if (lir->mir()->barrierKind() != BarrierKind::TypeTagOnly) { masm.comment("Type tag only"); masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch, &miss); } bailoutFrom(&miss, lir->snapshot()); masm.bind(&ok); } void CodeGenerator::visitMonitorTypes(LMonitorTypes* lir) { ValueOperand operand = ToValue(lir, LMonitorTypes::Input); Register scratch = ToTempUnboxRegister(lir->temp()); Label matched, miss; masm.guardTypeSet(operand, lir->mir()->typeSet(), lir->mir()->barrierKind(), scratch, &miss); bailoutFrom(&miss, lir->snapshot()); } // Out-of-line path to update the store buffer. class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase { LInstruction* lir_; const LAllocation* object_; public: OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object) : lir_(lir), object_(object) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineCallPostWriteBarrier(this); } LInstruction* lir() const { return lir_; } const LAllocation* object() const { return object_; } }; static void EmitStoreBufferCheckForConstant(MacroAssembler& masm, JSObject* object, AllocatableGeneralRegisterSet& regs, Label* exit, Label* callVM) { Register temp = regs.takeAny(); const gc::TenuredCell* cell = &object->asTenured(); gc::Arena* arena = cell->arena(); Register cells = temp; masm.loadPtr(AbsoluteAddress(&arena->bufferedCells), cells); size_t index = gc::ArenaCellSet::getCellIndex(cell); size_t word; uint32_t mask; gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask); size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t); masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask), exit); // Check whether this is the sentinel set and if so call the VM to allocate // one for this arena. 
masm.branchPtr(Assembler::Equal, Address(cells, gc::ArenaCellSet::offsetOfArena()), ImmPtr(nullptr), callVM); // Add the cell to the set. masm.or32(Imm32(mask), Address(cells, offset)); masm.jump(exit); regs.add(temp); } static void EmitPostWriteBarrier(MacroAssembler& masm, Register objreg, JSObject* maybeConstant, bool isGlobal, AllocatableGeneralRegisterSet& regs) { MOZ_ASSERT_IF(isGlobal, maybeConstant); Label callVM; Label exit; // We already have a fast path to check whether a global is in the store // buffer. if (!isGlobal && maybeConstant) EmitStoreBufferCheckForConstant(masm, maybeConstant, regs, &exit, &callVM); // Call into the VM to barrier the write. masm.bind(&callVM); Register runtimereg = regs.takeAny(); masm.mov(ImmPtr(GetJitContext()->runtime), runtimereg); void (*fun)(JSRuntime*, JSObject*) = isGlobal ? PostGlobalWriteBarrier : PostWriteBarrier; masm.setupUnalignedABICall(regs.takeAny()); masm.passABIArg(runtimereg); masm.passABIArg(objreg); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, fun)); masm.bind(&exit); } void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); Register objreg; JSObject* object = nullptr; bool isGlobal = false; if (obj->isConstant()) { object = &obj->toConstant()->toObject(); isGlobal = isGlobalObject(object); objreg = regs.takeAny(); masm.movePtr(ImmGCPtr(object), objreg); } else { objreg = ToRegister(obj); regs.takeUnchecked(objreg); } EmitPostWriteBarrier(masm, objreg, object, isGlobal, regs); } void CodeGenerator::emitPostWriteBarrier(Register objreg) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); regs.takeUnchecked(objreg); EmitPostWriteBarrier(masm, objreg, nullptr, false, regs); } void CodeGenerator::visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool) { saveLiveVolatile(ool->lir()); const LAllocation* obj = ool->object(); emitPostWriteBarrier(obj); restoreLiveVolatile(ool->lir()); 
masm.jump(ool->rejoin()); } void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal, OutOfLineCode* ool) { // Check whether an object is a global that we have already barriered before // calling into the VM. if (!maybeGlobal->isConstant()) return; JSObject* obj = &maybeGlobal->toConstant()->toObject(); if (!isGlobalObject(obj)) return; JSCompartment* comp = obj->compartment(); auto addr = AbsoluteAddress(&comp->globalWriteBarriered); masm.branch32(Assembler::NotEqual, addr, Imm32(0), ool->rejoin()); } template void CodeGenerator::visitPostWriteBarrierCommonO(LPostBarrierType* lir, OutOfLineCode* ool) { addOutOfLineCode(ool, lir->mir()); Register temp = ToTempRegisterOrInvalid(lir->temp()); if (lir->object()->isConstant()) { // Constant nursery objects cannot appear here, see LIRGenerator::visitPostWriteElementBarrier. MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject())); } else { masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp, ool->rejoin()); } maybeEmitGlobalBarrierCheck(lir->object(), ool); Register valueObj = ToRegister(lir->value()); masm.branchTestPtr(Assembler::Zero, valueObj, valueObj, ool->rejoin()); masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->value()), temp, ool->entry()); masm.bind(ool->rejoin()); } template void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir, OutOfLineCode* ool) { addOutOfLineCode(ool, lir->mir()); Register temp = ToTempRegisterOrInvalid(lir->temp()); if (lir->object()->isConstant()) { // Constant nursery objects cannot appear here, see LIRGenerator::visitPostWriteElementBarrier. 
MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject())); } else { masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp, ool->rejoin()); } maybeEmitGlobalBarrierCheck(lir->object(), ool); ValueOperand value = ToValue(lir, LPostBarrierType::Input); masm.branchValueIsNurseryObject(Assembler::Equal, value, temp, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object()); visitPostWriteBarrierCommonO(lir, ool); } void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object()); visitPostWriteBarrierCommonV(lir, ool); } // Out-of-line path to update the store buffer. class OutOfLineCallPostWriteElementBarrier : public OutOfLineCodeBase { LInstruction* lir_; const LAllocation* object_; const LAllocation* index_; public: OutOfLineCallPostWriteElementBarrier(LInstruction* lir, const LAllocation* object, const LAllocation* index) : lir_(lir), object_(object), index_(index) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineCallPostWriteElementBarrier(this); } LInstruction* lir() const { return lir_; } const LAllocation* object() const { return object_; } const LAllocation* index() const { return index_; } }; void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(OutOfLineCallPostWriteElementBarrier* ool) { saveLiveVolatile(ool->lir()); const LAllocation* obj = ool->object(); const LAllocation* index = ool->index(); Register objreg = obj->isConstant() ? 
InvalidReg : ToRegister(obj); Register indexreg = ToRegister(index); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); regs.takeUnchecked(indexreg); if (obj->isConstant()) { objreg = regs.takeAny(); masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg); } else { regs.takeUnchecked(objreg); } Register runtimereg = regs.takeAny(); masm.setupUnalignedABICall(runtimereg); masm.mov(ImmPtr(GetJitContext()->runtime), runtimereg); masm.passABIArg(runtimereg); masm.passABIArg(objreg); masm.passABIArg(indexreg); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteElementBarrier)); restoreLiveVolatile(ool->lir()); masm.jump(ool->rejoin()); } void CodeGenerator::visitPostWriteElementBarrierO(LPostWriteElementBarrierO* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index()); visitPostWriteBarrierCommonO(lir, ool); } void CodeGenerator::visitPostWriteElementBarrierV(LPostWriteElementBarrierV* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index()); visitPostWriteBarrierCommonV(lir, ool); } void CodeGenerator::visitCallNative(LCallNative* call) { WrappedFunction* target = call->getSingleTarget(); MOZ_ASSERT(target); MOZ_ASSERT(target->isNative()); int callargslot = call->argslot(); int unusedStack = StackOffsetOfPassedArg(callargslot); // Registers used for callWithABI() argument-passing. const Register argContextReg = ToRegister(call->getArgContextReg()); const Register argUintNReg = ToRegister(call->getArgUintNReg()); const Register argVpReg = ToRegister(call->getArgVpReg()); // Misc. temporary registers. const Register tempReg = ToRegister(call->getTempReg()); DebugOnly initialStack = masm.framePushed(); masm.checkStackAlignment(); // Native functions have the signature: // bool (*)(JSContext*, unsigned, Value* vp) // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward // are the function arguments. 
// Allocate space for the outparam, moving the StackPointer to what will be &vp[1]. masm.adjustStack(unusedStack); // Push a Value containing the callee object: natives are allowed to access their callee before // setitng the return value. The StackPointer is moved to &vp[0]. masm.Push(ObjectValue(*target->rawJSFunction())); // Preload arguments into registers. masm.loadJSContext(argContextReg); masm.move32(Imm32(call->numActualArgs()), argUintNReg); masm.moveStackPtrTo(argVpReg); masm.Push(argUintNReg); // Construct native exit frame. uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg); masm.enterFakeExitFrameForNative(call->mir()->isConstructing()); markSafepointAt(safepointOffset, call); emitTracelogStartEvent(TraceLogger_Call); // Construct and execute call. masm.setupUnalignedABICall(tempReg); masm.passABIArg(argContextReg); masm.passABIArg(argUintNReg); masm.passABIArg(argVpReg); JSNative native = target->native(); if (call->ignoresReturnValue()) { const JSJitInfo* jitInfo = target->jitInfo(); if (jitInfo && jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) native = jitInfo->ignoresReturnValueMethod; } masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, native)); emitTracelogStopEvent(TraceLogger_Call); // Test for failure. masm.branchIfFalseBool(ReturnReg, masm.failureLabel()); // Load the outparam vp[0] into output register(s). masm.loadValue(Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()), JSReturnOperand); // The next instruction is removing the footer of the exit frame, so there // is no need for leaveFakeExitFrame. // Move the StackPointer back to its original location, unwinding the native exit frame. masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack); MOZ_ASSERT(masm.framePushed() == initialStack); } static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv) { // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. 
This // will be in the first slot but may be fixed or non-fixed. MOZ_ASSERT(obj != priv); // Check shape->numFixedSlots != 0. masm.loadPtr(Address(obj, ShapedObject::offsetOfShape()), priv); Label hasFixedSlots, done; masm.branchTest32(Assembler::NonZero, Address(priv, Shape::offsetOfSlotInfo()), Imm32(Shape::fixedSlotsMask()), &hasFixedSlots); masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), priv); masm.loadPrivate(Address(priv, 0), priv); masm.jump(&done); masm.bind(&hasFixedSlots); masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv); masm.bind(&done); } void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) { WrappedFunction* target = call->getSingleTarget(); MOZ_ASSERT(target); MOZ_ASSERT(target->isNative()); MOZ_ASSERT(target->jitInfo()); MOZ_ASSERT(call->mir()->isCallDOMNative()); int callargslot = call->argslot(); int unusedStack = StackOffsetOfPassedArg(callargslot); // Registers used for callWithABI() argument-passing. const Register argJSContext = ToRegister(call->getArgJSContext()); const Register argObj = ToRegister(call->getArgObj()); const Register argPrivate = ToRegister(call->getArgPrivate()); const Register argArgs = ToRegister(call->getArgArgs()); DebugOnly initialStack = masm.framePushed(); masm.checkStackAlignment(); // DOM methods have the signature: // bool (*)(JSContext*, HandleObject, void* private, const JSJitMethodCallArgs& args) // Where args is initialized from an argc and a vp, vp[0] is space for an // outparam and the callee, vp[1] is |this|, and vp[2] onward are the // function arguments. Note that args stores the argv, not the vp, and // argv == vp + 2. // Nestle the stack up against the pushed arguments, leaving StackPointer at // &vp[1] masm.adjustStack(unusedStack); // argObj is filled with the extracted object, then returned. 
Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj); MOZ_ASSERT(obj == argObj); // Push a Value containing the callee object: natives are allowed to access their callee before // setitng the return value. After this the StackPointer points to &vp[0]. masm.Push(ObjectValue(*target->rawJSFunction())); // Now compute the argv value. Since StackPointer is pointing to &vp[0] and // argv is &vp[2] we just need to add 2*sizeof(Value) to the current // StackPointer. JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgv == 0); JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgc == IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv); masm.computeEffectiveAddress(Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs); LoadDOMPrivate(masm, obj, argPrivate); // Push argc from the call instruction into what will become the IonExitFrame masm.Push(Imm32(call->numActualArgs())); // Push our argv onto the stack masm.Push(argArgs); // And store our JSJitMethodCallArgs* in argArgs. masm.moveStackPtrTo(argArgs); // Push |this| object for passing HandleObject. We push after argc to // maintain the same sp-relative location of the object pointer with other // DOMExitFrames. masm.Push(argObj); masm.moveStackPtrTo(argObj); // Construct native exit frame. uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext); masm.enterFakeExitFrame(IonDOMMethodExitFrameLayoutToken); markSafepointAt(safepointOffset, call); // Construct and execute call. masm.setupUnalignedABICall(argJSContext); masm.loadJSContext(argJSContext); masm.passABIArg(argJSContext); masm.passABIArg(argObj); masm.passABIArg(argPrivate); masm.passABIArg(argArgs); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->jitInfo()->method)); if (target->jitInfo()->isInfallible) { masm.loadValue(Address(masm.getStackPointer(), IonDOMMethodExitFrameLayout::offsetOfResult()), JSReturnOperand); } else { // Test for failure. 
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel()); // Load the outparam vp[0] into output register(s). masm.loadValue(Address(masm.getStackPointer(), IonDOMMethodExitFrameLayout::offsetOfResult()), JSReturnOperand); } // The next instruction is removing the footer of the exit frame, so there // is no need for leaveFakeExitFrame. // Move the StackPointer back to its original location, unwinding the native exit frame. masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack); MOZ_ASSERT(masm.framePushed() == initialStack); } typedef bool (*GetIntrinsicValueFn)(JSContext* cx, HandlePropertyName, MutableHandleValue); static const VMFunction GetIntrinsicValueInfo = FunctionInfo(GetIntrinsicValue, "GetIntrinsicValue"); void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) { pushArg(ImmGCPtr(lir->mir()->name())); callVM(GetIntrinsicValueInfo, lir); } typedef bool (*InvokeFunctionFn)(JSContext*, HandleObject, bool, bool, uint32_t, Value*, MutableHandleValue); static const VMFunction InvokeFunctionInfo = FunctionInfo(InvokeFunction, "InvokeFunction"); void CodeGenerator::emitCallInvokeFunction(LInstruction* call, Register calleereg, bool constructing, bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) { // Nestle %esp up to the argument vector. // Each path must account for framePushed_ separately, for callVM to be valid. masm.freeStack(unusedStack); pushArg(masm.getStackPointer()); // argv. pushArg(Imm32(argc)); // argc. pushArg(Imm32(ignoresReturnValue)); pushArg(Imm32(constructing)); // constructing. pushArg(calleereg); // JSFunction*. callVM(InvokeFunctionInfo, call); // Un-nestle %esp from the argument vector. No prefix was pushed. 
masm.reserveStack(unusedStack); } void CodeGenerator::visitCallGeneric(LCallGeneric* call) { Register calleereg = ToRegister(call->getFunction()); Register objreg = ToRegister(call->getTempObject()); Register nargsreg = ToRegister(call->getNargsReg()); uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot()); Label invoke, thunk, makeCall, end; // Known-target case is handled by LCallKnown. MOZ_ASSERT(!call->hasSingleTarget()); // Generate an ArgumentsRectifier. JitCode* argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier(); masm.checkStackAlignment(); // Guard that calleereg is actually a function object. masm.loadObjClass(calleereg, nargsreg); masm.branchPtr(Assembler::NotEqual, nargsreg, ImmPtr(&JSFunction::class_), &invoke); // Guard that calleereg is an interpreted function with a JSScript. // If we are constructing, also ensure the callee is a constructor. if (call->mir()->isConstructing()) { masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke); } else { masm.branchIfFunctionHasNoScript(calleereg, &invoke); masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, calleereg, objreg, &invoke); } // Knowing that calleereg is a non-native function, load the JSScript. masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg); // Load script jitcode. masm.loadBaselineOrIonRaw(objreg, objreg, &invoke); // Nestle the StackPointer up to the argument vector. masm.freeStack(unusedStack); // Construct the IonFramePrefix. uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS, JitFrameLayout::Size()); masm.Push(Imm32(call->numActualArgs())); masm.PushCalleeToken(calleereg, call->mir()->isConstructing()); masm.Push(Imm32(descriptor)); // Check whether the provided arguments satisfy target argc. // We cannot have lowered to LCallGeneric with a known target. Assert that we didn't // add any undefineds in IonBuilder. NB: MCall::numStackArgs includes |this|. 
DebugOnly numNonArgsOnStack = 1 + call->isConstructing(); MOZ_ASSERT(call->numActualArgs() == call->mir()->numStackArgs() - numNonArgsOnStack); masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nargsreg); masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()), &thunk); masm.jump(&makeCall); // Argument fixed needed. Load the ArgumentsRectifier. masm.bind(&thunk); { MOZ_ASSERT(ArgumentsRectifierReg != objreg); masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking. masm.loadPtr(Address(objreg, JitCode::offsetOfCode()), objreg); masm.move32(Imm32(call->numActualArgs()), ArgumentsRectifierReg); } // Finally call the function in objreg. masm.bind(&makeCall); uint32_t callOffset = masm.callJit(objreg); markSafepointAt(callOffset, call); // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass. // The return address has already been removed from the Ion frame. int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*); masm.adjustStack(prefixGarbage - unusedStack); masm.jump(&end); // Handle uncompiled or native functions. masm.bind(&invoke); emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->ignoresReturnValue(), call->numActualArgs(), unusedStack); masm.bind(&end); // If the return value of the constructing function is Primitive, // replace the return value with the Object from CreateThis. 
if (call->mir()->isConstructing()) { Label notPrimitive; masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, ¬Primitive); masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand); masm.bind(¬Primitive); } } typedef bool (*InvokeFunctionShuffleFn)(JSContext*, HandleObject, uint32_t, uint32_t, Value*, MutableHandleValue); static const VMFunction InvokeFunctionShuffleInfo = FunctionInfo(InvokeFunctionShuffleNewTarget, "InvokeFunctionShuffleNewTarget"); void CodeGenerator::emitCallInvokeFunctionShuffleNewTarget(LCallKnown* call, Register calleeReg, uint32_t numFormals, uint32_t unusedStack) { masm.freeStack(unusedStack); pushArg(masm.getStackPointer()); pushArg(Imm32(numFormals)); pushArg(Imm32(call->numActualArgs())); pushArg(calleeReg); callVM(InvokeFunctionShuffleInfo, call); masm.reserveStack(unusedStack); } void CodeGenerator::visitCallKnown(LCallKnown* call) { Register calleereg = ToRegister(call->getFunction()); Register objreg = ToRegister(call->getTempObject()); uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot()); WrappedFunction* target = call->getSingleTarget(); Label end, uncompiled; // Native single targets are handled by LCallNative. MOZ_ASSERT(!target->isNative()); // Missing arguments must have been explicitly appended by the IonBuilder. DebugOnly numNonArgsOnStack = 1 + call->isConstructing(); MOZ_ASSERT(target->nargs() <= call->mir()->numStackArgs() - numNonArgsOnStack); MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor()); masm.checkStackAlignment(); if (target->isClassConstructor() && !call->isConstructing()) { emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->ignoresReturnValue(), call->numActualArgs(), unusedStack); return; } MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing()); // The calleereg is known to be a non-native function, but might point to // a LazyScript instead of a JSScript. 
masm.branchIfFunctionHasNoScript(calleereg, &uncompiled); // Knowing that calleereg is a non-native function, load the JSScript. masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg); // Load script jitcode. if (call->mir()->needsArgCheck()) masm.loadBaselineOrIonRaw(objreg, objreg, &uncompiled); else masm.loadBaselineOrIonNoArgCheck(objreg, objreg, &uncompiled); // Nestle the StackPointer up to the argument vector. masm.freeStack(unusedStack); // Construct the IonFramePrefix. uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS, JitFrameLayout::Size()); masm.Push(Imm32(call->numActualArgs())); masm.PushCalleeToken(calleereg, call->mir()->isConstructing()); masm.Push(Imm32(descriptor)); // Finally call the function in objreg. uint32_t callOffset = masm.callJit(objreg); markSafepointAt(callOffset, call); // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass. // The return address has already been removed from the Ion frame. int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*); masm.adjustStack(prefixGarbage - unusedStack); masm.jump(&end); // Handle uncompiled functions. masm.bind(&uncompiled); if (call->isConstructing() && target->nargs() > call->numActualArgs()) emitCallInvokeFunctionShuffleNewTarget(call, calleereg, target->nargs(), unusedStack); else emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->ignoresReturnValue(), call->numActualArgs(), unusedStack); masm.bind(&end); // If the return value of the constructing function is Primitive, // replace the return value with the Object from CreateThis. 
if (call->mir()->isConstructing()) { Label notPrimitive; masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, ¬Primitive); masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand); masm.bind(¬Primitive); } } template void CodeGenerator::emitCallInvokeFunction(T* apply, Register extraStackSize) { Register objreg = ToRegister(apply->getTempObject()); MOZ_ASSERT(objreg != extraStackSize); // Push the space used by the arguments. masm.moveStackPtrTo(objreg); masm.Push(extraStackSize); pushArg(objreg); // argv. pushArg(ToRegister(apply->getArgc())); // argc. pushArg(Imm32(false)); // ignoresReturnValue. pushArg(Imm32(false)); // isConstrucing. pushArg(ToRegister(apply->getFunction())); // JSFunction*. // This specialization og callVM restore the extraStackSize after the call. callVM(InvokeFunctionInfo, apply, &extraStackSize); masm.Pop(extraStackSize); } // Do not bailout after the execution of this function since the stack no longer // correspond to what is expected by the snapshots. void CodeGenerator::emitAllocateSpaceForApply(Register argcreg, Register extraStackSpace, Label* end) { // Initialize the loop counter AND Compute the stack usage (if == 0) masm.movePtr(argcreg, extraStackSpace); // Align the JitFrameLayout on the JitStackAlignment. if (JitStackValueAlignment > 1) { MOZ_ASSERT(frameSize() % JitStackAlignment == 0, "Stack padding assumes that the frameSize is correct"); MOZ_ASSERT(JitStackValueAlignment == 2); Label noPaddingNeeded; // if the number of arguments is odd, then we do not need any padding. masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded); masm.addPtr(Imm32(1), extraStackSpace); masm.bind(&noPaddingNeeded); } // Reserve space for copying the arguments. NativeObject::elementsSizeMustNotOverflow(); masm.lshiftPtr(Imm32(ValueShift), extraStackSpace); masm.subFromStackPtr(extraStackSpace); #ifdef DEBUG // Put a magic value in the space reserved for padding. 
Note, this code // cannot be merged with the previous test, as not all architectures can // write below their stack pointers. if (JitStackValueAlignment > 1) { MOZ_ASSERT(JitStackValueAlignment == 2); Label noPaddingNeeded; // if the number of arguments is odd, then we do not need any padding. masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded); BaseValueIndex dstPtr(masm.getStackPointer(), argcreg); masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr); masm.bind(&noPaddingNeeded); } #endif // Skip the copy of arguments if there are none. masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, end); } // Destroys argvIndex and copyreg. void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase, Register argvIndex, Register copyreg, size_t argvSrcOffset, size_t argvDstOffset) { Label loop; masm.bind(&loop); // As argvIndex is off by 1, and we use the decBranchPtr instruction // to loop back, we have to substract the size of the word which are // copied. BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void*)); BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex, argvDstOffset - sizeof(void*)); masm.loadPtr(srcPtr, copyreg); masm.storePtr(copyreg, dstPtr); // Handle 32 bits architectures. if (sizeof(Value) == 2 * sizeof(void*)) { BaseValueIndex srcPtrLow(argvSrcBase, argvIndex, argvSrcOffset - 2 * sizeof(void*)); BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex, argvDstOffset - 2 * sizeof(void*)); masm.loadPtr(srcPtrLow, copyreg); masm.storePtr(copyreg, dstPtrLow); } masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop); } void CodeGenerator::emitPopArguments(Register extraStackSpace) { // Pop |this| and Arguments. masm.freeStack(extraStackSpace); } void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply, Register extraStackSpace) { // Holds the function nargs. Initially the number of args to the caller. 
Register argcreg = ToRegister(apply->getArgc()); Register copyreg = ToRegister(apply->getTempObject()); Label end; emitAllocateSpaceForApply(argcreg, extraStackSpace, &end); // We are making a copy of the arguments which are above the JitFrameLayout // of the current Ion frame. // // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst // Compute the source and destination offsets into the stack. size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs(); size_t argvDstOffset = 0; // Save the extra stack space, and re-use the register as a base. masm.push(extraStackSpace); Register argvSrcBase = extraStackSpace; argvSrcOffset += sizeof(void*); argvDstOffset += sizeof(void*); // Save the actual number of register, and re-use the register as an index register. masm.push(argcreg); Register argvIndex = argcreg; argvSrcOffset += sizeof(void*); argvDstOffset += sizeof(void*); // srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset // dstPtr = (StackPointer ) + argvDstOffset masm.addStackPtrTo(argvSrcBase); // Copy arguments. emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset, argvDstOffset); // Restore argcreg and the extra stack space counter. masm.pop(argcreg); masm.pop(extraStackSpace); // Join with all arguments copied and the extra stack usage computed. masm.bind(&end); // Push |this|. masm.addPtr(Imm32(sizeof(Value)), extraStackSpace); masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex)); } void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply, Register extraStackSpace) { Label noCopy, epilogue; Register tmpArgc = ToRegister(apply->getTempObject()); Register elementsAndArgc = ToRegister(apply->getElements()); // Invariants guarded in the caller: // - the array is not too long // - the array length equals its initialized length // The array length is our argc for the purposes of allocating space. 
Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength()); masm.load32(length, tmpArgc); // Allocate space for the values. emitAllocateSpaceForApply(tmpArgc, extraStackSpace, &noCopy); // Copy the values. This code is skipped entirely if there are // no values. size_t argvDstOffset = 0; Register argvSrcBase = elementsAndArgc; // Elements value masm.push(extraStackSpace); Register copyreg = extraStackSpace; argvDstOffset += sizeof(void*); masm.push(tmpArgc); Register argvIndex = tmpArgc; argvDstOffset += sizeof(void*); // Copy emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, 0, argvDstOffset); // Restore. masm.pop(elementsAndArgc); masm.pop(extraStackSpace); masm.jump(&epilogue); // Clear argc if we skipped the copy step. masm.bind(&noCopy); masm.movePtr(ImmPtr(0), elementsAndArgc); // Join with all arguments copied and the extra stack usage computed. // Note, "elements" has become "argc". masm.bind(&epilogue); // Push |this|. masm.addPtr(Imm32(sizeof(Value)), extraStackSpace); masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex)); } template void CodeGenerator::emitApplyGeneric(T* apply) { // Holds the function object. Register calleereg = ToRegister(apply->getFunction()); // Temporary register for modifying the function object. Register objreg = ToRegister(apply->getTempObject()); Register extraStackSpace = ToRegister(apply->getTempStackCounter()); // Holds the function nargs, computed in the invoker or (for // ApplyArray) in the argument pusher. Register argcreg = ToRegister(apply->getArgc()); // Unless already known, guard that calleereg is actually a function object. if (!apply->hasSingleTarget()) { masm.loadObjClass(calleereg, objreg); ImmPtr ptr = ImmPtr(&JSFunction::class_); bailoutCmpPtr(Assembler::NotEqual, objreg, ptr, apply->snapshot()); } // Copy the arguments of the current function. 
// // In the case of ApplyArray, also compute argc: the argc register // and the elements register are the same; argc must not be // referenced before the call to emitPushArguments() and elements // must not be referenced after it returns. // // objreg is dead across this call. // // extraStackSpace is garbage on entry and defined on exit. emitPushArguments(apply, extraStackSpace); masm.checkStackAlignment(); // If the function is native, only emit the call to InvokeFunction. if (apply->hasSingleTarget() && apply->getSingleTarget()->isNative()) { emitCallInvokeFunction(apply, extraStackSpace); emitPopArguments(extraStackSpace); return; } Label end, invoke; // Guard that calleereg is an interpreted function with a JSScript. masm.branchIfFunctionHasNoScript(calleereg, &invoke); // Knowing that calleereg is a non-native function, load the JSScript. masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg); // Load script jitcode. masm.loadBaselineOrIonRaw(objreg, objreg, &invoke); // Call with an Ion frame or a rectifier frame. { // Create the frame descriptor. unsigned pushed = masm.framePushed(); Register stackSpace = extraStackSpace; masm.addPtr(Imm32(pushed), stackSpace); masm.makeFrameDescriptor(stackSpace, JitFrame_IonJS, JitFrameLayout::Size()); masm.Push(argcreg); masm.Push(calleereg); masm.Push(stackSpace); // descriptor Label underflow, rejoin; // Check whether the provided arguments satisfy target argc. if (!apply->hasSingleTarget()) { Register nformals = extraStackSpace; masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nformals); masm.branch32(Assembler::Below, argcreg, nformals, &underflow); } else { masm.branch32(Assembler::Below, argcreg, Imm32(apply->getSingleTarget()->nargs()), &underflow); } // Skip the construction of the rectifier frame because we have no // underflow. masm.jump(&rejoin); // Argument fixup needed. Get ready to call the argumentsRectifier. 
{ masm.bind(&underflow); // Hardcode the address of the argumentsRectifier code. JitCode* argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier(); MOZ_ASSERT(ArgumentsRectifierReg != objreg); masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking. masm.loadPtr(Address(objreg, JitCode::offsetOfCode()), objreg); masm.movePtr(argcreg, ArgumentsRectifierReg); } masm.bind(&rejoin); // Finally call the function in objreg, as assigned by one of the paths above. uint32_t callOffset = masm.callJit(objreg); markSafepointAt(callOffset, apply); // Recover the number of arguments from the frame descriptor. masm.loadPtr(Address(masm.getStackPointer(), 0), stackSpace); masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), stackSpace); masm.subPtr(Imm32(pushed), stackSpace); // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass. // The return address has already been removed from the Ion frame. int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*); masm.adjustStack(prefixGarbage); masm.jump(&end); } // Handle uncompiled or native functions. { masm.bind(&invoke); emitCallInvokeFunction(apply, extraStackSpace); } // Pop arguments and continue. masm.bind(&end); emitPopArguments(extraStackSpace); } void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) { // Limit the number of parameters we can handle to a number that does not risk // us allocating too much stack, notably on Windows where there is a 4K guard page // that has to be touched to extend the stack. See bug 1351278. The value "3000" // is the size of the guard page minus an arbitrary, but large, safety margin. 
LSnapshot* snapshot = apply->snapshot(); Register argcreg = ToRegister(apply->getArgc()); uint32_t limit = 3000 / sizeof(Value); bailoutCmp32(Assembler::Above, argcreg, Imm32(limit), snapshot); emitApplyGeneric(apply); } void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) { LSnapshot* snapshot = apply->snapshot(); Register tmp = ToRegister(apply->getTempObject()); Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength()); masm.load32(length, tmp); // See comment in visitApplyArgsGeneric, above. uint32_t limit = 3000 / sizeof(Value); bailoutCmp32(Assembler::Above, tmp, Imm32(limit), snapshot); // Ensure that the array does not contain an uninitialized tail. Address initializedLength(ToRegister(apply->getElements()), ObjectElements::offsetOfInitializedLength()); masm.sub32(initializedLength, tmp); bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot); emitApplyGeneric(apply); } void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); } void CodeGenerator::visitUnreachable(LUnreachable* lir) { masm.assumeUnreachable("end-of-block assumed unreachable"); } void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) { encode(lir->snapshot()); } void CodeGenerator::visitGetDynamicName(LGetDynamicName* lir) { Register envChain = ToRegister(lir->getEnvironmentChain()); Register name = ToRegister(lir->getName()); Register temp1 = ToRegister(lir->temp1()); Register temp2 = ToRegister(lir->temp2()); Register temp3 = ToRegister(lir->temp3()); masm.loadJSContext(temp3); /* Make space for the outparam. 
*/ masm.adjustStack(-int32_t(sizeof(Value))); masm.moveStackPtrTo(temp2); masm.setupUnalignedABICall(temp1); masm.passABIArg(temp3); masm.passABIArg(envChain); masm.passABIArg(name); masm.passABIArg(temp2); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GetDynamicName)); const ValueOperand out = ToOutValue(lir); masm.loadValue(Address(masm.getStackPointer(), 0), out); masm.adjustStack(sizeof(Value)); Label undefined; masm.branchTestUndefined(Assembler::Equal, out, &undefined); bailoutFrom(&undefined, lir->snapshot()); } typedef bool (*DirectEvalSFn)(JSContext*, HandleObject, HandleScript, HandleValue, HandleString, jsbytecode*, MutableHandleValue); static const VMFunction DirectEvalStringInfo = FunctionInfo(DirectEvalStringFromIon, "DirectEvalStringFromIon"); void CodeGenerator::visitCallDirectEval(LCallDirectEval* lir) { Register envChain = ToRegister(lir->getEnvironmentChain()); Register string = ToRegister(lir->getString()); pushArg(ImmPtr(lir->mir()->pc())); pushArg(string); pushArg(ToValue(lir, LCallDirectEval::NewTarget)); pushArg(ImmGCPtr(current->mir()->info().script())); pushArg(envChain); callVM(DirectEvalStringInfo, lir); } void CodeGenerator::generateArgumentsChecks(bool bailout) { // Registers safe for use before generatePrologue(). static const uint32_t EntryTempMask = Registers::TempMask & ~(1 << OsrFrameReg.code()); // This function can be used the normal way to check the argument types, // before entering the function and bailout when arguments don't match. // For debug purpose, this is can also be used to force/check that the // arguments are correct. Upon fail it will hit a breakpoint. MIRGraph& mir = gen->graph(); MResumePoint* rp = mir.entryResumePoint(); // No registers are allocated yet, so it's safe to grab anything. 
Register temp = GeneralRegisterSet(EntryTempMask).getAny(); const CompileInfo& info = gen->info(); Label miss; for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) { // All initial parameters are guaranteed to be MParameters. MParameter* param = rp->getOperand(i)->toParameter(); const TypeSet* types = param->resultTypeSet(); if (!types || types->unknown()) continue; // Calculate the offset on the stack of the argument. // (i - info.startArgSlot()) - Compute index of arg within arg vector. // ... * sizeof(Value) - Scale by value size. // ArgToStackOffset(...) - Compute displacement within arg vector. int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value)); masm.guardTypeSet(Address(masm.getStackPointer(), offset), types, BarrierKind::TypeSet, temp, &miss); } if (miss.used()) { if (bailout) { bailoutFrom(&miss, graph.entrySnapshot()); } else { Label success; masm.jump(&success); masm.bind(&miss); // Check for cases where the type set guard might have missed due to // changing object groups. for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) { MParameter* param = rp->getOperand(i)->toParameter(); const TemporaryTypeSet* types = param->resultTypeSet(); if (!types || types->unknown()) continue; Label skip; Address addr(masm.getStackPointer(), ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value))); masm.branchTestObject(Assembler::NotEqual, addr, &skip); Register obj = masm.extractObject(addr, temp); masm.guardTypeSetMightBeIncomplete(types, obj, temp, &success); masm.bind(&skip); } masm.assumeUnreachable("Argument check fail."); masm.bind(&success); } } } // Out-of-line path to report over-recursed error and fail. 
class CheckOverRecursedFailure : public OutOfLineCodeBase { LInstruction* lir_; public: explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitCheckOverRecursedFailure(this); } LInstruction* lir() const { return lir_; } }; void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) { // If we don't push anything on the stack, skip the check. if (omitOverRecursedCheck()) return; // Ensure that this frame will not cross the stack limit. // This is a weak check, justified by Ion using the C stack: we must always // be some distance away from the actual limit, since if the limit is // crossed, an error must be thrown, which requires more frames. // // It must always be possible to trespass past the stack limit. // Ion may legally place frames very close to the limit. Calling additional // C functions may then violate the limit without any checking. // Since Ion frames exist on the C stack, the stack limit may be // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota(). const void* limitAddr = GetJitContext()->runtime->addressOfJitStackLimit(); CheckOverRecursedFailure* ool = new(alloc()) CheckOverRecursedFailure(lir); addOutOfLineCode(ool, lir->mir()); // Conditional forward (unlikely) branch to failure. 
masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr), ool->entry()); masm.bind(ool->rejoin()); } typedef bool (*DefVarFn)(JSContext*, HandlePropertyName, unsigned, HandleObject); static const VMFunction DefVarInfo = FunctionInfo(DefVar, "DefVar"); void CodeGenerator::visitDefVar(LDefVar* lir) { Register envChain = ToRegister(lir->environmentChain()); pushArg(envChain); // JSObject* pushArg(Imm32(lir->mir()->attrs())); // unsigned pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName* callVM(DefVarInfo, lir); } typedef bool (*DefLexicalFn)(JSContext*, HandlePropertyName, unsigned); static const VMFunction DefLexicalInfo = FunctionInfo(DefGlobalLexical, "DefGlobalLexical"); void CodeGenerator::visitDefLexical(LDefLexical* lir) { pushArg(Imm32(lir->mir()->attrs())); // unsigned pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName* callVM(DefLexicalInfo, lir); } typedef bool (*DefFunOperationFn)(JSContext*, HandleScript, HandleObject, HandleFunction); static const VMFunction DefFunOperationInfo = FunctionInfo(DefFunOperation, "DefFunOperation"); void CodeGenerator::visitDefFun(LDefFun* lir) { Register envChain = ToRegister(lir->environmentChain()); Register fun = ToRegister(lir->fun()); pushArg(fun); pushArg(envChain); pushArg(ImmGCPtr(current->mir()->info().script())); callVM(DefFunOperationInfo, lir); } typedef bool (*CheckOverRecursedFn)(JSContext*); static const VMFunction CheckOverRecursedInfo = FunctionInfo(CheckOverRecursed, "CheckOverRecursed"); void CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool) { // The OOL path is hit if the recursion depth has been exceeded. // Throw an InternalError for over-recursion. // LFunctionEnvironment can appear before LCheckOverRecursed, so we have // to save all live registers to avoid crashes if CheckOverRecursed triggers // a GC. 
saveLive(ool->lir()); callVM(CheckOverRecursedInfo, ool->lir()); restoreLive(ool->lir()); masm.jump(ool->rejoin()); } IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() { // If scripts are being profiled, create a new IonScriptCounts for the // profiling data, which will be attached to the associated JSScript or // wasm module after code generation finishes. if (!GetJitContext()->hasProfilingScripts()) return nullptr; // This test inhibits IonScriptCount creation for wasm code which is // currently incompatible with wasm codegen for two reasons: (1) wasm code // must be serializable and script count codegen bakes in absolute // addresses, (2) wasm code does not have a JSScript with which to associate // code coverage data. JSScript* script = gen->info().script(); if (!script) return nullptr; UniquePtr counts(js_new()); if (!counts || !counts->init(graph.numBlocks())) return nullptr; for (size_t i = 0; i < graph.numBlocks(); i++) { MBasicBlock* block = graph.getBlock(i)->mir(); uint32_t offset = 0; char* description = nullptr; if (MResumePoint* resume = block->entryResumePoint()) { // Find a PC offset in the outermost script to use. If this // block is from an inlined script, find a location in the // outer script to associate information about the inlining // with. while (resume->caller()) resume = resume->caller(); offset = script->pcToOffset(resume->pc()); if (block->entryResumePoint()->caller()) { // Get the filename and line number of the inner script. 
JSScript* innerScript = block->info().script(); description = (char*) js_calloc(200); if (description) { snprintf(description, 200, "%s:%" PRIuSIZE, innerScript->filename(), innerScript->lineno()); } } } if (!counts->block(i).init(block->id(), offset, description, block->numSuccessors())) return nullptr; for (size_t j = 0; j < block->numSuccessors(); j++) counts->block(i).setSuccessor(j, skipTrivialBlocks(block->getSuccessor(j))->id()); } scriptCounts_ = counts.release(); return scriptCounts_; } // Structure for managing the state tracked for a block by script counters. struct ScriptCountBlockState { IonBlockCounts& block; MacroAssembler& masm; Sprinter printer; public: ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm) : block(*block), masm(*masm), printer(GetJitContext()->cx, false) { } bool init() { if (!printer.init()) return false; // Bump the hit count for the block at the start. This code is not // included in either the text for the block or the instruction byte // counts. masm.inc64(AbsoluteAddress(block.addressOfHitCount())); // Collect human readable assembly for the code generated in the block. masm.setPrinter(&printer); return true; } void visitInstruction(LInstruction* ins) { // Prefix stream of assembly instructions with their LIR instruction // name and any associated high level info. if (const char* extra = ins->extraName()) printer.printf("[%s:%s]\n", ins->opName(), extra); else printer.printf("[%s]\n", ins->opName()); } ~ScriptCountBlockState() { masm.setPrinter(nullptr); if (!printer.hadOutOfMemory()) block.setCode(printer.string()); } }; void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) { CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp); masm.propagateOOM(ionScriptLabels_.append(label)); // If IonScript::invalidationCount_ != 0, the script has been invalidated. 
masm.branch32(Assembler::NotEqual, Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0), invalidated); } void CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset) { MOZ_ASSERT(type == MIRType::Object || type == MIRType::ObjectOrNull || type == MIRType::String || type == MIRType::Symbol); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); Register temp = regs.takeAny(); masm.push(temp); // Don't check if the script has been invalidated. In that case invalid // types are expected (until we reach the OsiPoint and bailout). Label done; branchIfInvalidated(temp, &done); if ((type == MIRType::Object || type == MIRType::ObjectOrNull) && typeset && !typeset->unknownObject()) { // We have a result TypeSet, assert this object is in it. Label miss, ok; if (type == MIRType::ObjectOrNull) masm.branchPtr(Assembler::Equal, input, ImmWord(0), &ok); if (typeset->getObjectCount() > 0) masm.guardObjectType(input, typeset, temp, &miss); else masm.jump(&miss); masm.jump(&ok); masm.bind(&miss); masm.guardTypeSetMightBeIncomplete(typeset, input, temp, &ok); masm.assumeUnreachable("MIR instruction returned object with unexpected type"); masm.bind(&ok); } // Check that we have a valid GC pointer. 
saveVolatile(); masm.setupUnalignedABICall(temp); masm.loadJSContext(temp); masm.passABIArg(temp); masm.passABIArg(input); void* callee; switch (type) { case MIRType::Object: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectPtr); break; case MIRType::ObjectOrNull: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectOrNullPtr); break; case MIRType::String: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidStringPtr); break; case MIRType::Symbol: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidSymbolPtr); break; default: MOZ_CRASH(); } masm.callWithABI(callee); restoreVolatile(); masm.bind(&done); masm.pop(temp); } void CodeGenerator::emitAssertResultV(const ValueOperand input, const TemporaryTypeSet* typeset) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); masm.push(temp1); masm.push(temp2); // Don't check if the script has been invalidated. In that case invalid // types are expected (until we reach the OsiPoint and bailout). Label done; branchIfInvalidated(temp1, &done); if (typeset && !typeset->unknown()) { // We have a result TypeSet, assert this value is in it. Label miss, ok; masm.guardTypeSet(input, typeset, BarrierKind::TypeSet, temp1, &miss); masm.jump(&ok); masm.bind(&miss); // Check for cases where the type set guard might have missed due to // changing object groups. Label realMiss; masm.branchTestObject(Assembler::NotEqual, input, &realMiss); Register payload = masm.extractObject(input, temp1); masm.guardTypeSetMightBeIncomplete(typeset, payload, temp1, &ok); masm.bind(&realMiss); masm.assumeUnreachable("MIR instruction returned value with unexpected type"); masm.bind(&ok); } // Check that we have a valid GC pointer. 
saveVolatile(); masm.pushValue(input); masm.moveStackPtrTo(temp1); masm.setupUnalignedABICall(temp2); masm.loadJSContext(temp2); masm.passABIArg(temp2); masm.passABIArg(temp1); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssertValidValue)); masm.popValue(input); restoreVolatile(); masm.bind(&done); masm.pop(temp2); masm.pop(temp1); } #ifdef DEBUG void CodeGenerator::emitObjectOrStringResultChecks(LInstruction* lir, MDefinition* mir) { if (lir->numDefs() == 0) return; MOZ_ASSERT(lir->numDefs() == 1); Register output = ToRegister(lir->getDef(0)); emitAssertObjectOrStringResult(output, mir->type(), mir->resultTypeSet()); } void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) { if (lir->numDefs() == 0) return; MOZ_ASSERT(lir->numDefs() == BOX_PIECES); if (!lir->getDef(0)->output()->isRegister()) return; ValueOperand output = ToOutValue(lir); emitAssertResultV(output, mir->resultTypeSet()); } void CodeGenerator::emitDebugResultChecks(LInstruction* ins) { // In debug builds, check that LIR instructions return valid values. 
MDefinition* mir = ins->mirRaw(); if (!mir) return; switch (mir->type()) { case MIRType::Object: case MIRType::ObjectOrNull: case MIRType::String: case MIRType::Symbol: emitObjectOrStringResultChecks(ins, mir); break; case MIRType::Value: emitValueResultChecks(ins, mir); break; default: break; } } void CodeGenerator::emitDebugForceBailing(LInstruction* lir) { if (!lir->snapshot()) return; if (lir->isStart()) return; if (lir->isOsiPoint()) return; masm.comment("emitDebugForceBailing"); const void* bailAfterAddr = GetJitContext()->runtime->addressOfIonBailAfter(); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); Label done, notBail, bail; masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterAddr), Imm32(0), &done); { Register temp = regs.takeAny(); masm.push(temp); masm.load32(AbsoluteAddress(bailAfterAddr), temp); masm.sub32(Imm32(1), temp); masm.store32(temp, AbsoluteAddress(bailAfterAddr)); masm.branch32(Assembler::NotEqual, temp, Imm32(0), ¬Bail); { masm.pop(temp); masm.jump(&bail); bailoutFrom(&bail, lir->snapshot()); } masm.bind(¬Bail); masm.pop(temp); } masm.bind(&done); } #endif bool CodeGenerator::generateBody() { IonScriptCounts* counts = maybeCreateScriptCounts(); #if defined(JS_ION_PERF) PerfSpewer* perfSpewer = &perfSpewer_; if (gen->compilingWasm()) perfSpewer = &gen->perfSpewer(); #endif for (size_t i = 0; i < graph.numBlocks(); i++) { current = graph.getBlock(i); // Don't emit any code for trivial blocks, containing just a goto. Such // blocks are created to split critical edges, and if we didn't end up // putting any instructions in them, we can skip them. 
if (current->isTrivial()) continue; #ifdef JS_JITSPEW const char* filename = nullptr; size_t lineNumber = 0; unsigned columnNumber = 0; if (current->mir()->info().script()) { filename = current->mir()->info().script()->filename(); if (current->mir()->pc()) lineNumber = PCToLineNumber(current->mir()->info().script(), current->mir()->pc(), &columnNumber); } else { #ifdef DEBUG lineNumber = current->mir()->lineno(); columnNumber = current->mir()->columnIndex(); #endif } JitSpew(JitSpew_Codegen, "# block%" PRIuSIZE " %s:%" PRIuSIZE ":%u%s:", i, filename ? filename : "?", lineNumber, columnNumber, current->mir()->isLoopHeader() ? " (loop header)" : ""); #endif masm.bind(current->label()); mozilla::Maybe blockCounts; if (counts) { blockCounts.emplace(&counts->block(i), &masm); if (!blockCounts->init()) return false; } #if defined(JS_ION_PERF) perfSpewer->startBasicBlock(current->mir(), masm); #endif for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) { if (!alloc().ensureBallast()) return false; #ifdef JS_JITSPEW JitSpewStart(JitSpew_Codegen, "instruction %s", iter->opName()); if (const char* extra = iter->extraName()) JitSpewCont(JitSpew_Codegen, ":%s", extra); JitSpewFin(JitSpew_Codegen); #endif if (counts) blockCounts->visitInstruction(*iter); #ifdef CHECK_OSIPOINT_REGISTERS if (iter->safepoint()) resetOsiPointRegs(iter->safepoint()); #endif if (iter->mirRaw()) { // Only add instructions that have a tracked inline script tree. if (iter->mirRaw()->trackedTree()) { if (!addNativeToBytecodeEntry(iter->mirRaw()->trackedSite())) return false; } // Track the start native offset of optimizations. if (iter->mirRaw()->trackedOptimizations()) { if (!addTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations())) return false; } } #ifdef DEBUG setElement(*iter); // needed to encode correct snapshot location. emitDebugForceBailing(*iter); #endif iter->accept(this); // Track the end native offset of optimizations. 
if (iter->mirRaw() && iter->mirRaw()->trackedOptimizations()) extendTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations()); #ifdef DEBUG if (!counts) emitDebugResultChecks(*iter); #endif } if (masm.oom()) return false; #if defined(JS_ION_PERF) perfSpewer->endBasicBlock(masm); #endif } return true; } // Out-of-line object allocation for LNewArray. class OutOfLineNewArray : public OutOfLineCodeBase { LNewArray* lir_; public: explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineNewArray(this); } LNewArray* lir() const { return lir_; } }; typedef JSObject* (*NewArrayOperationFn)(JSContext*, HandleScript, jsbytecode*, uint32_t, NewObjectKind); static const VMFunction NewArrayOperationInfo = FunctionInfo(NewArrayOperation, "NewArrayOperation"); static JSObject* NewArrayWithGroup(JSContext* cx, uint32_t length, HandleObjectGroup group, bool convertDoubleElements) { ArrayObject* res = NewFullyAllocatedArrayTryUseGroup(cx, group, length); if (!res) return nullptr; if (convertDoubleElements) res->setShouldConvertDoubleElements(); return res; } typedef JSObject* (*NewArrayWithGroupFn)(JSContext*, uint32_t, HandleObjectGroup, bool); static const VMFunction NewArrayWithGroupInfo = FunctionInfo(NewArrayWithGroup, "NewArrayWithGroup"); void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) { Register objReg = ToRegister(lir->output()); MOZ_ASSERT(!lir->isCall()); saveLive(lir); JSObject* templateObject = lir->mir()->templateObject(); if (templateObject) { pushArg(Imm32(lir->mir()->convertDoubleElements())); pushArg(ImmGCPtr(templateObject->group())); pushArg(Imm32(lir->mir()->length())); callVM(NewArrayWithGroupInfo, lir); } else { pushArg(Imm32(GenericObject)); pushArg(Imm32(lir->mir()->length())); pushArg(ImmPtr(lir->mir()->pc())); pushArg(ImmGCPtr(lir->mir()->block()->info().script())); callVM(NewArrayOperationInfo, lir); } if (ReturnReg != objReg) masm.movePtr(ReturnReg, objReg); restoreLive(lir); 
} typedef JSObject* (*NewDerivedTypedObjectFn)(JSContext*, HandleObject type, HandleObject owner, int32_t offset); static const VMFunction CreateDerivedTypedObjInfo = FunctionInfo(CreateDerivedTypedObj, "CreateDerivedTypedObj"); void CodeGenerator::visitNewDerivedTypedObject(LNewDerivedTypedObject* lir) { pushArg(ToRegister(lir->offset())); pushArg(ToRegister(lir->owner())); pushArg(ToRegister(lir->type())); callVM(CreateDerivedTypedObjInfo, lir); } void CodeGenerator::visitAtan2D(LAtan2D* lir) { Register temp = ToRegister(lir->temp()); FloatRegister y = ToFloatRegister(lir->y()); FloatRegister x = ToFloatRegister(lir->x()); masm.setupUnalignedABICall(temp); masm.passABIArg(y, MoveOp::DOUBLE); masm.passABIArg(x, MoveOp::DOUBLE); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaAtan2), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg); } void CodeGenerator::visitHypot(LHypot* lir) { Register temp = ToRegister(lir->temp()); uint32_t numArgs = lir->numArgs(); masm.setupUnalignedABICall(temp); for (uint32_t i = 0 ; i < numArgs; ++i) masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE); switch(numArgs) { case 2: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaHypot), MoveOp::DOUBLE); break; case 3: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot3), MoveOp::DOUBLE); break; case 4: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot4), MoveOp::DOUBLE); break; default: MOZ_CRASH("Unexpected number of arguments to hypot function."); } MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg); } void CodeGenerator::visitNewArray(LNewArray* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); JSObject* templateObject = lir->mir()->templateObject(); DebugOnly length = lir->mir()->length(); MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT); if (lir->mir()->isVMCall()) { visitNewArrayCallVM(lir); return; } OutOfLineNewArray* ool = new(alloc()) OutOfLineNewArray(lir); 
addOutOfLineCode(ool, lir->mir()); masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(), /* initContents = */ true, lir->mir()->convertDoubleElements()); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) { visitNewArrayCallVM(ool->lir()); masm.jump(ool->rejoin()); } void CodeGenerator::visitNewArrayCopyOnWrite(LNewArrayCopyOnWrite* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); ArrayObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); // If we have a template object, we can inline call object creation. OutOfLineCode* ool = oolCallVM(NewArrayCopyOnWriteInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(objReg)); masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry()); masm.bind(ool->rejoin()); } typedef ArrayObject* (*ArrayConstructorOneArgFn)(JSContext*, HandleObjectGroup, int32_t length); static const VMFunction ArrayConstructorOneArgInfo = FunctionInfo(ArrayConstructorOneArg, "ArrayConstructorOneArg"); void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) { Register lengthReg = ToRegister(lir->length()); Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(ArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject->group()), lengthReg), StoreRegisterTo(objReg)); bool canInline = true; size_t inlineLength = 0; if (templateObject->as().hasFixedElements()) { size_t numSlots = gc::GetGCKindSlots(templateObject->asTenured().getAllocKind()); inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER; } else { canInline = false; } if (canInline) { // Try to do the allocation inline if the template object is big enough // 
for the length in lengthReg. If the length is bigger we could still // use the template object and not allocate the elements, but it's more // efficient to do a single big allocation than (repeatedly) reallocating // the array later on when filling it. masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength), ool->entry()); masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry()); size_t lengthOffset = NativeObject::offsetOfFixedElements() + ObjectElements::offsetOfLength(); masm.store32(lengthReg, Address(objReg, lengthOffset)); } else { masm.jump(ool->entry()); } masm.bind(ool->rejoin()); } typedef TypedArrayObject* (*TypedArrayConstructorOneArgFn)(JSContext*, HandleObject, int32_t length); static const VMFunction TypedArrayConstructorOneArgInfo = FunctionInfo(TypedArrayCreateWithTemplate, "TypedArrayCreateWithTemplate"); void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp1()); Register lengthReg = ToRegister(lir->temp2()); LiveRegisterSet liveRegs = lir->safepoint()->liveRegs(); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); TypedArrayObject* ttemplate = &templateObject->as(); uint32_t n = ttemplate->length(); OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(n)), StoreRegisterTo(objReg)); masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry(), /*initContents*/true, /*convertDoubleElements*/false); masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(), ttemplate, TypedArrayLength::Fixed); masm.bind(ool->rejoin()); } void CodeGenerator::visitNewTypedArrayDynamicLength(LNewTypedArrayDynamicLength* lir) { Register lengthReg = ToRegister(lir->length()); Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); LiveRegisterSet liveRegs = 
lir->safepoint()->liveRegs(); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); TypedArrayObject* ttemplate = &templateObject->as(); OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject), lengthReg), StoreRegisterTo(objReg)); masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry(), /*initContents*/true, /*convertDoubleElements*/false); masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(), ttemplate, TypedArrayLength::Dynamic); masm.bind(ool->rejoin()); } // Out-of-line object allocation for JSOP_NEWOBJECT. class OutOfLineNewObject : public OutOfLineCodeBase { LNewObject* lir_; public: explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) { codegen->visitOutOfLineNewObject(this); } LNewObject* lir() const { return lir_; } }; typedef JSObject* (*NewInitObjectWithTemplateFn)(JSContext*, HandleObject); static const VMFunction NewInitObjectWithTemplateInfo = FunctionInfo(NewObjectOperationWithTemplate, "NewObjectOperationWithTemplate"); typedef JSObject* (*NewInitObjectFn)(JSContext*, HandleScript, jsbytecode* pc, NewObjectKind); static const VMFunction NewInitObjectInfo = FunctionInfo(NewObjectOperation, "NewObjectOperation"); typedef PlainObject* (*ObjectCreateWithTemplateFn)(JSContext*, HandlePlainObject); static const VMFunction ObjectCreateWithTemplateInfo = FunctionInfo(ObjectCreateWithTemplate, "ObjectCreateWithTemplate"); void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) { Register objReg = ToRegister(lir->output()); MOZ_ASSERT(!lir->isCall()); saveLive(lir); JSObject* templateObject = lir->mir()->templateObject(); // If we're making a new object with a class prototype (that is, an object // that derives its class from its prototype instead of being // PlainObject::class_'d) from self-hosted code, we need a different init // function. 
switch (lir->mir()->mode()) { case MNewObject::ObjectLiteral: if (templateObject) { pushArg(ImmGCPtr(templateObject)); callVM(NewInitObjectWithTemplateInfo, lir); } else { pushArg(Imm32(GenericObject)); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); pushArg(ImmGCPtr(lir->mir()->block()->info().script())); callVM(NewInitObjectInfo, lir); } break; case MNewObject::ObjectCreate: pushArg(ImmGCPtr(templateObject)); callVM(ObjectCreateWithTemplateInfo, lir); break; } if (ReturnReg != objReg) masm.movePtr(ReturnReg, objReg); restoreLive(lir); } static bool ShouldInitFixedSlots(LInstruction* lir, JSObject* obj) { if (!obj->isNative()) return true; NativeObject* templateObj = &obj->as(); // Look for StoreFixedSlot instructions following an object allocation // that write to this object before a GC is triggered or this object is // passed to a VM call. If all fixed slots will be initialized, the // allocation code doesn't need to set the slots to |undefined|. uint32_t nfixed = templateObj->numUsedFixedSlots(); if (nfixed == 0) return false; // Only optimize if all fixed slots are initially |undefined|, so that we // can assume incremental pre-barriers are not necessary. See also the // comment below. for (uint32_t slot = 0; slot < nfixed; slot++) { if (!templateObj->getSlot(slot).isUndefined()) return true; } // Keep track of the fixed slots that are initialized. initializedSlots is // a bit mask with a bit for each slot. MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS); static_assert(NativeObject::MAX_FIXED_SLOTS <= 32, "Slot bits must fit in 32 bits"); uint32_t initializedSlots = 0; uint32_t numInitialized = 0; MInstruction* allocMir = lir->mirRaw()->toInstruction(); MBasicBlock* block = allocMir->block(); // Skip the allocation instruction. 
MInstructionIterator iter = block->begin(allocMir); MOZ_ASSERT(*iter == allocMir); iter++; while (true) { for (; iter != block->end(); iter++) { if (iter->isNop() || iter->isConstant() || iter->isPostWriteBarrier()) { // These instructions won't trigger a GC or read object slots. continue; } if (iter->isStoreFixedSlot()) { MStoreFixedSlot* store = iter->toStoreFixedSlot(); if (store->object() != allocMir) return true; // We may not initialize this object slot on allocation, so the // pre-barrier could read uninitialized memory. Simply disable // the barrier for this store: the object was just initialized // so the barrier is not necessary. store->setNeedsBarrier(false); uint32_t slot = store->slot(); MOZ_ASSERT(slot < nfixed); if ((initializedSlots & (1 << slot)) == 0) { numInitialized++; initializedSlots |= (1 << slot); if (numInitialized == nfixed) { // All fixed slots will be initialized. MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed); return false; } } continue; } if (iter->isGoto()) { block = iter->toGoto()->target(); if (block->numPredecessors() != 1) return true; break; } // Unhandled instruction, assume it bails or reads object slots. 
return true; } iter = block->begin(); } MOZ_CRASH("Shouldn't get here"); } void CodeGenerator::visitNewObject(LNewObject* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); JSObject* templateObject = lir->mir()->templateObject(); if (lir->mir()->isVMCall()) { visitNewObjectVMCall(lir); return; } OutOfLineNewObject* ool = new(alloc()) OutOfLineNewObject(lir); addOutOfLineCode(ool, lir->mir()); bool initContents = ShouldInitFixedSlots(lir, templateObject); masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(), initContents); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) { visitNewObjectVMCall(ool->lir()); masm.jump(ool->rejoin()); } typedef InlineTypedObject* (*NewTypedObjectFn)(JSContext*, Handle, gc::InitialHeap); static const VMFunction NewTypedObjectInfo = FunctionInfo(InlineTypedObject::createCopy, "InlineTypedObject::createCopy"); void CodeGenerator::visitNewTypedObject(LNewTypedObject* lir) { Register object = ToRegister(lir->output()); Register temp = ToRegister(lir->temp()); InlineTypedObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(object)); masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitSimdBox(LSimdBox* lir) { FloatRegister in = ToFloatRegister(lir->input()); Register object = ToRegister(lir->output()); Register temp = ToRegister(lir->temp()); InlineTypedObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); MIRType type = lir->mir()->input()->type(); registerSimdTemplate(lir->mir()->simdType()); MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across oolCallVM"); 
OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(object)); masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry()); masm.bind(ool->rejoin()); Address objectData(object, InlineTypedObject::offsetOfDataStart()); switch (type) { case MIRType::Int8x16: case MIRType::Int16x8: case MIRType::Int32x4: case MIRType::Bool8x16: case MIRType::Bool16x8: case MIRType::Bool32x4: masm.storeUnalignedSimd128Int(in, objectData); break; case MIRType::Float32x4: masm.storeUnalignedSimd128Float(in, objectData); break; default: MOZ_CRASH("Unknown SIMD kind when generating code for SimdBox."); } } void CodeGenerator::registerSimdTemplate(SimdType simdType) { simdRefreshTemplatesDuringLink_ |= 1 << uint32_t(simdType); } void CodeGenerator::captureSimdTemplate(JSContext* cx) { JitCompartment* jitCompartment = cx->compartment()->jitCompartment(); while (simdRefreshTemplatesDuringLink_) { uint32_t typeIndex = mozilla::CountTrailingZeroes32(simdRefreshTemplatesDuringLink_); simdRefreshTemplatesDuringLink_ ^= 1 << typeIndex; SimdType type = SimdType(typeIndex); // Note: the weak-reference on the template object should not have been // garbage collected. It is either registered by IonBuilder, or verified // before using it in the EagerSimdUnbox phase. jitCompartment->registerSimdTemplateObjectFor(type); } } void CodeGenerator::visitSimdUnbox(LSimdUnbox* lir) { Register object = ToRegister(lir->input()); FloatRegister simd = ToFloatRegister(lir->output()); Register temp = ToRegister(lir->temp()); Label bail; // obj->group() masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp); // Guard that the object has the same representation as the one produced for // SIMD value-type. 
Address clasp(temp, ObjectGroup::offsetOfClasp()); static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent"); masm.branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_), &bail); // obj->type()->typeDescr() // The previous class pointer comparison implies that the addendumKind is // Addendum_TypeDescr. masm.loadPtr(Address(temp, ObjectGroup::offsetOfAddendum()), temp); // Check for the /Kind/ reserved slot of the TypeDescr. This is an Int32 // Value which is equivalent to the object class check. static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots"); Address typeDescrKind(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND)); masm.assertTestInt32(Assembler::Equal, typeDescrKind, "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())"); masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrKind), Imm32(js::type::Simd), &bail); SimdType type = lir->mir()->simdType(); // Check if the SimdTypeDescr /Type/ match the specialization of this // MSimdUnbox instruction. static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots"); Address typeDescrType(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE)); masm.assertTestInt32(Assembler::Equal, typeDescrType, "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())"); masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrType), Imm32(int32_t(type)), &bail); // Load the value from the data of the InlineTypedObject. 
Address objectData(object, InlineTypedObject::offsetOfDataStart()); switch (lir->mir()->type()) { case MIRType::Int8x16: case MIRType::Int16x8: case MIRType::Int32x4: case MIRType::Bool8x16: case MIRType::Bool16x8: case MIRType::Bool32x4: masm.loadUnalignedSimd128Int(objectData, simd); break; case MIRType::Float32x4: masm.loadUnalignedSimd128Float(objectData, simd); break; default: MOZ_CRASH("The impossible happened!"); } bailoutFrom(&bail, lir->snapshot()); } typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap); static const VMFunction NewNamedLambdaObjectInfo = FunctionInfo(NamedLambdaObject::createTemplateObject, "NamedLambdaObject::createTemplateObject"); void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); EnvironmentObject* templateObj = lir->mir()->templateObj(); const CompileInfo& info = lir->mir()->block()->info(); // If we have a template object, we can inline call object creation. 
OutOfLineCode* ool = oolCallVM(NewNamedLambdaObjectInfo, lir, ArgList(ImmGCPtr(info.funMaybeLazy()), Imm32(gc::DefaultHeap)), StoreRegisterTo(objReg)); bool initContents = ShouldInitFixedSlots(lir, templateObj); masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(), initContents); masm.bind(ool->rejoin()); } typedef JSObject* (*NewCallObjectFn)(JSContext*, HandleShape, HandleObjectGroup); static const VMFunction NewCallObjectInfo = FunctionInfo(NewCallObject, "NewCallObject"); void CodeGenerator::visitNewCallObject(LNewCallObject* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); CallObject* templateObj = lir->mir()->templateObject(); OutOfLineCode* ool = oolCallVM(NewCallObjectInfo, lir, ArgList(ImmGCPtr(templateObj->lastProperty()), ImmGCPtr(templateObj->group())), StoreRegisterTo(objReg)); // Inline call object creation, using the OOL path only for tricky cases. bool initContents = ShouldInitFixedSlots(lir, templateObj); masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(), initContents); masm.bind(ool->rejoin()); } typedef JSObject* (*NewSingletonCallObjectFn)(JSContext*, HandleShape); static const VMFunction NewSingletonCallObjectInfo = FunctionInfo(NewSingletonCallObject, "NewSingletonCallObject"); void CodeGenerator::visitNewSingletonCallObject(LNewSingletonCallObject* lir) { Register objReg = ToRegister(lir->output()); JSObject* templateObj = lir->mir()->templateObject(); OutOfLineCode* ool; ool = oolCallVM(NewSingletonCallObjectInfo, lir, ArgList(ImmGCPtr(templateObj->as().lastProperty())), StoreRegisterTo(objReg)); // Objects can only be given singleton types in VM calls. We make the call // out of line to not bloat inline code, even if (naively) this seems like // extra work. 
masm.jump(ool->entry()); masm.bind(ool->rejoin()); } typedef JSObject* (*NewStringObjectFn)(JSContext*, HandleString); static const VMFunction NewStringObjectInfo = FunctionInfo(NewStringObject, "NewStringObject"); void CodeGenerator::visitNewStringObject(LNewStringObject* lir) { Register input = ToRegister(lir->input()); Register output = ToRegister(lir->output()); Register temp = ToRegister(lir->temp()); StringObject* templateObj = lir->mir()->templateObj(); OutOfLineCode* ool = oolCallVM(NewStringObjectInfo, lir, ArgList(input), StoreRegisterTo(output)); masm.createGCObject(output, temp, templateObj, gc::DefaultHeap, ool->entry()); masm.loadStringLength(input, temp); masm.storeValue(JSVAL_TYPE_STRING, input, Address(output, StringObject::offsetOfPrimitiveValue())); masm.storeValue(JSVAL_TYPE_INT32, temp, Address(output, StringObject::offsetOfLength())); masm.bind(ool->rejoin()); } typedef bool(*InitElemFn)(JSContext* cx, jsbytecode* pc, HandleObject obj, HandleValue id, HandleValue value); static const VMFunction InitElemInfo = FunctionInfo(InitElemOperation, "InitElemOperation"); void CodeGenerator::visitInitElem(LInitElem* lir) { Register objReg = ToRegister(lir->getObject()); pushArg(ToValue(lir, LInitElem::ValueIndex)); pushArg(ToValue(lir, LInitElem::IdIndex)); pushArg(objReg); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); callVM(InitElemInfo, lir); } typedef bool (*InitElemGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandleValue, HandleObject); static const VMFunction InitElemGetterSetterInfo = FunctionInfo(InitGetterSetterOperation, "InitGetterSetterOperation"); void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) { Register obj = ToRegister(lir->object()); Register value = ToRegister(lir->value()); pushArg(value); pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex)); pushArg(obj); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); callVM(InitElemGetterSetterInfo, lir); } typedef bool(*MutatePrototypeFn)(JSContext* cx, 
HandlePlainObject obj, HandleValue value); static const VMFunction MutatePrototypeInfo = FunctionInfo(MutatePrototype, "MutatePrototype"); void CodeGenerator::visitMutateProto(LMutateProto* lir) { Register objReg = ToRegister(lir->getObject()); pushArg(ToValue(lir, LMutateProto::ValueIndex)); pushArg(objReg); callVM(MutatePrototypeInfo, lir); } typedef bool(*InitPropFn)(JSContext*, HandleObject, HandlePropertyName, HandleValue, jsbytecode* pc); static const VMFunction InitPropInfo = FunctionInfo(InitProp, "InitProp"); void CodeGenerator::visitInitProp(LInitProp* lir) { Register objReg = ToRegister(lir->getObject()); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); pushArg(ToValue(lir, LInitProp::ValueIndex)); pushArg(ImmGCPtr(lir->mir()->propertyName())); pushArg(objReg); callVM(InitPropInfo, lir); } typedef bool(*InitPropGetterSetterFn)(JSContext*, jsbytecode*, HandleObject, HandlePropertyName, HandleObject); static const VMFunction InitPropGetterSetterInfo = FunctionInfo(InitGetterSetterOperation, "InitGetterSetterOperation"); void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) { Register obj = ToRegister(lir->object()); Register value = ToRegister(lir->value()); pushArg(value); pushArg(ImmGCPtr(lir->mir()->name())); pushArg(obj); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); callVM(InitPropGetterSetterInfo, lir); } typedef bool (*CreateThisFn)(JSContext* cx, HandleObject callee, HandleObject newTarget, MutableHandleValue rval); static const VMFunction CreateThisInfoCodeGen = FunctionInfo(CreateThis, "CreateThis"); void CodeGenerator::visitCreateThis(LCreateThis* lir) { const LAllocation* callee = lir->getCallee(); const LAllocation* newTarget = lir->getNewTarget(); if (newTarget->isConstant()) pushArg(ImmGCPtr(&newTarget->toConstant()->toObject())); else pushArg(ToRegister(newTarget)); if (callee->isConstant()) pushArg(ImmGCPtr(&callee->toConstant()->toObject())); else pushArg(ToRegister(callee)); callVM(CreateThisInfoCodeGen, lir); } 
static JSObject* CreateThisForFunctionWithProtoWrapper(JSContext* cx, HandleObject callee, HandleObject newTarget, HandleObject proto) { return CreateThisForFunctionWithProto(cx, callee, newTarget, proto); } typedef JSObject* (*CreateThisWithProtoFn)(JSContext* cx, HandleObject callee, HandleObject newTarget, HandleObject proto); static const VMFunction CreateThisWithProtoInfo = FunctionInfo(CreateThisForFunctionWithProtoWrapper, "CreateThisForFunctionWithProtoWrapper"); void CodeGenerator::visitCreateThisWithProto(LCreateThisWithProto* lir) { const LAllocation* callee = lir->getCallee(); const LAllocation* newTarget = lir->getNewTarget(); const LAllocation* proto = lir->getPrototype(); if (proto->isConstant()) pushArg(ImmGCPtr(&proto->toConstant()->toObject())); else pushArg(ToRegister(proto)); if (newTarget->isConstant()) pushArg(ImmGCPtr(&newTarget->toConstant()->toObject())); else pushArg(ToRegister(newTarget)); if (callee->isConstant()) pushArg(ImmGCPtr(&callee->toConstant()->toObject())); else pushArg(ToRegister(callee)); callVM(CreateThisWithProtoInfo, lir); } void CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate* lir) { JSObject* templateObject = lir->mir()->templateObject(); Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); OutOfLineCode* ool = oolCallVM(NewInitObjectWithTemplateInfo, lir, ArgList(ImmGCPtr(templateObject)), StoreRegisterTo(objReg)); // Allocate. If the FreeList is empty, call to VM, which may GC. 
bool initContents = !templateObject->is() || ShouldInitFixedSlots(lir, &templateObject->as()); masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(), initContents); masm.bind(ool->rejoin()); } typedef JSObject* (*NewIonArgumentsObjectFn)(JSContext* cx, JitFrameLayout* frame, HandleObject); static const VMFunction NewIonArgumentsObjectInfo = FunctionInfo((NewIonArgumentsObjectFn) ArgumentsObject::createForIon, "ArgumentsObject::createForIon"); void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) { // This should be getting constructed in the first block only, and not any OSR entry blocks. MOZ_ASSERT(lir->mir()->block()->id() == 0); Register callObj = ToRegister(lir->getCallObject()); Register temp = ToRegister(lir->temp0()); Label done; if (ArgumentsObject* templateObj = lir->mir()->templateObject()) { Register objTemp = ToRegister(lir->temp1()); Register cxTemp = ToRegister(lir->temp2()); masm.Push(callObj); // Try to allocate an arguments object. This will leave the reserved // slots uninitialized, so it's important we don't GC until we // initialize these slots in ArgumentsObject::finishForIon. Label failure; masm.createGCObject(objTemp, temp, templateObj, gc::DefaultHeap, &failure, /* initContents = */ false); masm.moveStackPtrTo(temp); masm.addPtr(Imm32(masm.framePushed()), temp); masm.setupUnalignedABICall(cxTemp); masm.loadJSContext(cxTemp); masm.passABIArg(cxTemp); masm.passABIArg(temp); masm.passABIArg(callObj); masm.passABIArg(objTemp); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ArgumentsObject::finishForIon)); masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure); // Discard saved callObj on the stack. 
masm.addToStackPtr(Imm32(sizeof(uintptr_t))); masm.jump(&done); masm.bind(&failure); masm.Pop(callObj); } masm.moveStackPtrTo(temp); masm.addPtr(Imm32(frameSize()), temp); pushArg(callObj); pushArg(temp); callVM(NewIonArgumentsObjectInfo, lir); masm.bind(&done); } void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) { Register temp = ToRegister(lir->getTemp(0)); Register argsObj = ToRegister(lir->getArgsObject()); ValueOperand out = ToOutValue(lir); masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp); Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value)); masm.loadValue(argAddr, out); #ifdef DEBUG Label success; masm.branchTestMagic(Assembler::NotEqual, out, &success); masm.assumeUnreachable("Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC."); masm.bind(&success); #endif } void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) { Register temp = ToRegister(lir->getTemp(0)); Register argsObj = ToRegister(lir->getArgsObject()); ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex); masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp); Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value)); emitPreBarrier(argAddr); #ifdef DEBUG Label success; masm.branchTestMagic(Assembler::NotEqual, argAddr, &success); masm.assumeUnreachable("Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC."); masm.bind(&success); #endif masm.storeValue(value, argAddr); } void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) { ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex); Register obj = ToRegister(lir->getObject()); Register output = ToRegister(lir->output()); Label valueIsObject, end; masm.branchTestObject(Assembler::Equal, value, &valueIsObject); // Value is not an object. Return that other object. masm.movePtr(obj, output); masm.jump(&end); // Value is an object. 
Return unbox(Value). masm.bind(&valueIsObject); Register payload = masm.extractObject(value, output); if (payload != output) masm.movePtr(payload, output); masm.bind(&end); } typedef bool (*BoxNonStrictThisFn)(JSContext*, HandleValue, MutableHandleValue); static const VMFunction BoxNonStrictThisInfo = FunctionInfo(BoxNonStrictThis, "BoxNonStrictThis"); void CodeGenerator::visitComputeThis(LComputeThis* lir) { ValueOperand value = ToValue(lir, LComputeThis::ValueIndex); ValueOperand output = ToOutValue(lir); OutOfLineCode* ool = oolCallVM(BoxNonStrictThisInfo, lir, ArgList(value), StoreValueTo(output)); masm.branchTestObject(Assembler::NotEqual, value, ool->entry()); masm.moveValue(value, output); masm.bind(ool->rejoin()); } void CodeGenerator::visitArrowNewTarget(LArrowNewTarget* lir) { Register callee = ToRegister(lir->callee()); ValueOperand output = ToOutValue(lir); masm.loadValue(Address(callee, FunctionExtended::offsetOfArrowNewTargetSlot()), output); } void CodeGenerator::visitArrayLength(LArrayLength* lir) { Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength()); masm.load32(length, ToRegister(lir->output())); } void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) { Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength()); RegisterOrInt32Constant newLength = ToRegisterOrInt32Constant(lir->index()); masm.inc32(&newLength); masm.store32(newLength, length); // Restore register value if it is used/captured after. 
masm.dec32(&newLength); } template static void RangeFront(MacroAssembler&, Register, Register, Register); template <> void RangeFront(MacroAssembler& masm, Register range, Register i, Register front) { masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front); masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front); static_assert(ValueMap::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0"); static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24"); masm.mulBy3(i, i); masm.lshiftPtr(Imm32(3), i); masm.addPtr(i, front); } template <> void RangeFront(MacroAssembler& masm, Register range, Register i, Register front) { masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front); masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front); static_assert(ValueSet::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0"); static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16"); masm.lshiftPtr(Imm32(4), i); masm.addPtr(i, front); } template static void RangePopFront(MacroAssembler& masm, Register range, Register front, Register dataLength, Register temp) { Register i = temp; masm.add32(Imm32(1), Address(range, OrderedHashTable::Range::offsetOfCount())); masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i); masm.add32(Imm32(1), i); Label done, seek; masm.bind(&seek); masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done); // We can add sizeof(Data) to |front| to select the next element, because // |front| and |range.ht.data[i]| point to the same location. 
static_assert(OrderedHashTable::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0"); masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front); masm.branchTestMagic(Assembler::NotEqual, Address(front, OrderedHashTable::offsetOfEntryKey()), JS_HASH_KEY_EMPTY, &done); masm.add32(Imm32(1), i); masm.jump(&seek); masm.bind(&done); masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI())); } template static inline void RangeDestruct(MacroAssembler& masm, Register range, Register temp0, Register temp1) { Register next = temp0; Register prevp = temp1; masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next); masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp); masm.storePtr(next, Address(prevp, 0)); Label hasNoNext; masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext); masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP())); masm.bind(&hasNoNext); masm.callFreeStub(range); } template <> void CodeGenerator::emitLoadIteratorValues(Register result, Register temp, Register front) { size_t elementsOffset = NativeObject::offsetOfFixedElements(); Address keyAddress(front, ValueMap::Entry::offsetOfKey()); Address valueAddress(front, ValueMap::Entry::offsetOfValue()); Address keyElemAddress(result, elementsOffset); Address valueElemAddress(result, elementsOffset + sizeof(Value)); masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value); masm.patchableCallPreBarrier(valueElemAddress, MIRType::Value); masm.storeValue(keyAddress, keyElemAddress, temp); masm.storeValue(valueAddress, valueElemAddress, temp); Label keyIsNotObject, valueIsNotNurseryObject, emitBarrier; masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject); masm.branchValueIsNurseryObject(Assembler::Equal, keyAddress, temp, &emitBarrier); masm.bind(&keyIsNotObject); masm.branchTestObject(Assembler::NotEqual, valueAddress, &valueIsNotNurseryObject); masm.branchValueIsNurseryObject(Assembler::NotEqual, 
valueAddress, temp, &valueIsNotNurseryObject); { masm.bind(&emitBarrier); saveVolatile(temp); emitPostWriteBarrier(result); restoreVolatile(temp); } masm.bind(&valueIsNotNurseryObject); } template <> void CodeGenerator::emitLoadIteratorValues(Register result, Register temp, Register front) { size_t elementsOffset = NativeObject::offsetOfFixedElements(); Address keyAddress(front, ValueSet::offsetOfEntryKey()); Address keyElemAddress(result, elementsOffset); masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value); masm.storeValue(keyAddress, keyElemAddress, temp); Label keyIsNotObject; masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject); masm.branchValueIsNurseryObject(Assembler::NotEqual, keyAddress, temp, &keyIsNotObject); { saveVolatile(temp); emitPostWriteBarrier(result); restoreVolatile(temp); } masm.bind(&keyIsNotObject); } template void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) { Register iter = ToRegister(lir->iter()); Register result = ToRegister(lir->result()); Register temp = ToRegister(lir->temp0()); Register dataLength = ToRegister(lir->temp1()); Register range = ToRegister(lir->temp2()); Register output = ToRegister(lir->output()); masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)), range); Label iterAlreadyDone, iterDone, done; masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone); masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp); masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()), dataLength); masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()), dataLength); masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone); { masm.push(iter); Register front = iter; RangeFront(masm, range, temp, front); emitLoadIteratorValues(result, temp, front); RangePopFront(masm, range, front, dataLength, temp); masm.pop(iter); masm.move32(Imm32(0), output); } masm.jump(&done); { 
masm.bind(&iterDone); RangeDestruct(masm, range, temp, dataLength); masm.storeValue(PrivateValue(nullptr), Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot))); masm.bind(&iterAlreadyDone); masm.move32(Imm32(1), output); } masm.bind(&done); } void CodeGenerator::visitGetNextEntryForIterator(LGetNextEntryForIterator* lir) { if (lir->mir()->mode() == MGetNextEntryForIterator::Map) { emitGetNextEntryForIterator(lir); } else { MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set); emitGetNextEntryForIterator(lir); } } void CodeGenerator::visitTypedArrayLength(LTypedArrayLength* lir) { Register obj = ToRegister(lir->object()); Register out = ToRegister(lir->output()); masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), out); } void CodeGenerator::visitTypedArrayElements(LTypedArrayElements* lir) { Register obj = ToRegister(lir->object()); Register out = ToRegister(lir->output()); masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), out); } void CodeGenerator::visitSetDisjointTypedElements(LSetDisjointTypedElements* lir) { Register target = ToRegister(lir->target()); Register targetOffset = ToRegister(lir->targetOffset()); Register source = ToRegister(lir->source()); Register temp = ToRegister(lir->temp()); masm.setupUnalignedABICall(temp); masm.passABIArg(target); masm.passABIArg(targetOffset); masm.passABIArg(source); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::SetDisjointTypedElements)); } void CodeGenerator::visitTypedObjectDescr(LTypedObjectDescr* lir) { Register obj = ToRegister(lir->object()); Register out = ToRegister(lir->output()); masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), out); masm.loadPtr(Address(out, ObjectGroup::offsetOfAddendum()), out); } void CodeGenerator::visitTypedObjectElements(LTypedObjectElements* lir) { Register obj = ToRegister(lir->object()); Register out = ToRegister(lir->output()); if (lir->mir()->definitelyOutline()) { masm.loadPtr(Address(obj, 
OutlineTypedObject::offsetOfData()), out); } else { Label inlineObject, done; masm.loadObjClass(obj, out); masm.branchPtr(Assembler::Equal, out, ImmPtr(&InlineOpaqueTypedObject::class_), &inlineObject); masm.branchPtr(Assembler::Equal, out, ImmPtr(&InlineTransparentTypedObject::class_), &inlineObject); masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), out); masm.jump(&done); masm.bind(&inlineObject); masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), out); masm.bind(&done); } } void CodeGenerator::visitSetTypedObjectOffset(LSetTypedObjectOffset* lir) { Register object = ToRegister(lir->object()); Register offset = ToRegister(lir->offset()); Register temp0 = ToRegister(lir->temp0()); Register temp1 = ToRegister(lir->temp1()); // Compute the base pointer for the typed object's owner. masm.loadPtr(Address(object, OutlineTypedObject::offsetOfOwner()), temp0); Label inlineObject, done; masm.loadObjClass(temp0, temp1); masm.branchPtr(Assembler::Equal, temp1, ImmPtr(&InlineOpaqueTypedObject::class_), &inlineObject); masm.branchPtr(Assembler::Equal, temp1, ImmPtr(&InlineTransparentTypedObject::class_), &inlineObject); masm.loadPrivate(Address(temp0, ArrayBufferObject::offsetOfDataSlot()), temp0); masm.jump(&done); masm.bind(&inlineObject); masm.addPtr(ImmWord(InlineTypedObject::offsetOfDataStart()), temp0); masm.bind(&done); // Compute the new data pointer and set it in the object. masm.addPtr(offset, temp0); masm.storePtr(temp0, Address(object, OutlineTypedObject::offsetOfData())); } void CodeGenerator::visitStringLength(LStringLength* lir) { Register input = ToRegister(lir->string()); Register output = ToRegister(lir->output()); masm.loadStringLength(input, output); } void CodeGenerator::visitMinMaxI(LMinMaxI* ins) { Register first = ToRegister(ins->first()); Register output = ToRegister(ins->output()); MOZ_ASSERT(first == output); Label done; Assembler::Condition cond = ins->mir()->isMax() ? 
Assembler::GreaterThan : Assembler::LessThan; if (ins->second()->isConstant()) { masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done); masm.move32(Imm32(ToInt32(ins->second())), output); } else { masm.branch32(cond, first, ToRegister(ins->second()), &done); masm.move32(ToRegister(ins->second()), output); } masm.bind(&done); } void CodeGenerator::visitAbsI(LAbsI* ins) { Register input = ToRegister(ins->input()); Label positive; MOZ_ASSERT(input == ToRegister(ins->output())); masm.branchTest32(Assembler::NotSigned, input, input, &positive); masm.neg32(input); LSnapshot* snapshot = ins->snapshot(); #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) if (snapshot) bailoutCmp32(Assembler::Equal, input, Imm32(INT32_MIN), snapshot); #else if (snapshot) bailoutIf(Assembler::Overflow, snapshot); #endif masm.bind(&positive); } void CodeGenerator::visitPowI(LPowI* ins) { FloatRegister value = ToFloatRegister(ins->value()); Register power = ToRegister(ins->power()); Register temp = ToRegister(ins->temp()); MOZ_ASSERT(power != temp); masm.setupUnalignedABICall(temp); masm.passABIArg(value, MoveOp::DOUBLE); masm.passABIArg(power); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::powi), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); } void CodeGenerator::visitPowD(LPowD* ins) { FloatRegister value = ToFloatRegister(ins->value()); FloatRegister power = ToFloatRegister(ins->power()); Register temp = ToRegister(ins->temp()); masm.setupUnalignedABICall(temp); masm.passABIArg(value, MoveOp::DOUBLE); masm.passABIArg(power, MoveOp::DOUBLE); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaPow), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); } void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) { Register temp = ToRegister(ins->temp()); FloatRegister input = ToFloatRegister(ins->input()); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); masm.setupUnalignedABICall(temp); const 
MathCache* mathCache = ins->mir()->cache();
    if (mathCache) {
        // A math cache is available: pass it as the first ABI argument so
        // the cached (_impl) variants below can use it.
        masm.movePtr(ImmPtr(mathCache), temp);
        masm.passABIArg(temp);
    }
    masm.passABIArg(input, MoveOp::DOUBLE);

    // Select the cached (_impl) or uncached variant of a math function
    // depending on whether a math cache was supplied above.
# define MAYBE_CACHED(fcn) (mathCache ? (void*)fcn ## _impl : (void*)fcn ## _uncached)

    void* funptr = nullptr;
    switch (ins->mir()->function()) {
      case MMathFunction::Log:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log));
        break;
      case MMathFunction::Sin:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sin));
        break;
      case MMathFunction::Cos:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cos));
        break;
      case MMathFunction::Exp:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_exp));
        break;
      case MMathFunction::Tan:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_tan));
        break;
      case MMathFunction::ATan:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_atan));
        break;
      case MMathFunction::ASin:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_asin));
        break;
      case MMathFunction::ACos:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_acos));
        break;
      case MMathFunction::Log10:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log10));
        break;
      case MMathFunction::Log2:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log2));
        break;
      case MMathFunction::Log1P:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_log1p));
        break;
      case MMathFunction::ExpM1:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_expm1));
        break;
      case MMathFunction::CosH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cosh));
        break;
      case MMathFunction::SinH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sinh));
        break;
      case MMathFunction::TanH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_tanh));
        break;
      case MMathFunction::ACosH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_acosh));
        break;
      case MMathFunction::ASinH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_asinh));
        break;
      case MMathFunction::ATanH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_atanh));
        break;
      case MMathFunction::Sign:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_sign));
        break;
      case MMathFunction::Trunc:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_trunc));
        break;
      case MMathFunction::Cbrt:
        funptr = JS_FUNC_TO_DATA_PTR(void*, MAYBE_CACHED(js::math_cbrt));
        break;
      // Floor/Ceil/Round have no cached variants: always call the _impl
      // functions directly.
      case MMathFunction::Floor:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_floor_impl);
        break;
      case MMathFunction::Ceil:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_ceil_impl);
        break;
      case MMathFunction::Round:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_round_impl);
        break;
      default:
        MOZ_CRASH("Unknown math function");
    }

# undef MAYBE_CACHED

    masm.callWithABI(funptr, MoveOp::DOUBLE);
}

// Single-precision math builtin: only floor, round and ceil are supported
// on float32; everything else must have been compiled as double.
void
CodeGenerator::visitMathFunctionF(LMathFunctionF* ins)
{
    Register temp = ToRegister(ins->temp());
    FloatRegister input = ToFloatRegister(ins->input());
    MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);

    masm.setupUnalignedABICall(temp);
    masm.passABIArg(input, MoveOp::FLOAT32);

    void* funptr = nullptr;
    switch (ins->mir()->function()) {
      case MMathFunction::Floor:
        funptr = JS_FUNC_TO_DATA_PTR(void*, floorf);
        break;
      case MMathFunction::Round:
        funptr = JS_FUNC_TO_DATA_PTR(void*, math_roundf_impl);
        break;
      case MMathFunction::Ceil:
        funptr = JS_FUNC_TO_DATA_PTR(void*, ceilf);
        break;
      default:
        MOZ_CRASH("Unknown or unsupported float32 math function");
    }

    masm.callWithABI(funptr, MoveOp::FLOAT32);
}

// Double modulus (fmod semantics): ABI call to the wasm builtin when
// compiling wasm, otherwise to NumberMod.
void
CodeGenerator::visitModD(LModD* ins)
{
    FloatRegister lhs = ToFloatRegister(ins->lhs());
    FloatRegister rhs = ToFloatRegister(ins->rhs());
    Register temp = ToRegister(ins->temp());

    MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

    masm.setupUnalignedABICall(temp);
    masm.passABIArg(lhs, MoveOp::DOUBLE);
    masm.passABIArg(rhs, MoveOp::DOUBLE);

    if (gen->compilingWasm())
        masm.callWithABI(wasm::SymbolicAddress::ModD, MoveOp::DOUBLE);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod),
MoveOp::DOUBLE); } typedef bool (*BinaryFn)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue); static const VMFunction AddInfo = FunctionInfo(js::AddValues, "AddValues"); static const VMFunction SubInfo = FunctionInfo(js::SubValues, "SubValues"); static const VMFunction MulInfo = FunctionInfo(js::MulValues, "MulValues"); static const VMFunction DivInfo = FunctionInfo(js::DivValues, "DivValues"); static const VMFunction ModInfo = FunctionInfo(js::ModValues, "ModValues"); static const VMFunction UrshInfo = FunctionInfo(js::UrshValues, "UrshValues"); void CodeGenerator::visitBinaryV(LBinaryV* lir) { pushArg(ToValue(lir, LBinaryV::RhsInput)); pushArg(ToValue(lir, LBinaryV::LhsInput)); switch (lir->jsop()) { case JSOP_ADD: callVM(AddInfo, lir); break; case JSOP_SUB: callVM(SubInfo, lir); break; case JSOP_MUL: callVM(MulInfo, lir); break; case JSOP_DIV: callVM(DivInfo, lir); break; case JSOP_MOD: callVM(ModInfo, lir); break; case JSOP_URSH: callVM(UrshInfo, lir); break; default: MOZ_CRASH("Unexpected binary op"); } } typedef bool (*StringCompareFn)(JSContext*, HandleString, HandleString, bool*); static const VMFunction StringsEqualInfo = FunctionInfo(jit::StringsEqual, "StringsEqual"); static const VMFunction StringsNotEqualInfo = FunctionInfo(jit::StringsEqual, "StringsEqual"); void CodeGenerator::emitCompareS(LInstruction* lir, JSOp op, Register left, Register right, Register output) { MOZ_ASSERT(lir->isCompareS() || lir->isCompareStrictS()); OutOfLineCode* ool = nullptr; if (op == JSOP_EQ || op == JSOP_STRICTEQ) { ool = oolCallVM(StringsEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } else { MOZ_ASSERT(op == JSOP_NE || op == JSOP_STRICTNE); ool = oolCallVM(StringsNotEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } masm.compareStrings(op, left, right, output, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitCompareStrictS(LCompareStrictS* lir) { JSOp op = lir->mir()->jsop(); MOZ_ASSERT(op == 
JSOP_STRICTEQ || op == JSOP_STRICTNE); const ValueOperand leftV = ToValue(lir, LCompareStrictS::Lhs); Register right = ToRegister(lir->right()); Register output = ToRegister(lir->output()); Register tempToUnbox = ToTempUnboxRegister(lir->tempToUnbox()); Label string, done; masm.branchTestString(Assembler::Equal, leftV, &string); masm.move32(Imm32(op == JSOP_STRICTNE), output); masm.jump(&done); masm.bind(&string); Register left = masm.extractString(leftV, tempToUnbox); emitCompareS(lir, op, left, right, output); masm.bind(&done); } void CodeGenerator::visitCompareS(LCompareS* lir) { JSOp op = lir->mir()->jsop(); Register left = ToRegister(lir->left()); Register right = ToRegister(lir->right()); Register output = ToRegister(lir->output()); emitCompareS(lir, op, left, right, output); } typedef bool (*CompareFn)(JSContext*, MutableHandleValue, MutableHandleValue, bool*); static const VMFunction EqInfo = FunctionInfo(jit::LooselyEqual, "LooselyEqual"); static const VMFunction NeInfo = FunctionInfo(jit::LooselyEqual, "LooselyEqual"); static const VMFunction StrictEqInfo = FunctionInfo(jit::StrictlyEqual, "StrictlyEqual"); static const VMFunction StrictNeInfo = FunctionInfo(jit::StrictlyEqual, "StrictlyEqual"); static const VMFunction LtInfo = FunctionInfo(jit::LessThan, "LessThan"); static const VMFunction LeInfo = FunctionInfo(jit::LessThanOrEqual, "LessThanOrEqual"); static const VMFunction GtInfo = FunctionInfo(jit::GreaterThan, "GreaterThan"); static const VMFunction GeInfo = FunctionInfo(jit::GreaterThanOrEqual, "GreaterThanOrEqual"); void CodeGenerator::visitCompareVM(LCompareVM* lir) { pushArg(ToValue(lir, LBinaryV::RhsInput)); pushArg(ToValue(lir, LBinaryV::LhsInput)); switch (lir->mir()->jsop()) { case JSOP_EQ: callVM(EqInfo, lir); break; case JSOP_NE: callVM(NeInfo, lir); break; case JSOP_STRICTEQ: callVM(StrictEqInfo, lir); break; case JSOP_STRICTNE: callVM(StrictNeInfo, lir); break; case JSOP_LT: callVM(LtInfo, lir); break; case JSOP_LE: callVM(LeInfo, lir); 
break;
      case JSOP_GT:
        callVM(GtInfo, lir);
        break;
      case JSOP_GE:
        callVM(GeInfo, lir);
        break;
      default:
        MOZ_CRASH("Unexpected compare op");
    }
}

// Compare a boxed value against null or undefined (loose or strict
// flavors, selected by the opcode).
void
CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
{
    JSOp op = lir->mir()->jsop();
    MCompare::CompareType compareType = lir->mir()->compareType();
    MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
               compareType == MCompare::Compare_Null);

    const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::Value);
    Register output = ToRegister(lir->output());

    if (op == JSOP_EQ || op == JSOP_NE) {
        // Loose equality: objects may emulate undefined, so an out-of-line
        // object test can be required on this path.
        MOZ_ASSERT(lir->mir()->lhs()->type() != MIRType::Object ||
                   lir->mir()->operandMightEmulateUndefined(),
                   "Operands which can't emulate undefined should have been folded");

        OutOfLineTestObjectWithLabels* ool = nullptr;
        Maybe