//===--- SILGenApply.cpp - Constructs call sites for SILGen ---------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See http://swift.org/LICENSE.txt for license information // See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "ArgumentSource.h" #include "LValue.h" #include "RValue.h" #include "Scope.h" #include "Initialization.h" #include "SpecializedEmitter.h" #include "Varargs.h" #include "swift/AST/ASTContext.h" #include "swift/AST/DiagnosticsSIL.h" #include "swift/AST/ForeignErrorConvention.h" #include "swift/AST/Module.h" #include "swift/Basic/Fallthrough.h" #include "swift/Basic/Range.h" #include "swift/SIL/SILArgument.h" #include "swift/SIL/PrettyStackTrace.h" using namespace swift; using namespace Lowering; /// Retrieve the type to use for a method found via dynamic lookup. static CanAnyFunctionType getDynamicMethodFormalType(SILGenModule &SGM, SILValue proto, ValueDecl *member, SILDeclRef methodName, Type memberType) { auto &ctx = SGM.getASTContext(); CanType selfTy; if (member->isInstanceMember()) { selfTy = ctx.TheUnknownObjectType; } else { selfTy = proto->getType().getSwiftType(); } auto extInfo = FunctionType::ExtInfo() .withRepresentation(FunctionType::Representation::Thin); return CanFunctionType::get(selfTy, memberType->getCanonicalType(), extInfo); } /// Replace the 'self' parameter in the given type. 
static CanSILFunctionType replaceSelfTypeForDynamicLookup(ASTContext &ctx, CanSILFunctionType fnType, CanType newSelfType, SILDeclRef methodName) { auto oldParams = fnType->getParameters(); SmallVector newParams; newParams.append(oldParams.begin(), oldParams.end() - 1); newParams.push_back({newSelfType, oldParams.back().getConvention()}); // If the method returns Self, substitute AnyObject for the result type. SmallVector newResults; newResults.append(fnType->getAllResults().begin(), fnType->getAllResults().end()); if (auto fnDecl = dyn_cast(methodName.getDecl())) { if (fnDecl->hasDynamicSelf()) { auto anyObjectTy = ctx.getProtocol(KnownProtocolKind::AnyObject) ->getDeclaredType(); for (auto &result : newResults) { auto newResultTy = result.getType()->replaceCovariantResultType(anyObjectTy, 0); result = result.getWithType(newResultTy->getCanonicalType()); } } } return SILFunctionType::get(nullptr, fnType->getExtInfo(), fnType->getCalleeConvention(), newParams, newResults, fnType->getOptionalErrorResult(), ctx); } static Type getExistentialArchetype(SILValue existential) { CanType ty = existential->getType().getSwiftRValueType(); if (ty->is()) return ty; return cast(ty)->getDecl()->getProtocolSelf()->getArchetype(); } /// Retrieve the type to use for a method found via dynamic lookup. static CanSILFunctionType getDynamicMethodLoweredType(SILGenFunction &gen, SILValue proto, SILDeclRef methodName) { auto &ctx = gen.getASTContext(); // Determine the opaque 'self' parameter type. CanType selfTy; if (methodName.getDecl()->isInstanceMember()) { selfTy = getExistentialArchetype(proto)->getCanonicalType(); } else { selfTy = proto->getType().getSwiftType(); } // Replace the 'self' parameter type in the method type with it. 
auto methodTy = gen.SGM.getConstantType(methodName).castTo(); return replaceSelfTypeForDynamicLookup(ctx, methodTy, selfTy, methodName); } static bool canUseStaticDispatch(SILGenFunction &gen, SILDeclRef constant) { auto *funcDecl = cast(constant.getDecl()); auto thisModule = gen.SGM.M.getSwiftModule(); return funcDecl->isFinal() || (thisModule == funcDecl->getModuleContext()); } namespace { /// Abstractly represents a callee, which may be a constant or function value, /// and knows how to perform dynamic dispatch and reference the appropriate /// entry point at any valid uncurry level. class Callee { public: enum class Kind { /// An indirect function value. IndirectValue, /// A direct standalone function call, referenceable by a FunctionRefInst. StandaloneFunction, /// Enum case constructor call. EnumElement, VirtualMethod_First, /// A method call using class method dispatch. ClassMethod = VirtualMethod_First, /// A method call using super method dispatch. SuperMethod, VirtualMethod_Last = SuperMethod, GenericMethod_First, /// A method call using archetype dispatch. WitnessMethod = GenericMethod_First, /// A method call using dynamic lookup. DynamicMethod, GenericMethod_Last = DynamicMethod }; const Kind kind; // Move, don't copy. Callee(const Callee &) = delete; Callee &operator=(const Callee &) = delete; private: union { ManagedValue IndirectValue; SILDeclRef Constant; }; SILValue SelfValue; ArrayRef Substitutions; CanAnyFunctionType OrigFormalInterfaceType; CanAnyFunctionType SubstFormalType; Optional SpecializeLoc; bool HasSubstitutions = false; Optional> Captures; // The pointer back to the AST node that produced the callee. 
SILLocation Loc; private: Callee(ManagedValue indirectValue, CanAnyFunctionType origFormalType, CanAnyFunctionType substFormalType, SILLocation L) : kind(Kind::IndirectValue), IndirectValue(indirectValue), OrigFormalInterfaceType(origFormalType), SubstFormalType(substFormalType), Loc(L) {} static CanAnyFunctionType getConstantFormalInterfaceType(SILGenFunction &gen, SILDeclRef fn) { return gen.SGM.Types.getConstantInfo(fn.atUncurryLevel(0)) .FormalInterfaceType; } Callee(SILGenFunction &gen, SILDeclRef standaloneFunction, CanAnyFunctionType substFormalType, SILLocation l) : kind(Kind::StandaloneFunction), Constant(standaloneFunction), OrigFormalInterfaceType(getConstantFormalInterfaceType(gen, standaloneFunction)), SubstFormalType(substFormalType), Loc(l) { } Callee(Kind methodKind, SILGenFunction &gen, SILValue selfValue, SILDeclRef methodName, CanAnyFunctionType substFormalType, SILLocation l) : kind(methodKind), Constant(methodName), SelfValue(selfValue), OrigFormalInterfaceType(getConstantFormalInterfaceType(gen, methodName)), SubstFormalType(substFormalType), Loc(l) { } /// Build a clause that looks like 'origParamType' but uses 'selfType' /// in place of the underlying archetype. 
static CanType buildSubstSelfType(CanType origParamType, CanType selfType, ASTContext &ctx) { assert(!isa(origParamType) && "Self can't be @lvalue"); if (auto lv = dyn_cast(origParamType)) { selfType = buildSubstSelfType(lv.getObjectType(), selfType, ctx); return CanInOutType::get(selfType); } if (auto tuple = dyn_cast(origParamType)) { assert(tuple->getNumElements() == 1); selfType = buildSubstSelfType(tuple.getElementType(0), selfType, ctx); auto field = tuple->getElement(0).getWithType(selfType); return CanType(TupleType::get(field, ctx)); } assert(isa(origParamType) == isa(selfType)); assert(origParamType->getRValueInstanceType()->isTypeParameter()); assert(selfType->getRValueInstanceType()->is()); return selfType; } CanArchetypeType getWitnessMethodSelfType() const { return cast(SubstFormalType.getInput() ->getRValueInstanceType() ->getCanonicalType()); } CanSILFunctionType getSubstFunctionType(SILGenModule &SGM, CanSILFunctionType origFnType) const { if (!HasSubstitutions) return origFnType; return origFnType->substGenericArgs(SGM.M, SGM.SwiftModule, Substitutions); } /// Add the 'self' clause back to the substituted formal type of /// this protocol method. void addProtocolSelfToFormalType(SILGenModule &SGM, SILDeclRef name, CanType protocolSelfType) { // The result types of the expressions yielding protocol values // (reflected in SubstFormalType) reflect an implicit level of // function application, including some extra polymorphic // substitution. HasSubstitutions = true; auto &ctx = SGM.getASTContext(); // Add the 'self' parameter back. We want it to look like a // substitution of the appropriate clause from the original type. 
auto selfType = OrigFormalInterfaceType.getInput(); auto substSelfType = buildSubstSelfType(selfType, protocolSelfType, ctx); auto extInfo = FunctionType::ExtInfo(FunctionType::Representation::Thin, /*noreturn*/ false, /*throws*/ OrigFormalInterfaceType->throws()); SubstFormalType = CanFunctionType::get(substSelfType, SubstFormalType, extInfo); } /// Add the 'self' type to the substituted function type of this /// dynamic callee. void addDynamicCalleeSelfToFormalType(SILGenModule &SGM) { assert(kind == Kind::DynamicMethod); // Drop the original self clause. CanType methodType = OrigFormalInterfaceType.getResult(); // Replace it with the dynamic self type. OrigFormalInterfaceType = getDynamicMethodFormalType(SGM, SelfValue, Constant.getDecl(), Constant, methodType); assert(!OrigFormalInterfaceType->hasTypeParameter()); // Add a self clause to the substituted type. auto selfType = OrigFormalInterfaceType.getInput(); SubstFormalType = CanFunctionType::get(selfType, SubstFormalType, OrigFormalInterfaceType->getExtInfo()); } public: static Callee forIndirect(ManagedValue indirectValue, CanAnyFunctionType origFormalType, CanAnyFunctionType substFormalType, SILLocation l) { return Callee(indirectValue, origFormalType, substFormalType, l); } static Callee forDirect(SILGenFunction &gen, SILDeclRef c, CanAnyFunctionType substFormalType, SILLocation l) { return Callee(gen, c, substFormalType, l); } static Callee forEnumElement(SILGenFunction &gen, SILDeclRef c, CanAnyFunctionType substFormalType, SILLocation l) { assert(isa(c.getDecl())); return Callee(Kind::EnumElement, gen, SILValue(), c, substFormalType, l); } static Callee forClassMethod(SILGenFunction &gen, SILValue selfValue, SILDeclRef name, CanAnyFunctionType substFormalType, SILLocation l) { return Callee(Kind::ClassMethod, gen, selfValue, name, substFormalType, l); } static Callee forSuperMethod(SILGenFunction &gen, SILValue selfValue, SILDeclRef name, CanAnyFunctionType substFormalType, SILLocation l) { return 
Callee(Kind::SuperMethod, gen, selfValue, name, substFormalType, l); } static Callee forArchetype(SILGenFunction &gen, SILValue optOpeningInstruction, CanType protocolSelfType, SILDeclRef name, CanAnyFunctionType substFormalType, SILLocation l) { Callee callee(Kind::WitnessMethod, gen, optOpeningInstruction, name, substFormalType, l); callee.addProtocolSelfToFormalType(gen.SGM, name, protocolSelfType); return callee; } static Callee forDynamic(SILGenFunction &gen, SILValue proto, SILDeclRef name, CanAnyFunctionType substFormalType, SILLocation l) { Callee callee(Kind::DynamicMethod, gen, proto, name, substFormalType, l); callee.addDynamicCalleeSelfToFormalType(gen.SGM); return callee; } Callee(Callee &&) = default; Callee &operator=(Callee &&) = default; void setSubstitutions(SILGenFunction &gen, SILLocation loc, ArrayRef newSubs, unsigned callDepth) { // Currently generic methods of generic types are the deepest we should // be able to stack specializations. // FIXME: Generic local functions can add type parameters to arbitrary // depth. 
assert(callDepth < 2 && "specialization below 'self' or argument depth?!"); assert(Substitutions.empty() && "Already have substitutions?"); Substitutions = newSubs; assert(getNaturalUncurryLevel() >= callDepth && "specializations below uncurry level?!"); SpecializeLoc = loc; HasSubstitutions = true; } void setCaptures(SmallVectorImpl &&captures) { Captures = std::move(captures); } ArrayRef getCaptures() const { if (Captures) return *Captures; return {}; } bool hasCaptures() const { return Captures.hasValue(); } CanAnyFunctionType getOrigFormalType() const { return OrigFormalInterfaceType; } CanAnyFunctionType getSubstFormalType() const { return SubstFormalType; } unsigned getNaturalUncurryLevel() const { switch (kind) { case Kind::IndirectValue: return 0; case Kind::StandaloneFunction: case Kind::EnumElement: case Kind::ClassMethod: case Kind::SuperMethod: case Kind::WitnessMethod: case Kind::DynamicMethod: return Constant.uncurryLevel; } } EnumElementDecl *getEnumElementDecl() { assert(kind == Kind::EnumElement); return cast(Constant.getDecl()); } std::tuple, ApplyOptions> getAtUncurryLevel(SILGenFunction &gen, unsigned level) const { ManagedValue mv; ApplyOptions options = ApplyOptions::None; SILConstantInfo constantInfo; Optional constant = None; switch (kind) { case Kind::IndirectValue: assert(level == 0 && "can't curry indirect function"); mv = IndirectValue; assert(!HasSubstitutions); break; case Kind::StandaloneFunction: { assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of standalone function"); constant = Constant.atUncurryLevel(level); // If we're currying a direct reference to a class-dispatched method, // make sure we emit the right set of thunks. 
if (constant->isCurried && Constant.hasDecl()) if (auto func = Constant.getAbstractFunctionDecl()) if (getMethodDispatch(func) == MethodDispatch::Class) constant = constant->asDirectReference(true); constantInfo = gen.getConstantInfo(*constant); SILValue ref = gen.emitGlobalFunctionRef(Loc, *constant, constantInfo); mv = ManagedValue::forUnmanaged(ref); break; } case Kind::EnumElement: { assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of enum constructor"); constant = Constant.atUncurryLevel(level); constantInfo = gen.getConstantInfo(*constant); // We should not end up here if the enum constructor call is fully // applied. assert(constant->isCurried); SILValue ref = gen.emitGlobalFunctionRef(Loc, *constant, constantInfo); mv = ManagedValue::forUnmanaged(ref); break; } case Kind::ClassMethod: { assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of method"); constant = Constant.atUncurryLevel(level); constantInfo = gen.getConstantInfo(*constant); // If the call is curried, emit a direct call to the curry thunk. if (level < Constant.uncurryLevel) { SILValue ref = gen.emitGlobalFunctionRef(Loc, *constant, constantInfo); mv = ManagedValue::forUnmanaged(ref); break; } // Otherwise, do the dynamic dispatch inline. 
SILValue methodVal = gen.B.createClassMethod(Loc, SelfValue, *constant, /*volatile*/ constant->isForeign); mv = ManagedValue::forUnmanaged(methodVal); break; } case Kind::SuperMethod: { assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of method"); assert(level == getNaturalUncurryLevel() && "Currying the self parameter of super method calls should've been emitted"); constant = Constant.atUncurryLevel(level); constantInfo = gen.getConstantInfo(*constant); if (SILDeclRef baseConstant = Constant.getBaseOverriddenVTableEntry()) constantInfo = gen.SGM.Types.getConstantOverrideInfo(Constant, baseConstant); auto methodVal = gen.B.createSuperMethod(Loc, SelfValue, *constant, constantInfo.getSILType(), /*volatile*/ constant->isForeign); mv = ManagedValue::forUnmanaged(methodVal); break; } case Kind::WitnessMethod: { assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of method"); constant = Constant.atUncurryLevel(level); constantInfo = gen.getConstantInfo(*constant); // If the call is curried, emit a direct call to the curry thunk. if (level < Constant.uncurryLevel) { SILValue ref = gen.emitGlobalFunctionRef(Loc, *constant, constantInfo); mv = ManagedValue::forUnmanaged(ref); break; } // Look up the witness for the archetype. auto proto = Constant.getDecl()->getDeclContext() ->getAsProtocolOrProtocolExtensionContext(); auto archetype = getWitnessMethodSelfType(); // Get the openend existential value if the archetype is an opened // existential type. 
SILValue OpenedExistential; if (!archetype->getOpenedExistentialType().isNull()) OpenedExistential = SelfValue; SILValue fn = gen.B.createWitnessMethod(Loc, archetype, ProtocolConformanceRef(proto), *constant, constantInfo.getSILType(), OpenedExistential, constant->isForeign); mv = ManagedValue::forUnmanaged(fn); break; } case Kind::DynamicMethod: { assert(level >= 1 && "currying 'self' of dynamic method dispatch not yet supported"); assert(level <= Constant.uncurryLevel && "uncurrying past natural uncurry level of method"); auto constant = Constant.atUncurryLevel(level); constantInfo = gen.getConstantInfo(constant); auto closureType = replaceSelfTypeForDynamicLookup(gen.getASTContext(), constantInfo.SILFnType, SelfValue->getType().getSwiftRValueType(), Constant); SILValue fn = gen.B.createDynamicMethod(Loc, SelfValue, constant, SILType::getPrimitiveObjectType(closureType), /*volatile*/ constant.isForeign); mv = ManagedValue::forUnmanaged(fn); break; } } Optional foreignError; if (constant && constant->isForeign) { foreignError = cast(constant->getDecl()) ->getForeignErrorConvention(); } CanSILFunctionType substFnType = getSubstFunctionType(gen.SGM, mv.getType().castTo()); return std::make_tuple(mv, substFnType, foreignError, options); } ArrayRef getSubstitutions() const { return Substitutions; } SILDeclRef getMethodName() const { return Constant; } /// Return a specialized emission function if this is a function with a known /// lowering, such as a builtin, or return null if there is no specialized /// emitter. Optional getSpecializedEmitter(SILGenModule &SGM, unsigned uncurryLevel) const { // Currently we have no curried known functions. 
if (uncurryLevel != 0) return None; switch (kind) { case Kind::StandaloneFunction: { return SpecializedEmitter::forDecl(SGM, Constant); } case Kind::EnumElement: case Kind::IndirectValue: case Kind::ClassMethod: case Kind::SuperMethod: case Kind::WitnessMethod: case Kind::DynamicMethod: return None; } llvm_unreachable("bad callee kind"); } }; /// Given that we've applied some sort of trivial transform to the /// value of the given ManagedValue, enter a cleanup for the result if /// the original had a cleanup. static ManagedValue maybeEnterCleanupForTransformed(SILGenFunction &gen, ManagedValue orig, SILValue result) { if (orig.hasCleanup()) { orig.forwardCleanup(gen); return gen.emitManagedBufferWithCleanup(result); } else { return ManagedValue::forUnmanaged(result); } } static Callee prepareArchetypeCallee(SILGenFunction &gen, SILLocation loc, SILDeclRef constant, ArgumentSource &selfValue, CanAnyFunctionType substFnType, ArrayRef &substitutions) { auto fd = cast(constant.getDecl()); auto protocol = cast(fd->getDeclContext()); // Method calls through ObjC protocols require ObjC dispatch. constant = constant.asForeign(protocol->isObjC()); CanType selfTy = selfValue.getSubstRValueType(); SILParameterInfo _selfParam; auto getSelfParameter = [&]() -> SILParameterInfo { if (_selfParam != SILParameterInfo()) return _selfParam; auto constantFnType = gen.SGM.Types.getConstantFunctionType(constant); return (_selfParam = constantFnType->getSelfParameter()); }; auto getSGFContextForSelf = [&]() -> SGFContext { return (getSelfParameter().isConsumed() ? SGFContext() : SGFContext::AllowGuaranteedPlusZero); }; auto setSelfValueToAddress = [&](SILLocation loc, ManagedValue address) { assert(address.getType().isAddress()); assert(address.getType().is()); auto formalTy = address.getType().getSwiftRValueType(); if (getSelfParameter().isIndirectMutating()) { // Be sure not to consume the cleanup for an inout argument. 
auto selfLV = ManagedValue::forLValue(address.getValue()); selfValue = ArgumentSource(loc, LValue::forAddress(selfLV, AbstractionPattern(formalTy), formalTy)); } else { selfValue = ArgumentSource(loc, RValue(address, formalTy)); } }; // If we're calling a member of a non-class-constrained protocol, // but our archetype refines it to be class-bound, then // we have to materialize the value in order to pass it indirectly. auto materializeSelfIfNecessary = [&] { // Only an instance method of a non-class protocol is ever passed // indirectly. if (!fd->isInstanceMember() || protocol->requiresClass() || selfValue.hasLValueType() || !cast(selfValue.getSubstRValueType())->requiresClass()) return; auto selfParameter = getSelfParameter(); assert(selfParameter.isIndirect()); (void)selfParameter; SILLocation selfLoc = selfValue.getLocation(); // Evaluate the reference into memory. ManagedValue address = [&]() -> ManagedValue { // Do so at +0 if we can. auto ref = std::move(selfValue) .getAsSingleValue(gen, getSGFContextForSelf()); // If we're already in memory for some reason, great. if (ref.getType().isAddress()) return ref; // Store the reference into a temporary. auto temp = gen.emitTemporaryAllocation(selfLoc, ref.getValue()->getType()); gen.B.createStore(selfLoc, ref.getValue(), temp); // If we had a cleanup, create a cleanup at the new address. return maybeEnterCleanupForTransformed(gen, ref, temp); }(); setSelfValueToAddress(selfLoc, address); }; // Construct an archetype call. // Link back to something to create a data dependency if we have // an opened type. SILValue openingSite; auto archetype = cast(CanType(selfTy->getRValueInstanceType())); if (archetype->getOpenedExistentialType()) { openingSite = gen.getArchetypeOpeningSite(archetype); } materializeSelfIfNecessary(); // The protocol self is implicitly decurried. 
substFnType = cast(substFnType.getResult()); return Callee::forArchetype(gen, openingSite, selfTy, constant, substFnType, loc); } /// An ASTVisitor for decomposing a nesting of ApplyExprs into an initial /// Callee and a list of CallSites. The CallEmission class below uses these /// to generate the actual SIL call. /// /// Formally, an ApplyExpr in the AST always has a single argument, which may /// be of tuple type, possibly empty. Also, some callees have a formal type /// which is curried -- for example, methods have type Self -> Arg -> Result. /// /// However, SIL functions take zero or more parameters and the natural entry /// point of a method takes Self as an additional argument, rather than /// returning a partial application. /// /// Therefore, nested ApplyExprs applied to a constant are flattened into a /// single call of the most uncurried entry point fitting the call site. /// This avoids intermediate closure construction. /// /// For example, a method reference 'self.method' decomposes into curry thunk /// as the callee, with a single call site '(self)'. /// /// On the other hand, a call of a method 'self.method(x)(y)' with a function /// return type decomposes into the method's natural entry point as the callee, /// and two call sites, first '(x, self)' then '(y)'. class SILGenApply : public Lowering::ExprVisitor { public: /// The SILGenFunction that we are emitting SIL into. SILGenFunction &SGF; /// The apply callee that abstractly represents the entry point that is being /// called. Optional ApplyCallee; /// The lvalue or rvalue representing the argument source of self. ArgumentSource SelfParam; Expr *SelfApplyExpr = nullptr; Type SelfType; std::vector CallSites; Expr *SideEffect = nullptr; /// The depth of uncurries that we have seen. /// /// *NOTE* This counter is incremented *after* we return from visiting a call /// site's children. This means that it is not valid until we finish visiting /// the expression. 
unsigned CallDepth = 0; /// When visiting expressions, sometimes we need to emit self before we know /// what the actual callee is. In such cases, we assume that we are passing /// self at +0 and then after we know what the callee is, we check if the /// self is passed at +1. If so, we add an extra retain. bool AssumedPlusZeroSelf = false; SILGenApply(SILGenFunction &gen) : SGF(gen) {} void setCallee(Callee &&c) { assert((SelfParam ? CallDepth == 1 : CallDepth == 0) && "setting callee at non-zero call depth?!"); assert(!ApplyCallee && "already set callee!"); ApplyCallee.emplace(std::move(c)); } void setSideEffect(Expr *sideEffectExpr) { assert(!SideEffect && "already set side effect!"); SideEffect = sideEffectExpr; } void setSelfParam(ArgumentSource &&theSelfParam, Expr *theSelfApplyExpr) { assert(!SelfParam && "already set this!"); SelfParam = std::move(theSelfParam); SelfApplyExpr = theSelfApplyExpr; SelfType = theSelfApplyExpr->getType(); ++CallDepth; } void setSelfParam(ArgumentSource &&theSelfParam, Type selfType) { assert(!SelfParam && "already set this!"); SelfParam = std::move(theSelfParam); SelfApplyExpr = nullptr; SelfType = selfType; ++CallDepth; } void decompose(Expr *e) { visit(e); } /// Get the type of the function for substitution purposes. /// /// \param otherCtorRefUsesAllocating If true, the OtherConstructorDeclRef /// refers to the initializing CanFunctionType getSubstFnType(bool otherCtorRefUsesAllocating = false) { // TODO: optimize this if there are no specializes in play auto getSiteType = [&](ApplyExpr *site, bool otherCtorRefUsesAllocating) { if (otherCtorRefUsesAllocating) { // We have a reference to an initializing constructor, but we will // actually be using the allocating constructor. Update the type // appropriately. // FIXME: Re-derive the type from the declaration + substitutions? 
auto ctorRef = cast( site->getFn()->getSemanticsProvidingExpr()); auto fnType = ctorRef->getType()->castTo(); auto selfTy = MetatypeType::get( fnType->getInput()->getInOutObjectType()); return CanFunctionType::get(selfTy->getCanonicalType(), fnType->getResult()->getCanonicalType(), fnType->getExtInfo()); } return cast(site->getFn()->getType()->getCanonicalType()); }; CanFunctionType fnType; auto addSite = [&](ApplyExpr *site, bool otherCtorRefUsesAllocating) { auto siteType = getSiteType(site, otherCtorRefUsesAllocating); // If this is the first call site, use its formal type directly. if (!fnType) { fnType = siteType; return; } fnType = CanFunctionType::get(siteType.getInput(), fnType, siteType->getExtInfo()); }; for (auto callSite : CallSites) { addSite(callSite, false); } // The self application might be a DynamicMemberRefExpr. if (auto selfApply = dyn_cast_or_null(SelfApplyExpr)) { addSite(selfApply, otherCtorRefUsesAllocating); } assert(fnType && "found no call sites?"); return fnType; } /// Fall back to an unknown, indirect callee. void visitExpr(Expr *e) { ManagedValue fn = SGF.emitRValueAsSingleValue(e); auto origType = cast(e->getType()->getCanonicalType()); setCallee(Callee::forIndirect(fn, origType, getSubstFnType(), e)); } void visitLoadExpr(LoadExpr *e) { // TODO: preserve the function pointer at its original abstraction level ManagedValue fn = SGF.emitRValueAsSingleValue(e); auto origType = cast(e->getType()->getCanonicalType()); setCallee(Callee::forIndirect(fn, origType, getSubstFnType(), e)); } /// Add a call site to the curry. void visitApplyExpr(ApplyExpr *e) { if (e->isSuper()) { applySuper(e); } else if (applyInitDelegation(e)) { // Already done } else { CallSites.push_back(e); visit(e->getFn()); } ++CallDepth; } /// Given a metatype value for the type, allocate an Objective-C /// object (with alloc_ref_dynamic) of that type. /// /// \returns the self object. 
ManagedValue allocateObjCObject(ManagedValue selfMeta, SILLocation loc) { auto metaType = selfMeta.getType().castTo(); CanType type = metaType.getInstanceType(); // Convert to an Objective-C metatype representation, if needed. ManagedValue selfMetaObjC; if (metaType->getRepresentation() == MetatypeRepresentation::ObjC) { selfMetaObjC = selfMeta; } else { CanAnyMetatypeType objcMetaType; if (isa(metaType)) { objcMetaType = CanMetatypeType::get(type, MetatypeRepresentation::ObjC); } else { objcMetaType = CanExistentialMetatypeType::get(type, MetatypeRepresentation::ObjC); } selfMetaObjC = ManagedValue( SGF.B.emitThickToObjCMetatype( loc, selfMeta.getValue(), SGF.SGM.getLoweredType(objcMetaType)), selfMeta.getCleanup()); } // Allocate the object. return ManagedValue(SGF.B.createAllocRefDynamic( loc, selfMetaObjC.getValue(), SGF.SGM.getLoweredType(type), /*objc=*/true), selfMetaObjC.getCleanup()); } // // Known callees. // void visitDeclRefExpr(DeclRefExpr *e) { // If we need to perform dynamic dispatch for the given function, // emit class_method to do so. if (auto afd = dyn_cast(e->getDecl())) { Optional kind; bool isDynamicallyDispatched; bool requiresAllocRefDynamic = false; // Determine whether the method is dynamically dispatched. if (auto *proto = dyn_cast(afd->getDeclContext())) { // We have four cases to deal with here: // // 1) for a "static" / "type" method, the base is a metatype. // 2) for a classbound protocol, the base is a class-bound protocol rvalue, // which is loadable. // 3) for a mutating method, the base has inout type. // 4) for a nonmutating method, the base is a general archetype // rvalue, which is address-only. The base is passed at +0, so it isn't // consumed. // // In the last case, the AST has this call typed as being applied // to an rvalue, but the witness is actually expecting a pointer // to the +0 value in memory. We just pass in the address since // archetypes are address-only. 
CanAnyFunctionType substFnType = getSubstFnType(); assert(!CallSites.empty()); ApplyExpr *thisCallSite = CallSites.back(); CallSites.pop_back(); ArgumentSource selfValue = thisCallSite->getArg(); ArrayRef subs = e->getDeclRef().getSubstitutions(); SILDeclRef::Kind kind = SILDeclRef::Kind::Func; if (isa(afd)) { if (proto->isObjC()) { SILLocation loc = thisCallSite->getArg(); // For Objective-C initializers, we only have an initializing // initializer. We need to allocate the object ourselves. kind = SILDeclRef::Kind::Initializer; auto metatype = std::move(selfValue).getAsSingleValue(SGF); auto allocated = allocateObjCObject(metatype, loc); auto allocatedType = allocated.getType().getSwiftRValueType(); selfValue = ArgumentSource(loc, RValue(allocated, allocatedType)); } else { // For non-Objective-C initializers, we have an allocating // initializer to call. kind = SILDeclRef::Kind::Allocator; } } SILDeclRef constant = SILDeclRef(afd, kind); // Prepare the callee. This can modify both selfValue and subs. Callee theCallee = prepareArchetypeCallee(SGF, e, constant, selfValue, substFnType, subs); setSelfParam(std::move(selfValue), thisCallSite); setCallee(std::move(theCallee)); // If there are substitutions, add them now. if (!subs.empty()) { ApplyCallee->setSubstitutions(SGF, e, subs, CallDepth); } return; } if (e->getAccessSemantics() != AccessSemantics::Ordinary) { isDynamicallyDispatched = false; } else { switch (getMethodDispatch(afd)) { case MethodDispatch::Class: isDynamicallyDispatched = true; break; case MethodDispatch::Static: isDynamicallyDispatched = false; break; } } if (isa(afd) && isDynamicallyDispatched) { kind = SILDeclRef::Kind::Func; } else if (auto ctor = dyn_cast(afd)) { ApplyExpr *thisCallSite = CallSites.back(); // Required constructors are dynamically dispatched when the 'self' // value is not statically derived. 
if (ctor->isRequired() && thisCallSite->getArg()->getType()->is() && !thisCallSite->getArg()->isStaticallyDerivedMetatype()) { if (requiresObjCDispatch(afd)) { // When we're performing Objective-C dispatch, we don't have an // allocating constructor to call. So, perform an alloc_ref_dynamic // and pass that along to the initializer. requiresAllocRefDynamic = true; kind = SILDeclRef::Kind::Initializer; } else { kind = SILDeclRef::Kind::Allocator; } } else { isDynamicallyDispatched = false; } } if (isDynamicallyDispatched) { ApplyExpr *thisCallSite = CallSites.back(); CallSites.pop_back(); // Emit the rvalue for self, allowing for guaranteed plus zero if we // have a func. bool AllowPlusZero = kind && *kind == SILDeclRef::Kind::Func; RValue self = SGF.emitRValue(thisCallSite->getArg(), AllowPlusZero ? SGFContext::AllowGuaranteedPlusZero : SGFContext()); // If we allowed for PlusZero and we *did* get the value back at +0, // then we assumed that self could be passed at +0. We will check later // if the actual callee passes self at +1 later when we know its actual // type. AssumedPlusZeroSelf = AllowPlusZero && self.peekIsPlusZeroRValueOrTrivial(); // If we require a dynamic allocation of the object here, do so now. if (requiresAllocRefDynamic) { SILLocation loc = thisCallSite->getArg(); auto selfValue = allocateObjCObject( std::move(self).getAsSingleValue(SGF, loc), loc); self = RValue(SGF, loc, selfValue.getType().getSwiftRValueType(), selfValue); } auto selfValue = self.peekScalarValue(); setSelfParam(ArgumentSource(thisCallSite->getArg(), std::move(self)), thisCallSite); SILDeclRef constant(afd, kind.getValue(), SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(afd)); setCallee(Callee::forClassMethod(SGF, selfValue, constant, getSubstFnType(), e)); // setSelfParam bumps the callDepth, but we aren't really past the // 'self' call depth in this case. --CallDepth; // If there are substitutions, add them. 
if (e->getDeclRef().isSpecialized()) { ApplyCallee->setSubstitutions(SGF, e, e->getDeclRef().getSubstitutions(), CallDepth); } return; } } // If this is a direct reference to a vardecl, it must be a let constant // (which doesn't need to be loaded). Just emit its value directly. if (auto *vd = dyn_cast(e->getDecl())) { (void)vd; assert(vd->isLet() && "Direct reference to vardecl that isn't a let?"); visitExpr(e); return; } SILDeclRef constant(e->getDecl(), SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(e->getDecl())); // Otherwise, we have a statically-dispatched call. CanFunctionType substFnType = getSubstFnType(); ArrayRef subs; auto afd = dyn_cast(e->getDecl()); if (afd) { auto constantInfo = SGF.getConstantInfo(constant); // Forward local substitutions to a non-generic local function. if (afd->getParent()->isLocalContext() && !afd->getGenericParams()) subs = constantInfo.getForwardingSubstitutions(SGF.getASTContext()); // If there are captures, put the placeholder curry level in the formal // type. // TODO: Eliminate the need for this. if (afd->getCaptureInfo().hasLocalCaptures()) substFnType = CanFunctionType::get( SGF.getASTContext().TheEmptyTupleType, substFnType); } if (e->getDeclRef().isSpecialized()) { assert(subs.empty() && "nested local generics not yet supported"); subs = e->getDeclRef().getSubstitutions(); } // Enum case constructor references are open-coded. if (isa(e->getDecl())) setCallee(Callee::forEnumElement(SGF, constant, substFnType, e)); else setCallee(Callee::forDirect(SGF, constant, substFnType, e)); // If the decl ref requires captures, emit the capture params. 
// NOTE(review): angle-bracketed template arguments appear to have been
// stripped throughout this excerpt (e.g. 'SmallVector captures;',
// 'ArrayRef subs;'); restore them from the upstream sources before
// compiling.

// Tail of the statically-dispatched DeclRef case: emit captured values
// for a local function, then apply any forwarded substitutions.
if (afd) {
  if (afd->getCaptureInfo().hasLocalCaptures()) {
    assert(!e->getDeclRef().isSpecialized() &&
           "generic local fns not implemented");
    SmallVector captures;
    SGF.emitCaptures(e, afd, CaptureEmission::ImmediateApplication,
                     captures);
    ApplyCallee->setCaptures(std::move(captures));
  }
  // FIXME: We should be checking hasLocalCaptures() on the lowered
  // captures in the constant info too, to generate more efficient
  // code for mutually recursive local functions which otherwise
  // capture no state.
}

// If there are substitutions, add them, always at depth 0.
if (!subs.empty())
  ApplyCallee->setSubstitutions(SGF, e, subs, 0);
}

void visitAbstractClosureExpr(AbstractClosureExpr *e) {
  // A directly-called closure can be emitted as a direct call instead of
  // really producing a closure object.
  SILDeclRef constant(e);

  // Emit the closure function if we haven't yet.
  if (!SGF.SGM.hasFunction(constant))
    SGF.SGM.emitClosure(e);

  ArrayRef subs;
  CanFunctionType substFnType = getSubstFnType();

  // FIXME: We should be checking hasLocalCaptures() on the lowered
  // captures in the constant info above, to generate more efficient
  // code for mutually recursive local functions which otherwise
  // capture no state.
  auto constantInfo = SGF.getConstantInfo(constant);
  subs = constantInfo.getForwardingSubstitutions(SGF.getASTContext());

  // If there are captures, put the placeholder curry level in the formal
  // type.
  // TODO: Eliminate the need for this.
  if (e->getCaptureInfo().hasLocalCaptures())
    substFnType = CanFunctionType::get(
        SGF.getASTContext().TheEmptyTupleType, substFnType);

  setCallee(Callee::forDirect(SGF, constant, substFnType, e));

  // If the closure requires captures, emit them.
  if (e->getCaptureInfo().hasLocalCaptures()) {
    SmallVector captures;
    SGF.emitCaptures(e, e, CaptureEmission::ImmediateApplication,
                     captures);
    ApplyCallee->setCaptures(std::move(captures));
  }

  // If there are substitutions, add them, always at depth 0.
  if (!subs.empty())
    ApplyCallee->setSubstitutions(SGF, e, subs, 0);
}

// A reference to 'super.init' or a delegated constructor: always call the
// initializing entry point directly.
void visitOtherConstructorDeclRefExpr(OtherConstructorDeclRefExpr *e) {
  // FIXME: We might need to go through ObjC dispatch for references to
  // constructors imported from Clang (which won't have a direct entry point)
  // or to delegate to a designated initializer.
  setCallee(Callee::forDirect(SGF,
                              SILDeclRef(e->getDecl(),
                                         SILDeclRef::Kind::Initializer),
                              getSubstFnType(), e));

  // If there are substitutions, add them.
  if (e->getDeclRef().isSpecialized())
    ApplyCallee->setSubstitutions(SGF, e, e->getDeclRef().getSubstitutions(),
                                  CallDepth);
}

// Evaluate the LHS only for its side effects; the RHS is the real callee.
void visitDotSyntaxBaseIgnoredExpr(DotSyntaxBaseIgnoredExpr *e) {
  setSideEffect(e->getLHS());
  visit(e->getRHS());
}

void visitFunctionConversionExpr(FunctionConversionExpr *e) {
  // FIXME: Check whether this function conversion requires us to build a
  // thunk.
  visit(e->getSubExpr());
}

void visitCovariantFunctionConversionExpr(CovariantFunctionConversionExpr *e){
  // FIXME: These expressions merely adjust the result type for DynamicSelf
  // in an unchecked, ABI-compatible manner. They shouldn't prevent us from
  // forming a complete call.
  visitExpr(e);
}

void visitIdentityExpr(IdentityExpr *e) {
  visit(e->getSubExpr());
}

/// Set up the callee and self parameter for a 'super.method(...)' call.
void applySuper(ApplyExpr *apply) {
  // Load the 'super' argument.
  Expr *arg = apply->getArg();
  ManagedValue super = SGF.emitRValueAsSingleValue(arg);

  // The callee for a super call has to be either a method or constructor.
Expr *fn = apply->getFn(); ArrayRef substitutions; SILDeclRef constant; if (auto *ctorRef = dyn_cast(fn)) { constant = SILDeclRef(ctorRef->getDecl(), SILDeclRef::Kind::Initializer, SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(ctorRef->getDecl())); if (ctorRef->getDeclRef().isSpecialized()) substitutions = ctorRef->getDeclRef().getSubstitutions(); } else if (auto *declRef = dyn_cast(fn)) { assert(isa(declRef->getDecl()) && "non-function super call?!"); constant = SILDeclRef(declRef->getDecl(), SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(declRef->getDecl())); if (declRef->getDeclRef().isSpecialized()) substitutions = declRef->getDeclRef().getSubstitutions(); } else llvm_unreachable("invalid super callee"); CanType superFormalType = arg->getType()->getCanonicalType(); setSelfParam(ArgumentSource(arg, RValue(SGF, apply, superFormalType, super)), apply); if (constant.isForeign || !canUseStaticDispatch(SGF, constant)) { // All Objective-C methods and // non-final native Swift methods use dynamic dispatch. SILValue Input = super.getValue(); while (auto *UI = dyn_cast(Input)) Input = UI->getOperand(); // ObjC super calls require dynamic dispatch. setCallee(Callee::forSuperMethod(SGF, Input, constant, getSubstFnType(), fn)); } else { // Native Swift super calls to final methods are direct. setCallee(Callee::forDirect(SGF, constant, getSubstFnType(), fn)); } // If there are any substitutions for the callee, apply them now. if (!substitutions.empty()) ApplyCallee->setSubstitutions(SGF, fn, substitutions, CallDepth-1); } /// Walk the given \c selfArg expression that produces the appropriate /// `self` for a call, applying the same transformations to the provided /// \c selfValue (which might be a metatype). /// /// This is used for initializer delegation, so it covers only the narrow /// subset of expressions used there. 
ManagedValue emitCorrespondingSelfValue(ManagedValue selfValue, Expr *selfArg) { while (true) { // Handle archetype-to-super and derived-to-base upcasts. if (isa(selfArg) || isa(selfArg)) { auto ice = cast(selfArg); auto resultTy = ice->getType()->getCanonicalType(); // If the 'self' value is a metatype, update the target type // accordingly. if (auto selfMetaTy = selfValue.getSwiftType()->getAs()) { resultTy = CanMetatypeType::get(resultTy, selfMetaTy->getRepresentation()); } auto loweredResultTy = SGF.getLoweredLoadableType(resultTy); if (loweredResultTy != selfValue.getType()) { auto upcast = SGF.B.createUpcast(ice, selfValue.getValue(), loweredResultTy); selfValue = ManagedValue(upcast, selfValue.getCleanup()); } selfArg = ice->getSubExpr(); continue; } // Skip over loads. if (auto load = dyn_cast(selfArg)) { selfArg = load->getSubExpr(); continue; } // Skip over inout expressions. if (auto inout = dyn_cast(selfArg)) { selfArg = inout->getSubExpr(); continue; } // Declaration references terminate the search. if (isa(selfArg)) break; llvm_unreachable("unhandled conversion for metatype value"); } return selfValue; } /// Try to emit the given application as initializer delegation. bool applyInitDelegation(ApplyExpr *expr) { // Dig out the constructor we're delegating to. Expr *fn = expr->getFn(); auto ctorRef = dyn_cast( fn->getSemanticsProvidingExpr()); if (!ctorRef) return false; // Determine whether we'll need to use an allocating constructor (vs. the // initializing constructor). auto nominal = ctorRef->getDecl()->getDeclContext() ->getAsNominalTypeOrNominalTypeExtensionContext(); bool useAllocatingCtor; // Value types only have allocating initializers. if (isa(nominal) || isa(nominal)) useAllocatingCtor = true; // Protocols only witness allocating initializers, except for @objc // protocols, which only witness initializing initializers. 
else if (auto proto = dyn_cast(nominal)) { useAllocatingCtor = !proto->isObjC(); // Factory initializers are effectively "allocating" initializers with no // corresponding initializing entry point. } else if (ctorRef->getDecl()->isFactoryInit()) { useAllocatingCtor = true; } else { // We've established we're in a class initializer or a protocol extension // initializer for a class-bound protocol, In either case, we're // delegating initialization, but we only have an instance in the former // case. assert(isa(nominal) && "some new kind of init context we haven't implemented"); useAllocatingCtor = static_cast(SGF.AllocatorMetatype) && !ctorRef->getDecl()->isObjC(); } // Load the 'self' argument. Expr *arg = expr->getArg(); ManagedValue self; CanType selfFormalType = arg->getType()->getCanonicalType(); // If we're using the allocating constructor, we need to pass along the // metatype. if (useAllocatingCtor) { selfFormalType = CanMetatypeType::get( selfFormalType->getInOutObjectType()->getCanonicalType()); if (SGF.AllocatorMetatype) self = emitCorrespondingSelfValue( ManagedValue::forUnmanaged(SGF.AllocatorMetatype), arg); else self = ManagedValue::forUnmanaged(SGF.emitMetatypeOfValue(expr, arg)); } else { // If we're in a protocol extension initializer, we haven't allocated // "self" yet at this point. Do so. Use alloc_ref_dynamic since we should // only ever get here in ObjC protocol extensions currently. if (SGF.AllocatorMetatype) { assert(ctorRef->getDecl()->isObjC() && "only expect to delegate an initializer from an allocator " "in objc protocol extensions"); self = allocateObjCObject( ManagedValue::forUnmanaged(SGF.AllocatorMetatype), arg); // Perform any adjustments needed to 'self'. self = emitCorrespondingSelfValue(self, arg); } else { self = SGF.emitRValueAsSingleValue(arg); } } setSelfParam(ArgumentSource(arg, RValue(SGF, expr, selfFormalType, self)), expr); // Determine the callee. 
For structs and enums, this is the allocating // constructor (because there is no initializing constructor). For protocol // default implementations, we also use the allocating constructor, because // that's the only thing that's witnessed. For classes, // this is the initializing constructor, to which we will dynamically // dispatch. if (SelfParam.getSubstRValueType()->getRValueInstanceType()->is() && isa(ctorRef->getDecl()->getDeclContext())) { // Look up the witness for the constructor. auto constant = SILDeclRef(ctorRef->getDecl(), useAllocatingCtor ? SILDeclRef::Kind::Allocator : SILDeclRef::Kind::Initializer, SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(ctorRef->getDecl())); setCallee(Callee::forArchetype(SGF, SILValue(), self.getType().getSwiftRValueType(), constant, cast(expr->getType()->getCanonicalType()), expr)); } else if (getMethodDispatch(ctorRef->getDecl()) == MethodDispatch::Class) { // Dynamic dispatch to the initializer. setCallee(Callee::forClassMethod( SGF, self.getValue(), SILDeclRef(ctorRef->getDecl(), useAllocatingCtor ? SILDeclRef::Kind::Allocator : SILDeclRef::Kind::Initializer, SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, requiresObjCDispatch(ctorRef->getDecl())), getSubstFnType(), fn)); } else { // Directly call the peer constructor. setCallee(Callee::forDirect(SGF, SILDeclRef(ctorRef->getDecl(), useAllocatingCtor ? SILDeclRef::Kind::Allocator : SILDeclRef::Kind::Initializer, SILDeclRef::ConstructAtBestResilienceExpansion), getSubstFnType(useAllocatingCtor), fn)); } // Set up the substitutions, if we have any. if (ctorRef->getDeclRef().isSpecialized()) ApplyCallee->setSubstitutions(SGF, fn, ctorRef->getDeclRef().getSubstitutions(), CallDepth-1); return true; } Callee getCallee() { assert(ApplyCallee && "did not find callee?!"); return *std::move(ApplyCallee); } /// Ignore parentheses and implicit conversions. 
static Expr *ignoreParensAndImpConversions(Expr *expr) { while (true) { if (auto ice = dyn_cast(expr)) { expr = ice->getSubExpr(); continue; } // Simple optional-to-optional conversions. This doesn't work // for the full generality of OptionalEvaluationExpr, but it // works given that we check the result for certain forms. if (auto eval = dyn_cast(expr)) { if (auto inject = dyn_cast(eval->getSubExpr())) { if (auto bind = dyn_cast(inject->getSubExpr())) { if (bind->getDepth() == 0) return bind->getSubExpr(); } } } auto valueProviding = expr->getValueProvidingExpr(); if (valueProviding != expr) { expr = valueProviding; continue; } return expr; } } void visitForceValueExpr(ForceValueExpr *e) { // If this application is a dynamic member reference that is forced to // succeed with the '!' operator, emit it as a direct invocation of the // method we found. if (emitForcedDynamicMemberRef(e)) return; visitExpr(e); } /// If this application forces a dynamic member reference with !, emit /// a direct reference to the member. bool emitForcedDynamicMemberRef(ForceValueExpr *e) { // Check whether the argument is a dynamic member reference. auto arg = ignoreParensAndImpConversions(e->getSubExpr()); auto openExistential = dyn_cast(arg); if (openExistential) arg = openExistential->getSubExpr(); auto dynamicMemberRef = dyn_cast(arg); if (!dynamicMemberRef) return false; // Since we'll be collapsing this call site, make sure there's another // call site that will actually perform the invocation. if (CallSites.empty()) return false; // Only @objc methods can be forced. auto *fd = dyn_cast(dynamicMemberRef->getMember().getDecl()); if (!fd || !fd->isObjC()) return false; // Local function that actually emits the dynamic member reference. auto emitDynamicMemberRef = [&] { // We found it. Emit the base. 
ManagedValue base = SGF.emitRValueAsSingleValue(dynamicMemberRef->getBase()); setSelfParam(ArgumentSource(dynamicMemberRef->getBase(), RValue(SGF, dynamicMemberRef, base.getType().getSwiftRValueType(), base)), dynamicMemberRef); // Determine the type of the method we referenced, by replacing the // class type of the 'Self' parameter with Builtin.UnknownObject. SILDeclRef member(fd, SILDeclRef::ConstructAtBestResilienceExpansion, SILDeclRef::ConstructAtNaturalUncurryLevel, /*isObjC=*/true); setCallee(Callee::forDynamic(SGF, base.getValue(), member, getSubstFnType(), e)); }; // When we have an open existential, open it and then emit the // member reference. if (openExistential) { SGF.emitOpenExistentialExpr(openExistential, [&](Expr*) { emitDynamicMemberRef(); }); } else { emitDynamicMemberRef(); } return true; } }; } // end anonymous namespace #ifndef NDEBUG static bool areOnlyAbstractionDifferent(CanType type1, CanType type2) { assert(type1->isLegalSILType()); assert(type2->isLegalSILType()); // Exact equality is fine. if (type1 == type2) return true; // Either both types should be tuples or neither should be. if (auto tuple1 = dyn_cast(type1)) { auto tuple2 = dyn_cast(type2); if (!tuple2) return false; if (tuple1->getNumElements() != tuple2->getNumElements()) return false; for (auto i : indices(tuple2->getElementTypes())) if (!areOnlyAbstractionDifferent(tuple1.getElementType(i), tuple2.getElementType(i))) return false; return true; } if (isa(type2)) return false; // Either both types should be metatypes or neither should be. if (auto meta1 = dyn_cast(type1)) { auto meta2 = dyn_cast(type2); if (!meta2) return false; if (meta1.getInstanceType() != meta2.getInstanceType()) return false; return true; } // Either both types should be functions or neither should be. if (auto fn1 = dyn_cast(type1)) { auto fn2 = dyn_cast(type2); if (!fn2) return false; // TODO: maybe there are checks we can do here? 
(void) fn1; (void) fn2; return true; } if (isa(type2)) return false; llvm_unreachable("no other types should differ by abstraction"); } #endif /// Given two SIL types which are representations of the same type, /// check whether they have an abstraction difference. static bool hasAbstractionDifference(SILFunctionTypeRepresentation rep, SILType type1, SILType type2) { CanType ct1 = type1.getSwiftRValueType(); CanType ct2 = type2.getSwiftRValueType(); assert(getSILFunctionLanguage(rep) == SILFunctionLanguage::C || areOnlyAbstractionDifferent(ct1, ct2)); (void)ct1; (void)ct2; // Assuming that we've applied the same substitutions to both types, // abstraction equality should equal type equality. return (type1 != type2); } /// Emit either an 'apply' or a 'try_apply', with the error branch of /// the 'try_apply' simply branching out of all cleanups and throwing. SILValue SILGenFunction::emitApplyWithRethrow(SILLocation loc, SILValue fn, SILType substFnType, ArrayRef subs, ArrayRef args) { CanSILFunctionType silFnType = substFnType.castTo(); SILType resultType = silFnType->getSILResult(); if (!silFnType->hasErrorResult()) { return B.createApply(loc, fn, substFnType, resultType, subs, args); } SILBasicBlock *errorBB = createBasicBlock(); SILBasicBlock *normalBB = createBasicBlock(); B.createTryApply(loc, fn, substFnType, subs, args, normalBB, errorBB); // Emit the rethrow logic. { B.emitBlock(errorBB); SILValue error = errorBB->createBBArg(silFnType->getErrorResult().getSILType()); B.createBuiltin(loc, SGM.getASTContext().getIdentifier("willThrow"), SGM.Types.getEmptyTupleType(), {}, {error}); Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc)); B.createThrow(loc, error); } // Enter the normal path. B.emitBlock(normalBB); return normalBB->createBBArg(resultType); } /// Emit a raw apply operation, performing no additional lowering of /// either the arguments or the result. 
static SILValue emitRawApply(SILGenFunction &gen,
                             SILLocation loc,
                             ManagedValue fn,
                             ArrayRef subs,
                             ArrayRef args,
                             CanSILFunctionType substFnType,
                             ApplyOptions options,
                             ArrayRef indirectResultAddrs) {
  // Get the callee value.  Forward it (give up the cleanup) only if the
  // callee convention consumes it.
  SILValue fnValue = substFnType->isCalleeConsumed()
    ? fn.forward(gen)
    : fn.getValue();

  SmallVector argValues;

  // Add the buffers for the indirect results if needed.
#ifndef NDEBUG
  assert(indirectResultAddrs.size() == substFnType->getNumIndirectResults());
  for (auto i : indices(indirectResultAddrs)) {
    assert(indirectResultAddrs[i]->getType() ==
           substFnType->getIndirectResults()[i].getSILType());
  }
#endif
  argValues.append(indirectResultAddrs.begin(), indirectResultAddrs.end());

  auto inputTypes = substFnType->getParameters();
  assert(inputTypes.size() == args.size());

  // Gather the arguments.  Consumed parameters take ownership (forward);
  // all others are passed borrowed.
  for (auto i : indices(args)) {
    auto argValue = (inputTypes[i].isConsumed()
                       ? args[i].forward(gen)
                       : args[i].getValue());
#ifndef NDEBUG
    if (argValue->getType() != inputTypes[i].getSILType()) {
      auto &out = llvm::errs();
      out << "TYPE MISMATCH IN ARGUMENT " << i << " OF APPLY AT ";
      printSILLocationDescription(out, loc, gen.getASTContext());
      out << " argument value: ";
      argValue->print(out);
      out << " parameter type: ";
      inputTypes[i].print(out);
      out << "\n";
      abort();
    }
#endif
    argValues.push_back(argValue);
  }

  auto resultType = substFnType->getSILResult();
  auto calleeType = SILType::getPrimitiveObjectType(substFnType);

  // If we don't have an error result, we can make a simple 'apply'.
  SILValue result;
  if (!substFnType->hasErrorResult()) {
    result = gen.B.createApply(loc, fnValue, calleeType,
                               resultType, subs, argValues);

  // Otherwise, we need to create a try_apply.
  } else {
    SILBasicBlock *normalBB = gen.createBasicBlock();
    result = normalBB->createBBArg(resultType);

    SILBasicBlock *errorBB =
      gen.getTryApplyErrorDest(loc, substFnType->getErrorResult(),
                               options & ApplyOptions::DoesNotThrow);

    gen.B.createTryApply(loc, fnValue, calleeType, subs, argValues,
                         normalBB, errorBB);
    gen.B.emitBlock(normalBB);
  }

  // Given any guaranteed arguments that are not being passed at +0, insert the
  // decrement here instead of at the end of scope. Guaranteed just means that
  // we guarantee the lifetime of the object for the duration of the call.
  // Be sure to use a CleanupLocation so that unreachable code diagnostics don't
  // trigger.
  for (auto i : indices(args)) {
    if (!inputTypes[i].isGuaranteed() || args[i].isPlusZeroRValueOrTrivial())
      continue;

    SILValue argValue = args[i].forward(gen);
    SILType argType = argValue->getType();
    CleanupLocation cleanupLoc = CleanupLocation::get(loc);
    if (!argType.isAddress())
      gen.getTypeLowering(argType).emitDestroyRValue(gen.B, cleanupLoc,
                                                     argValue);
    else
      gen.getTypeLowering(argType).emitDestroyAddress(gen.B, cleanupLoc,
                                                      argValue);
  }

  return result;
}

// Set up the out-parameter buffer for a foreign (e.g. NSError**) error
// convention: a nil-initialized, cleanup-protected temporary plus the
// pointer value to pass to the callee.
static std::pair
emitForeignErrorArgument(SILGenFunction &gen,
                         SILLocation loc,
                         SILParameterInfo errorParameter) {
  // We assume that there's no interesting reabstraction here.
  auto errorPtrType = errorParameter.getType();
  PointerTypeKind ptrKind;
  auto errorType = CanType(errorPtrType->getAnyPointerElementType(ptrKind));
  auto &errorTL = gen.getTypeLowering(errorType);

  // Allocate a temporary.
  SILValue errorTemp =
    gen.emitTemporaryAllocation(loc, errorTL.getLoweredType());

  // Nil-initialize it.
  gen.emitInjectOptionalNothingInto(loc, errorTemp, errorTL);

  // Enter a cleanup to destroy the value there.
  auto managedErrorTemp = gen.emitManagedBufferWithCleanup(errorTemp, errorTL);

  // Create the appropriate pointer type.
LValue lvalue = LValue::forAddress(ManagedValue::forLValue(errorTemp), AbstractionPattern(errorType), errorType); auto pointerValue = gen.emitLValueToPointer(loc, std::move(lvalue), errorPtrType, ptrKind, AccessKind::ReadWrite); return {managedErrorTemp, pointerValue}; } namespace { /// An abstract class for working with results. class ResultPlan { public: virtual RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType, ArrayRef &directResults) = 0; virtual ~ResultPlan() = default; }; using ResultPlanPtr = std::unique_ptr; /// The class for building result plans. struct ResultPlanBuilder { SILGenFunction &Gen; SILLocation Loc; ArrayRef AllResults; SILFunctionTypeRepresentation Rep; SmallVectorImpl &IndirectResultAddrs; ResultPlanBuilder(SILGenFunction &gen, SILLocation loc, ArrayRef allResults, SILFunctionTypeRepresentation rep, SmallVectorImpl &resultAddrs) : Gen(gen), Loc(loc), AllResults(allResults), Rep(rep), IndirectResultAddrs(resultAddrs) { } ResultPlanPtr build(Initialization *emitInto, AbstractionPattern origType, CanType substType); ResultPlanPtr buildForTuple(Initialization *emitInto, AbstractionPattern origType, CanTupleType substType); ~ResultPlanBuilder() { assert(AllResults.empty() && "didn't consume all results!"); } }; /// A result plan for evaluating an indirect result into the address /// associated with an initialization. class InPlaceInitializationResultPlan : public ResultPlan { Initialization *Init; public: InPlaceInitializationResultPlan(Initialization *init) : Init(init) {} RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType, ArrayRef &directResults) override { Init->finishInitialization(gen); return RValue(); } }; /// A result plan for working with a single value and potentially /// reabstracting it. The value can actually be a tuple if the /// abstraction is opaque. 
class ScalarResultPlan : public ResultPlan {
  // Non-null iff the result is returned indirectly into this temporary.
  std::unique_ptr Temporary;
  AbstractionPattern OrigType;
  Initialization *Init;
  SILFunctionTypeRepresentation Rep;

public:
  ScalarResultPlan(std::unique_ptr &&temporary,
                   AbstractionPattern origType, Initialization *init,
                   SILFunctionTypeRepresentation rep)
    : Temporary(std::move(temporary)), OrigType(origType),
      Init(init), Rep(rep) {}

  RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType,
                ArrayRef &directResults) override {
    // Lower the unabstracted result type.
    auto &substTL = gen.getTypeLowering(substType);

    // Claim the value:
    ManagedValue value;

    // If we were created with a temporary, that address was passed as
    // an indirect result.
    if (Temporary) {
      // Establish the cleanup.
      Temporary->finishInitialization(gen);
      value = Temporary->getManagedAddress();

      // If the value isn't address-only, go ahead and load.
      if (!substTL.isAddressOnly()) {
        auto load = gen.B.createLoad(loc, value.forward(gen));
        value = gen.emitManagedRValueWithCleanup(load);
      }

    // Otherwise, it was returned as a direct result.
    } else {
      value = directResults.front();
      directResults = directResults.slice(1);
    }

    // Reabstract the value if the types don't match. This can happen
    // due to either substitution reabstractions or bridging.
    if (hasAbstractionDifference(Rep, value.getType(),
                                 substTL.getLoweredType())) {
      // Assume that a C-language API doesn't have substitution
      // reabstractions. This shouldn't be necessary, but
      // emitOrigToSubstValue can get upset.
      if (getSILFunctionLanguage(Rep) == SILFunctionLanguage::C) {
        value = gen.emitBridgedToNativeValue(loc, value, Rep, substType);
      } else {
        value = gen.emitOrigToSubstValue(loc, value, OrigType, substType,
                                         SGFContext(Init));

        // If that successfully emitted into the initialization, we're done.
        if (value.isInContext())
          return RValue();
      }
    }

    // Otherwise, forcibly emit into the initialization if it exists.
    if (Init) {
      Init->copyOrInitValueInto(gen, loc, value, /*init*/ true);
      Init->finishInitialization(gen);
      return RValue();

    // Otherwise, we've got the r-value we want.
    } else {
      return RValue(gen, loc, substType, value);
    }
  }
};

/// A result plan which calls copyOrInitValueInto on an Initialization
/// using a temporary buffer initialized by a sub-plan.
class InitValueFromTemporaryResultPlan : public ResultPlan {
  Initialization *Init;
  ResultPlanPtr SubPlan;
  std::unique_ptr Temporary;

public:
  InitValueFromTemporaryResultPlan(Initialization *init,
                                   ResultPlanPtr &&subPlan,
                                   std::unique_ptr &&temporary)
    : Init(init),
      SubPlan(std::move(subPlan)),
      Temporary(std::move(temporary)) {}

  RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType,
                ArrayRef &directResults) override {
    // The sub-plan fills the temporary; it must emit into context.
    RValue subResult = SubPlan->finish(gen, loc, substType, directResults);
    assert(subResult.isUsed() && "sub-plan didn't emit into context?");
    (void) subResult;

    ManagedValue value = Temporary->getManagedAddress();
    Init->copyOrInitValueInto(gen, loc, value, /*init*/ true);
    Init->finishInitialization(gen);
    return RValue();
  }
};

/// A result plan which calls copyOrInitValueInto using the result of
/// a sub-plan.
class InitValueFromRValueResultPlan : public ResultPlan {
  Initialization *Init;
  ResultPlanPtr SubPlan;

public:
  InitValueFromRValueResultPlan(Initialization *init,
                                ResultPlanPtr &&subPlan)
    : Init(init), SubPlan(std::move(subPlan)) {}

  RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType,
                ArrayRef &directResults) override {
    // Collapse the sub-plan's result to a single value and hand it to the
    // initialization.
    RValue subResult = SubPlan->finish(gen, loc, substType, directResults);
    ManagedValue value = std::move(subResult).getAsSingleValue(gen, loc);
    Init->copyOrInitValueInto(gen, loc, value, /*init*/ true);
    Init->finishInitialization(gen);
    return RValue();
  }
};

/// A result plan which produces a larger RValue from a bunch of
/// components.
class TupleRValueResultPlan : public ResultPlan {
  // One sub-plan per tuple element, in element order.
  SmallVector EltPlans;

public:
  TupleRValueResultPlan(ResultPlanBuilder &builder,
                        AbstractionPattern origType,
                        CanTupleType substType) {
    // Create plans for all the elements.
    EltPlans.reserve(substType->getNumElements());
    for (auto i : indices(substType->getElementTypes())) {
      AbstractionPattern origEltType = origType.getTupleElementType(i);
      CanType substEltType = substType.getElementType(i);
      EltPlans.push_back(builder.build(nullptr, origEltType, substEltType));
    }
  }

  RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType,
                ArrayRef &directResults) override {
    RValue tupleRV(substType);

    // Finish all the component tuples.
    auto substTupleType = cast(substType);
    assert(substTupleType.getElementTypes().size() == EltPlans.size());
    for (auto i : indices(substTupleType.getElementTypes())) {
      RValue eltRV =
        EltPlans[i]->finish(gen, loc, substTupleType.getElementType(i),
                            directResults);
      tupleRV.addElement(std::move(eltRV));
    }

    return tupleRV;
  }
};

/// A result plan which evaluates into the sub-components
/// of a splittable tuple initialization.
class TupleInitializationResultPlan : public ResultPlan {
  Initialization *TupleInit;
  SmallVector EltInitsBuffer;
  MutableArrayRef EltInits;
  SmallVector EltPlans;

public:
  TupleInitializationResultPlan(ResultPlanBuilder &builder,
                                Initialization *tupleInit,
                                AbstractionPattern origType,
                                CanTupleType substType)
      : TupleInit(tupleInit) {
    // Get the sub-initializations.
    EltInits = tupleInit->splitIntoTupleElements(builder.Gen, builder.Loc,
                                                 substType, EltInitsBuffer);

    // Create plans for all the sub-initializations.
    EltPlans.reserve(substType->getNumElements());
    for (auto i : indices(substType->getElementTypes())) {
      AbstractionPattern origEltType = origType.getTupleElementType(i);
      CanType substEltType = substType.getElementType(i);
      Initialization *eltInit = EltInits[i].get();
      EltPlans.push_back(builder.build(eltInit, origEltType, substEltType));
    }
  }

  RValue finish(SILGenFunction &gen, SILLocation loc, CanType substType,
                ArrayRef &directResults) override {
    auto substTupleType = cast(substType);
    assert(substTupleType.getElementTypes().size() == EltPlans.size());
    // Each element plan emits into its own sub-initialization.
    for (auto i : indices(substTupleType.getElementTypes())) {
      auto eltType = substTupleType.getElementType(i);
      RValue eltRV = EltPlans[i]->finish(gen, loc, eltType, directResults);
      assert(eltRV.isUsed()); (void) eltRV;
    }
    TupleInit->finishInitialization(gen);
    return RValue();
  }
};
}

/// Build a result plan for the results of an apply.
///
/// If the initialization is non-null, the result plan will emit into it.
ResultPlanPtr ResultPlanBuilder::build(Initialization *init,
                                       AbstractionPattern origType,
                                       CanType substType) {
  // Destructure original tuples.
  if (origType.isTuple()) {
    return buildForTuple(init, origType, cast(substType));
  }

  // Otherwise, grab the next result.
  auto result = AllResults.front();
  AllResults = AllResults.slice(1);

  SILValue initAddr;
  if (init) {
    initAddr = init->getAddressForInPlaceInitialization();

    // If the result is indirect, and we have an address to emit into, and
    // there are no abstraction differences, then just do it.
    if (initAddr && result.isIndirect() &&
        !hasAbstractionDifference(Rep, initAddr->getType(),
                                  result.getSILType())) {
      IndirectResultAddrs.push_back(initAddr);
      return ResultPlanPtr(new InPlaceInitializationResultPlan(init));
    }
  }

  // Otherwise, we need to:
  //   - get the value, either directly or indirectly
  //   - possibly reabstract it
  //   - store it to the destination
  // We could break this down into different ResultPlan implementations,
  // but it's easier not to.
// Create a temporary if the result is indirect. std::unique_ptr temporary; if (result.isIndirect()) { auto &resultTL = Gen.getTypeLowering(result.getSILType()); temporary = Gen.emitTemporary(Loc, resultTL); IndirectResultAddrs.push_back(temporary->getAddress()); } return ResultPlanPtr( new ScalarResultPlan(std::move(temporary), origType, init, Rep)); } ResultPlanPtr ResultPlanBuilder::buildForTuple(Initialization *init, AbstractionPattern origType, CanTupleType substType) { // If we don't have an initialization for the tuple, just build the // individual components. if (!init) { return ResultPlanPtr(new TupleRValueResultPlan(*this, origType, substType)); } // Okay, we have an initialization for the tuple that we need to emit into. // If we can just split the initialization, do so. if (init->canSplitIntoTupleElements()) { return ResultPlanPtr( new TupleInitializationResultPlan(*this, init, origType, substType)); } // Otherwise, we're going to have to call copyOrInitValueInto, which only // takes a single value. // If the tuple is address-only, we'll get much better code if we // emit into a single buffer. auto &substTL = Gen.getTypeLowering(substType); if (substTL.isAddressOnly()) { // Create a temporary. auto temporary = Gen.emitTemporary(Loc, substTL); // Build a sub-plan to emit into the temporary. auto subplan = buildForTuple(temporary.get(), origType, substType); // Make a plan to initialize into that. return ResultPlanPtr( new InitValueFromTemporaryResultPlan(init, std::move(subplan), std::move(temporary))); } // Build a sub-plan that doesn't know about the initialization. auto subplan = buildForTuple(nullptr, origType, substType); // Make a plan that calls copyOrInitValueInto. 
return ResultPlanPtr( new InitValueFromRValueResultPlan(init, std::move(subplan))); } static bool hasUnownedInnerPointerResult(CanSILFunctionType fnType) { for (auto result : fnType->getAllResults()) { if (result.getConvention() == ResultConvention::UnownedInnerPointer) return true; } return false; } /// Emit a function application, assuming that the arguments have been /// lowered appropriately for the abstraction level but that the /// result does need to be turned back into something matching a /// formal type. RValue SILGenFunction::emitApply( SILLocation loc, ManagedValue fn, ArrayRef subs, ArrayRef args, CanSILFunctionType substFnType, AbstractionPattern origResultType, CanType substResultType, ApplyOptions options, Optional overrideRep, const Optional &foreignError, SGFContext evalContext) { auto rep = overrideRep ? *overrideRep : substFnType->getRepresentation(); // Create the result plan. SmallVector indirectResultAddrs; ResultPlanPtr resultPlan = [&]() -> ResultPlanPtr { auto origResultTypeForPlan = origResultType; auto substResultTypeForPlan = substResultType; ArrayRef allResults = substFnType->getAllResults(); SILResultInfo optResult; // The plan needs to be built using the formal result type // after foreign-error adjustment. if (foreignError) { switch (foreignError->getKind()) { // These conventions make the formal result type (). case ForeignErrorConvention::ZeroResult: case ForeignErrorConvention::NonZeroResult: assert(substResultType->isVoid()); allResults = {}; break; // These conventions leave the formal result alone. case ForeignErrorConvention::ZeroPreservedResult: case ForeignErrorConvention::NonNilError: break; // This convention changes the formal result to the optional object // type; we need to make our own make SILResultInfo array. 
case ForeignErrorConvention::NilResult: { assert(allResults.size() == 1); OptionalTypeKind optKind; SILType objectType = allResults[0].getSILType().getAnyOptionalObjectType(SGM.M, optKind); optResult = allResults[0].getWithType(objectType.getSwiftRValueType()); allResults = optResult; break; } } } ResultPlanBuilder builder(*this, loc, allResults, rep, indirectResultAddrs); return builder.build(evalContext.getEmitInto(), origResultTypeForPlan, substResultTypeForPlan); }(); // If the function returns an inner pointer, we'll need to lifetime-extend // the 'self' parameter. SILValue lifetimeExtendedSelf; bool hasAlreadyLifetimeExtendedSelf = false; if (hasUnownedInnerPointerResult(substFnType)) { auto selfMV = args.back(); lifetimeExtendedSelf = selfMV.getValue(); switch (substFnType->getParameters().back().getConvention()) { case ParameterConvention::Direct_Owned: // If the callee will consume the 'self' parameter, let's retain it so we // can keep it alive. B.emitRetainValueOperation(loc, lifetimeExtendedSelf); break; case ParameterConvention::Direct_Guaranteed: case ParameterConvention::Direct_Unowned: // We'll manually manage the argument's lifetime after the // call. Disable its cleanup, forcing a copy if it was emitted +0. if (selfMV.hasCleanup()) { selfMV.forwardCleanup(*this); } else { lifetimeExtendedSelf = selfMV.copyUnmanaged(*this, loc).forward(*this); } break; // If self is already deallocating, self does not need to be retained or // released since the deallocating bit has been set. case ParameterConvention::Direct_Deallocating: break; case ParameterConvention::Indirect_In_Guaranteed: case ParameterConvention::Indirect_In: case ParameterConvention::Indirect_Inout: case ParameterConvention::Indirect_InoutAliasable: // We may need to support this at some point, but currently only imported // objc methods are returns_inner_pointer. 
llvm_unreachable("indirect self argument to method that" " returns_inner_pointer?!"); } } // If there's a foreign error parameter, fill it in. Optional errorTempWriteback; ManagedValue errorTemp; if (foreignError) { // Error-temporary emission may need writeback. errorTempWriteback.emplace(*this); auto errorParamIndex = foreignError->getErrorParameterIndex(); auto errorParam = substFnType->getParameters()[errorParamIndex]; // This is pretty evil. auto &errorArgSlot = const_cast(args[errorParamIndex]); std::tie(errorTemp, errorArgSlot) = emitForeignErrorArgument(*this, loc, errorParam); } // Emit the raw application. SILValue rawDirectResult = emitRawApply(*this, loc, fn, subs, args, substFnType, options, indirectResultAddrs); // Explode the direct results. SmallVector directResults; auto addManagedDirectResult = [&](SILValue result, SILResultInfo resultInfo) { auto &resultTL = getTypeLowering(resultInfo.getSILType()); switch (resultInfo.getConvention()) { case ResultConvention::Indirect: llvm_unreachable("indirect direct result?"); // For owned results, the value is already retained. case ResultConvention::Owned: break; // For autoreleased results, the reclaim is implicit, so the value is // effectively +1. case ResultConvention::Autoreleased: break; // Autorelease the 'self' value to lifetime-extend it. case ResultConvention::UnownedInnerPointer: assert(lifetimeExtendedSelf && "did not save lifetime-extended self param"); if (!hasAlreadyLifetimeExtendedSelf) { B.createAutoreleaseValue(loc, lifetimeExtendedSelf); hasAlreadyLifetimeExtendedSelf = true; } SWIFT_FALLTHROUGH; case ResultConvention::Unowned: // Unretained. Retain the value. resultTL.emitRetainValue(B, loc, result); break; } directResults.push_back(emitManagedRValueWithCleanup(result, resultTL)); }; auto formalDirectResults = substFnType->getDirectResults(); if (formalDirectResults.empty()) { // Nothing to do. 
} else if (formalDirectResults.size() == 1) { addManagedDirectResult(rawDirectResult, formalDirectResults[0]); } else { for (auto i : indices(formalDirectResults)) { auto elt = B.createTupleExtract(loc, rawDirectResult, i, formalDirectResults[i].getSILType()); addManagedDirectResult(elt, formalDirectResults[i]); } } // If there was a foreign error convention, consider it. // TODO: maybe this should happen after managing the result if it's // not a result-checking convention? if (foreignError) { // Force immediate writeback to the error temporary. errorTempWriteback.reset(); bool doesNotThrow = (options & ApplyOptions::DoesNotThrow); emitForeignErrorCheck(loc, directResults, errorTemp, doesNotThrow, *foreignError); } auto directResultsArray = makeArrayRef(directResults); RValue result = resultPlan->finish(*this, loc, substResultType, directResultsArray); assert(directResultsArray.empty() && "didn't claim all direct results"); return result; } RValue SILGenFunction::emitMonomorphicApply(SILLocation loc, ManagedValue fn, ArrayRef args, CanType resultType, ApplyOptions options, Optional overrideRep, const Optional &foreignError){ auto fnType = fn.getType().castTo(); assert(!fnType->isPolymorphic()); return emitApply(loc, fn, {}, args, fnType, AbstractionPattern(resultType), resultType, options, overrideRep, foreignError, SGFContext()); } /// Count the number of SILParameterInfos that are needed in order to /// pass the given argument. static unsigned getFlattenedValueCount(AbstractionPattern origType, CanType substType) { // The count is always 1 unless the substituted type is a tuple. auto substTuple = dyn_cast(substType); if (!substTuple) return 1; // If the original type is opaque and the substituted type is // materializable, the count is 1 anyway. if (origType.isTypeParameter() && substTuple->isMaterializable()) return 1; // Otherwise, add up the elements. 
unsigned count = 0; for (auto i : indices(substTuple.getElementTypes())) { count += getFlattenedValueCount(origType.getTupleElementType(i), substTuple.getElementType(i)); } return count; } static AbstractionPattern claimNextParamClause(AbstractionPattern &type) { auto result = type.getFunctionInputType(); type = type.getFunctionResultType(); return result; } static CanType claimNextParamClause(CanAnyFunctionType &type) { auto result = type.getInput(); type = dyn_cast(type.getResult()); return result; } using InOutArgument = std::pair; /// Begin all the formal accesses for a set of inout arguments. static void beginInOutFormalAccesses(SILGenFunction &gen, MutableArrayRef inoutArgs, MutableArrayRef> args) { assert(!inoutArgs.empty()); SmallVector, 4> emittedInoutArgs; auto inoutNext = inoutArgs.begin(); // The assumption we make is that 'args' and 'inoutArgs' were built // up in parallel, with empty spots being dropped into 'args' // wherever there's an inout argument to insert. // // Note that this also begins the formal accesses in evaluation order. for (auto &siteArgs : args) { for (ManagedValue &siteArg : siteArgs) { if (siteArg) continue; LValue &inoutArg = inoutNext->first; SILLocation loc = inoutNext->second; ManagedValue address = gen.emitAddressOfLValue(loc, std::move(inoutArg), AccessKind::ReadWrite); siteArg = address; emittedInoutArgs.push_back({address.getValue(), loc}); if (++inoutNext == inoutArgs.end()) goto done; } } llvm_unreachable("ran out of null arguments before we ran out of inouts"); done: // Check to see if we have multiple inout arguments which obviously // alias. Note that we could do this in a later SILDiagnostics pass // as well: this would be stronger (more equivalences exposed) but // would have worse source location information. 
for (auto i = emittedInoutArgs.begin(), e = emittedInoutArgs.end(); i != e; ++i) { for (auto j = emittedInoutArgs.begin(); j != i; ++j) { // TODO: This uses exact SILValue equivalence to detect aliases, // we could do something stronger here to catch other obvious cases. if (i->first != j->first) continue; gen.SGM.diagnose(i->second, diag::inout_argument_alias) .highlight(i->second.getSourceRange()); gen.SGM.diagnose(j->second, diag::previous_inout_alias) .highlight(j->second.getSourceRange()); } } } /// Given a scalar value, materialize it into memory with the /// exact same level of cleanup it had before. static ManagedValue emitMaterializeIntoTemporary(SILGenFunction &gen, SILLocation loc, ManagedValue object) { auto temporary = gen.emitTemporaryAllocation(loc, object.getType()); bool hadCleanup = object.hasCleanup(); gen.B.createStore(loc, object.forward(gen), temporary); // The temporary memory is +0 if the value was. if (hadCleanup) { return ManagedValue(temporary, gen.enterDestroyCleanup(temporary)); } else { return ManagedValue::forUnmanaged(temporary); } } namespace { /// A destination for an argument other than just "onto to the end /// of the arguments lists". /// /// This allows us to re-use the argument expression emitter for /// some weird cases, like a shuffled tuple where some of the /// arguments are going into a varargs array. struct ArgSpecialDest { VarargsInfo *SharedInfo; unsigned Index; CleanupHandle Cleanup; ArgSpecialDest() : SharedInfo(nullptr) {} explicit ArgSpecialDest(VarargsInfo &info, unsigned index) : SharedInfo(&info), Index(index) {} // Reference semantics: need to preserve the cleanup handle. 
ArgSpecialDest(const ArgSpecialDest &) = delete; ArgSpecialDest &operator=(const ArgSpecialDest &) = delete; ArgSpecialDest(ArgSpecialDest &&other) : SharedInfo(other.SharedInfo), Index(other.Index), Cleanup(other.Cleanup) { other.SharedInfo = nullptr; } ArgSpecialDest &operator=(ArgSpecialDest &&other) { assert(!isValid() && "overwriting valid special destination!"); SharedInfo = other.SharedInfo; Index = other.Index; Cleanup = other.Cleanup; other.SharedInfo = nullptr; return *this; } ~ArgSpecialDest() { assert(!isValid() && "failed to deactivate special dest"); } /// Is this a valid special destination? /// /// Most of the time, most arguments don't have special /// destinations, and making an array of OptionalgetBaseAddress(); if (Index != 0) { SILValue index = gen.B.createIntegerLiteral(loc, SILType::getBuiltinWordType(gen.getASTContext()), Index); destAddr = gen.B.createIndexAddr(loc, destAddr, index); } assert(destAddr->getType() == loweredSubstParamType.getAddressType()); auto &destTL = SharedInfo->getBaseTypeLowering(); Cleanup = gen.enterDormantTemporaryCleanup(destAddr, destTL); TemporaryInitialization init(destAddr, Cleanup); std::move(arg).forwardInto(gen, SharedInfo->getBaseAbstractionPattern(), &init, destTL); } /// Deactivate this special destination. Must always be called /// before destruction. void deactivate(SILGenFunction &gen) { assert(isValid() && "deactivating an invalid destination"); if (Cleanup.isValid()) gen.Cleanups.forwardCleanup(Cleanup); SharedInfo = nullptr; } }; using ArgSpecialDestArray = MutableArrayRef; class ArgEmitter { SILGenFunction &SGF; SILFunctionTypeRepresentation Rep; const Optional &ForeignError; ArrayRef ParamInfos; SmallVectorImpl &Args; /// Track any inout arguments that are emitted. Each corresponds /// in order to a "hole" (a null value) in Args. 
SmallVectorImpl &InOutArguments; Optional SpecialDests; public: ArgEmitter(SILGenFunction &SGF, SILFunctionTypeRepresentation Rep, ArrayRef paramInfos, SmallVectorImpl &args, SmallVectorImpl &inoutArgs, const Optional &foreignError, Optional specialDests = None) : SGF(SGF), Rep(Rep), ForeignError(foreignError), ParamInfos(paramInfos), Args(args), InOutArguments(inoutArgs), SpecialDests(specialDests) { assert(!specialDests || specialDests->size() == paramInfos.size()); } void emitTopLevel(ArgumentSource &&arg, AbstractionPattern origParamType) { emit(std::move(arg), origParamType); maybeEmitForeignErrorArgument(); } private: void emit(ArgumentSource &&arg, AbstractionPattern origParamType) { // If it was a tuple in the original type, the parameters will // have been exploded. if (origParamType.isTuple()) { emitExpanded(std::move(arg), origParamType); return; } auto substArgType = arg.getSubstType(); // Otherwise, if the substituted type is a tuple, then we should // emit the tuple in its most general form, because there's a // substitution of an opaque archetype to a tuple or function // type in play. The most general convention is generally to // pass the entire tuple indirectly, but if it's not // materializable, the convention is actually to break it up // into materializable chunks. See the comment in SILType.cpp. if (isUnmaterializableTupleType(substArgType)) { assert(origParamType.isTypeParameter()); emitExpanded(std::move(arg), origParamType); return; } // Okay, everything else will be passed as a single value, one // way or another. // Adjust for the foreign-error argument if necessary. maybeEmitForeignErrorArgument(); // The substituted parameter type. Might be different from the // substituted argument type by abstraction and/or bridging. 
SILParameterInfo param = claimNextParameter(); ArgSpecialDest *specialDest = claimNextSpecialDest(); // Make sure we use the same value category for these so that we // can hereafter just use simple equality checks to test for // abstraction. SILType loweredSubstArgType = SGF.getLoweredType(substArgType); SILType loweredSubstParamType = SILType::getPrimitiveType(param.getType(), loweredSubstArgType.getCategory()); // If the caller takes the argument indirectly, the argument has an // inout type. if (param.isIndirectInOut()) { assert(!specialDest); assert(isa(substArgType)); emitInOut(std::move(arg), loweredSubstArgType, loweredSubstParamType, origParamType, substArgType); return; } // If the original type is passed indirectly, copy to memory if // it's not already there. (Note that this potentially includes // conventions which pass indirectly without transferring // ownership, like Itanium C++.) if (param.isIndirect()) { if (specialDest) { emitIndirectInto(std::move(arg), origParamType, loweredSubstParamType, *specialDest); Args.push_back(ManagedValue::forInContext()); } else { auto value = emitIndirect(std::move(arg), loweredSubstArgType, origParamType, param); Args.push_back(value); } return; } // Okay, if the original parameter is passed directly, then we // just need to handle abstraction differences and bridging. assert(!specialDest); emitDirect(std::move(arg), loweredSubstArgType, origParamType, param); } SILParameterInfo claimNextParameter() { assert(!ParamInfos.empty()); auto param = ParamInfos.front(); ParamInfos = ParamInfos.slice(1); return param; } /// Claim the next destination, returning a null pointer if there /// is no special destination. ArgSpecialDest *claimNextSpecialDest() { if (!SpecialDests) return nullptr; assert(!SpecialDests->empty()); auto dest = &SpecialDests->front(); SpecialDests = SpecialDests->slice(1); return (dest->isValid() ? 
dest : nullptr); } bool isUnmaterializableTupleType(CanType type) { if (auto tuple = dyn_cast(type)) if (!tuple->isMaterializable()) return true; return false; } /// Emit an argument as an expanded tuple. void emitExpanded(ArgumentSource &&arg, AbstractionPattern origParamType) { CanTupleType substArgType = cast(arg.getSubstType()); // The original type isn't necessarily a tuple. assert(origParamType.matchesTuple(substArgType)); assert(!arg.isLValue() && "argument is l-value but parameter is tuple?"); // If we're working with an r-value, just expand it out and emit // all the elements individually. if (arg.isRValue()) { auto loc = arg.getKnownRValueLocation(); SmallVector elts; std::move(arg).asKnownRValue().extractElements(elts); for (auto i : indices(substArgType.getElementTypes())) { emit({ loc, std::move(elts[i]) }, origParamType.getTupleElementType(i)); } return; } // Otherwise, we're working with an expression. Expr *e = std::move(arg).asKnownExpr(); e = e->getSemanticsProvidingExpr(); // If the source expression is a tuple literal, we can break it // up directly. if (auto tuple = dyn_cast(e)) { for (auto i : indices(tuple->getElements())) { emit(tuple->getElement(i), origParamType.getTupleElementType(i)); } return; } if (auto shuffle = dyn_cast(e)) { emitShuffle(shuffle, origParamType); return; } // Fall back to the r-value case. emitExpanded({ e, SGF.emitRValue(e) }, origParamType); } void emitShuffle(Expr *inner, Expr *outer, ArrayRef innerElts, ConcreteDeclRef defaultArgsOwner, ArrayRef callerDefaultArgs, ArrayRef elementMapping, ArrayRef variadicArgs, Type varargsArrayType, AbstractionPattern origParamType); void emitShuffle(TupleShuffleExpr *shuffle, AbstractionPattern origType); ManagedValue emitIndirect(ArgumentSource &&arg, SILType loweredSubstArgType, AbstractionPattern origParamType, SILParameterInfo param) { auto contexts = getRValueEmissionContexts(loweredSubstArgType, param); // If no abstraction is required, try to honor the emission contexts. 
if (loweredSubstArgType.getSwiftRValueType() == param.getType()) { auto loc = arg.getLocation(); ManagedValue result = std::move(arg).getAsSingleValue(SGF, contexts.ForEmission); // If it's already in memory, great. if (result.getType().isAddress()) { return result; // Otherwise, put it there. } else { return emitMaterializeIntoTemporary(SGF, loc, result); } } // Otherwise, simultaneously emit and reabstract. return std::move(arg).materialize(SGF, origParamType, param.getSILType()); } void emitIndirectInto(ArgumentSource &&arg, AbstractionPattern origType, SILType loweredSubstParamType, ArgSpecialDest &dest) { dest.fill(SGF, std::move(arg), origType, loweredSubstParamType); } void emitInOut(ArgumentSource &&arg, SILType loweredSubstArgType, SILType loweredSubstParamType, AbstractionPattern origType, CanType substType) { SILLocation loc = arg.getLocation(); LValue lv = [&]{ // If the argument is already lowered to an LValue, it must be the // receiver of a self argument, which will be the first inout. if (arg.isLValue()) { return std::move(arg).asKnownLValue(); // This is logically wrong, but propagating l-values within // RValues is hard to avoid in custom argument-emission code // without making ArgumentSource capable of holding mixed // RValue/LValue tuples. (materializeForSet has to do this, // for one.) The onus is on the caller to ensure that formal // access semantics are honored. 
} else if (arg.isRValue()) { auto address = std::move(arg).asKnownRValue() .getAsSingleValue(SGF, arg.getKnownRValueLocation()); assert(address.isLValue()); auto substObjectType = cast(substType).getObjectType(); return LValue::forAddress(address, AbstractionPattern(substObjectType), substObjectType); } else { auto *e = cast(std::move(arg).asKnownExpr()-> getSemanticsProvidingExpr()); return SGF.emitLValue(e->getSubExpr(), AccessKind::ReadWrite); } }(); if (hasAbstractionDifference(Rep, loweredSubstParamType, loweredSubstArgType)) { AbstractionPattern origObjectType = origType.transformType( [](CanType type)->CanType { return CanType(type->getInOutObjectType()); }); lv.addSubstToOrigComponent(origObjectType, loweredSubstParamType); } // Leave an empty space in the ManagedValue sequence and // remember that we had an inout argument. InOutArguments.push_back({std::move(lv), loc}); Args.push_back(ManagedValue()); return; } void emitDirect(ArgumentSource &&arg, SILType loweredSubstArgType, AbstractionPattern origParamType, SILParameterInfo param) { auto contexts = getRValueEmissionContexts(loweredSubstArgType, param); if (arg.isRValue()) { emitDirect(arg.getKnownRValueLocation(), std::move(arg).asKnownRValue(), origParamType, param, contexts.ForReabstraction); } else { Expr *e = std::move(arg).asKnownExpr(); emitDirect(e, SGF.emitRValue(e, contexts.ForEmission), origParamType, param, contexts.ForReabstraction); } } void emitDirect(SILLocation loc, RValue &&arg, AbstractionPattern origParamType, SILParameterInfo param, SGFContext ctxt) { auto value = std::move(arg).getScalarValue(); switch (getSILFunctionLanguage(Rep)) { case SILFunctionLanguage::Swift: value = SGF.emitSubstToOrigValue(loc, value, origParamType, arg.getType(), ctxt); break; case SILFunctionLanguage::C: value = SGF.emitNativeToBridgedValue(loc, value, Rep, param.getType()); break; } Args.push_back(value); } void maybeEmitForeignErrorArgument() { if (!ForeignError || ForeignError->getErrorParameterIndex() 
!= Args.size()) return; SILParameterInfo param = claimNextParameter(); ArgSpecialDest *specialDest = claimNextSpecialDest(); assert(param.getConvention() == ParameterConvention::Direct_Unowned); assert(!specialDest && "special dest for error argument?"); (void) param; (void) specialDest; // Leave a placeholder in the position. Args.push_back(ManagedValue::forInContext()); } struct EmissionContexts { /// The context for emitting the r-value. SGFContext ForEmission; /// The context for reabstracting the r-value. SGFContext ForReabstraction; }; static EmissionContexts getRValueEmissionContexts(SILType loweredArgType, SILParameterInfo param) { // If the parameter is consumed, we have to emit at +1. if (param.isConsumed()) { return { SGFContext(), SGFContext() }; } // Otherwise, we can emit the final value at +0 (but only with a // guarantee that the value will survive). // // TODO: we can pass at +0 (immediate) to an unowned parameter // if we know that there will be no arbitrary side-effects // between now and the call. SGFContext finalContext = SGFContext::AllowGuaranteedPlusZero; // If the r-value doesn't require reabstraction, the final context // is the emission context. if (loweredArgType.getSwiftRValueType() == param.getType()) { return { finalContext, SGFContext() }; } // Otherwise, the final context is the reabstraction context. return { SGFContext(), finalContext }; } }; } void ArgEmitter::emitShuffle(Expr *inner, Expr *outer, ArrayRef innerElts, ConcreteDeclRef defaultArgsOwner, ArrayRef callerDefaultArgs, ArrayRef elementMapping, ArrayRef variadicArgs, Type varargsArrayType, AbstractionPattern origParamType) { auto outerTuple = cast(outer->getType()->getCanonicalType()); CanType canVarargsArrayType; if (varargsArrayType) canVarargsArrayType = varargsArrayType->getCanonicalType(); // We could support dest addrs here, but it can't actually happen // with the current limitations on default arguments in tuples. 
assert(!SpecialDests && "shuffle nested within varargs expansion?"); struct ElementExtent { /// The parameters which go into this tuple element. /// This is set in the first pass. ArrayRef Params; /// The destination index, if any. /// This is set in the first pass. unsigned DestIndex : 30; unsigned HasDestIndex : 1; #ifndef NDEBUG unsigned Used : 1; #endif /// The arguments which feed this tuple element. /// This is set in the second pass. ArrayRef Args; /// The inout arguments which feed this tuple element. /// This is set in the second pass. MutableArrayRef InOutArgs; ElementExtent() : HasDestIndex(false) #ifndef NDEBUG , Used(false) #endif {} }; // The original parameter type. SmallVector origInnerElts(innerElts.size(), AbstractionPattern::getInvalid()); AbstractionPattern innerOrigParamType = AbstractionPattern::getInvalid(); // Flattened inner parameter sequence. SmallVector innerParams; // Extents of the inner elements. SmallVector innerExtents(innerElts.size()); Optional varargsInfo; SILParameterInfo variadicParamInfo; // innerExtents will point at this Optional> innerSpecialDests; // First, construct an abstraction pattern and parameter sequence // which we can use to emit the inner tuple. { unsigned nextParamIndex = 0; for (unsigned outerIndex : indices(outerTuple.getElementTypes())) { CanType substEltType = outerTuple.getElementType(outerIndex); AbstractionPattern origEltType = origParamType.getTupleElementType(outerIndex); unsigned numParams = getFlattenedValueCount(origEltType, substEltType); // Skip the foreign-error parameter. assert((!ForeignError || ForeignError->getErrorParameterIndex() <= nextParamIndex || ForeignError->getErrorParameterIndex() >= nextParamIndex + numParams) && "error parameter falls within shuffled range?"); if (numParams && // Don't skip it twice if there's an empty tuple. 
ForeignError && ForeignError->getErrorParameterIndex() == nextParamIndex) { nextParamIndex++; } // Grab the parameter infos corresponding to this tuple element // (but don't drop them from ParamInfos yet). auto eltParams = ParamInfos.slice(nextParamIndex, numParams); nextParamIndex += numParams; int innerIndex = elementMapping[outerIndex]; if (innerIndex >= 0) { #ifndef NDEBUG assert(!innerExtents[innerIndex].Used && "using element twice"); innerExtents[innerIndex].Used = true; #endif innerExtents[innerIndex].Params = eltParams; origInnerElts[innerIndex] = origEltType; } else if (innerIndex == TupleShuffleExpr::Variadic) { auto &varargsField = outerTuple->getElement(outerIndex); assert(varargsField.isVararg()); assert(!varargsInfo.hasValue() && "already had varargs entry?"); CanType varargsEltType = CanType(varargsField.getVarargBaseTy()); unsigned numVarargs = variadicArgs.size(); assert(canVarargsArrayType == substEltType); // Create the array value. varargsInfo.emplace(emitBeginVarargs(SGF, outer, varargsEltType, canVarargsArrayType, numVarargs)); // If we have any varargs, we'll need to actually initialize // the array buffer. if (numVarargs) { // For this, we'll need special destinations. assert(!innerSpecialDests); innerSpecialDests.emplace(); // Prepare the variadic "arguments" as single +1 indirect // parameters with the array's desired abstraction pattern. // The vararg element type should be materializable, and the // abstraction pattern should be opaque, so ArgEmitter's // lowering should always generate exactly one "argument" // per element even if the substituted element type is a tuple. variadicParamInfo = SILParameterInfo(varargsInfo->getBaseTypeLowering() .getLoweredType().getSwiftRValueType(), ParameterConvention::Indirect_In); unsigned i = 0; for (unsigned innerIndex : variadicArgs) { // Find out where the next varargs element is coming from. 
assert(innerIndex >= 0 && "special source for varargs element??"); #ifndef NDEBUG assert(!innerExtents[innerIndex].Used && "using element twice"); innerExtents[innerIndex].Used = true; #endif // Set the destination index. innerExtents[innerIndex].HasDestIndex = true; innerExtents[innerIndex].DestIndex = i++; // Use the singleton param info we prepared before. innerExtents[innerIndex].Params = variadicParamInfo; // Propagate the element abstraction pattern. origInnerElts[innerIndex] = varargsInfo->getBaseAbstractionPattern(); } } } } // The inner abstraction pattern is opaque if we started with an // opaque pattern; otherwise, it's a tuple of the de-shuffled // tuple elements. innerOrigParamType = origParamType; if (!origParamType.isTypeParameter()) { // That "tuple" might not actually be a tuple. if (innerElts.size() == 1 && !innerElts[0].hasName()) { innerOrigParamType = origInnerElts[0]; } else { innerOrigParamType = AbstractionPattern::getTuple(origInnerElts); } } // Flatten the parameters from innerExtents into innerParams, and // fill out varargsAddrs if necessary. for (auto &extent : innerExtents) { assert(extent.Used && "didn't use all the inner tuple elements!"); innerParams.append(extent.Params.begin(), extent.Params.end()); // Fill in the special destinations array. if (innerSpecialDests) { // Use the saved index if applicable. if (extent.HasDestIndex) { assert(extent.Params.size() == 1); innerSpecialDests->push_back( ArgSpecialDest(*varargsInfo, extent.DestIndex)); // Otherwise, fill in with the appropriate number of invalid // special dests. } else { // ArgSpecialDest isn't copyable, so we can't just use append. for (auto &p : extent.Params) { (void) p; innerSpecialDests->push_back(ArgSpecialDest()); } } } } } // Emit the inner expression. SmallVector innerArgs; SmallVector innerInOutArgs; ArgEmitter(SGF, Rep, innerParams, innerArgs, innerInOutArgs, /*foreign error*/ None, (innerSpecialDests ? 
ArgSpecialDestArray(*innerSpecialDests) : Optional())) .emitTopLevel(ArgumentSource(inner), innerOrigParamType); // Make a second pass to split the inner arguments correctly. { ArrayRef nextArgs = innerArgs; MutableArrayRef nextInOutArgs = innerInOutArgs; for (auto &extent : innerExtents) { auto length = extent.Params.size(); // Claim the next N inner args for this inner argument. extent.Args = nextArgs.slice(0, length); nextArgs = nextArgs.slice(length); // Claim the correct number of inout arguments as well. unsigned numInOut = 0; for (auto arg : extent.Args) { assert(!arg.isInContext() || extent.HasDestIndex); if (!arg) numInOut++; } extent.InOutArgs = nextInOutArgs.slice(0, numInOut); nextInOutArgs = nextInOutArgs.slice(numInOut); } assert(nextArgs.empty() && "didn't claim all args"); assert(nextInOutArgs.empty() && "didn't claim all inout args"); } // Make a final pass to emit default arguments and move things into // the outer arguments lists. unsigned nextCallerDefaultArg = 0; for (unsigned outerIndex = 0, e = outerTuple->getNumElements(); outerIndex != e; ++outerIndex) { // If this comes from an inner element, move the appropriate // inner element values over. int innerIndex = elementMapping[outerIndex]; if (innerIndex >= 0) { auto &extent = innerExtents[innerIndex]; auto numArgs = extent.Args.size(); maybeEmitForeignErrorArgument(); // Drop N parameters off of ParamInfos. ParamInfos = ParamInfos.slice(numArgs); // Move the appropriate inner arguments over as outer arguments. Args.append(extent.Args.begin(), extent.Args.end()); for (auto &inoutArg : extent.InOutArgs) InOutArguments.push_back(std::move(inoutArg)); // If this is default initialization, call the default argument // generator. } else if (innerIndex == TupleShuffleExpr::DefaultInitialize) { // Otherwise, emit the default initializer, then map that as a // default argument. 
CanType eltType = outerTuple.getElementType(outerIndex); auto origType = origParamType.getTupleElementType(outerIndex); RValue value = SGF.emitApplyOfDefaultArgGenerator(outer, defaultArgsOwner, outerIndex, eltType, origType); emit(ArgumentSource(outer, std::move(value)), origType); // If this is caller default initialization, generate the // appropriate value. } else if (innerIndex == TupleShuffleExpr::CallerDefaultInitialize) { auto arg = callerDefaultArgs[nextCallerDefaultArg++]; emit(ArgumentSource(arg), origParamType.getTupleElementType(outerIndex)); // If we're supposed to create a varargs array with the rest, do so. } else if (innerIndex == TupleShuffleExpr::Variadic) { auto &varargsField = outerTuple->getElement(outerIndex); assert(varargsField.isVararg() && "Cannot initialize nonvariadic element"); assert(varargsInfo.hasValue()); (void) varargsField; // We've successfully built the varargs array; deactivate all // the special destinations. if (innerSpecialDests) { for (auto &dest : *innerSpecialDests) { if (dest.isValid()) dest.deactivate(SGF); } } CanType eltType = outerTuple.getElementType(outerIndex); ManagedValue varargs = emitEndVarargs(SGF, outer, std::move(*varargsInfo)); emit(ArgumentSource(outer, RValue(SGF, outer, eltType, varargs)), origParamType.getTupleElementType(outerIndex)); // That's the last special case defined so far. 
} else { llvm_unreachable("unexpected special case in tuple shuffle!"); } } } void ArgEmitter::emitShuffle(TupleShuffleExpr *E, AbstractionPattern origParamType) { ArrayRef srcElts; TupleTypeElt singletonSrcElt; if (E->isSourceScalar()) { singletonSrcElt = E->getSubExpr()->getType()->getCanonicalType(); srcElts = singletonSrcElt; } else { srcElts = cast(E->getSubExpr()->getType()->getCanonicalType()) ->getElements(); } emitShuffle(E->getSubExpr(), E, srcElts, E->getDefaultArgsOwner(), E->getCallerDefaultArgs(), E->getElementMapping(), E->getVariadicArgs(), E->getVarargsArrayTypeOrNull(), origParamType); } namespace { /// Cleanup to destroy an uninitialized box. class DeallocateUninitializedBox : public Cleanup { SILValue box; public: DeallocateUninitializedBox(SILValue box) : box(box) {} void emit(SILGenFunction &gen, CleanupLocation l) override { gen.B.createDeallocBox(l, box); } }; } // end anonymous namespace static CleanupHandle enterDeallocBoxCleanup(SILGenFunction &gen, SILValue box) { gen.Cleanups.pushCleanup(box); return gen.Cleanups.getTopCleanup(); } /// This is an initialization for a box. class BoxInitialization : public SingleBufferInitialization { SILValue box; SILValue addr; CleanupHandle uninitCleanup; CleanupHandle initCleanup; public: BoxInitialization(SILValue box, SILValue addr, CleanupHandle uninitCleanup, CleanupHandle initCleanup) : box(box), addr(addr), uninitCleanup(uninitCleanup), initCleanup(initCleanup) {} void finishInitialization(SILGenFunction &gen) override { SingleBufferInitialization::finishInitialization(gen); gen.Cleanups.setCleanupState(uninitCleanup, CleanupState::Dead); if (initCleanup.isValid()) gen.Cleanups.setCleanupState(initCleanup, CleanupState::Active); } SILValue getAddressOrNull() const override { return addr; } ManagedValue getManagedBox() const { return ManagedValue(box, initCleanup); } }; /// Emits SIL instructions to create an enum value. 
Attempts to avoid /// unnecessary copies by emitting the payload directly into the enum /// payload, or into the box in the case of an indirect payload. ManagedValue SILGenFunction::emitInjectEnum(SILLocation loc, ArgumentSource payload, SILType enumTy, EnumElementDecl *element, SGFContext C) { // Easy case -- no payload if (!payload) { if (enumTy.isLoadable(SGM.M)) { return emitManagedRValueWithCleanup( B.createEnum(loc, SILValue(), element, enumTy.getObjectType())); } // Emit the enum directly into the context if possible SILValue resultSlot = getBufferForExprResult(loc, enumTy, C); B.createInjectEnumAddr(loc, resultSlot, element); return manageBufferForExprResult(resultSlot, getTypeLowering(enumTy), C); } ManagedValue payloadMV; AbstractionPattern origFormalType = SGM.M.Types.getAbstractionPattern(element); auto &payloadTL = getTypeLowering(origFormalType, payload.getSubstType()); SILType loweredPayloadType = payloadTL.getLoweredType(); // If the payload is indirect, emit it into a heap allocated box. // // To avoid copies, evaluate it directly into the box, being // careful to stage the cleanups so that if the expression // throws, we know to deallocate the uninitialized box. if (element->isIndirect() || element->getParentEnum()->isIndirect()) { auto *box = B.createAllocBox(loc, payloadTL.getLoweredType()); auto *addr = B.createProjectBox(loc, box); CleanupHandle initCleanup = enterDestroyCleanup(box); Cleanups.setCleanupState(initCleanup, CleanupState::Dormant); CleanupHandle uninitCleanup = enterDeallocBoxCleanup(*this, box); BoxInitialization dest(box, addr, uninitCleanup, initCleanup); std::move(payload).forwardInto(*this, origFormalType, &dest, payloadTL); payloadMV = dest.getManagedBox(); loweredPayloadType = payloadMV.getType(); } // Loadable with payload if (enumTy.isLoadable(SGM.M)) { if (!payloadMV) { // If the payload was indirect, we already evaluated it and // have a single value. Otherwise, evaluate the payload. 
payloadMV = std::move(payload).getAsSingleValue(*this, origFormalType); } SILValue argValue = payloadMV.forward(*this); return emitManagedRValueWithCleanup( B.createEnum(loc, argValue, element, enumTy.getObjectType())); } // Address-only with payload SILValue resultSlot = getBufferForExprResult(loc, enumTy, C); SILValue resultData = B.createInitEnumDataAddr(loc, resultSlot, element, loweredPayloadType.getAddressType()); if (payloadMV) { // If the payload was indirect, we already evaluated it and // have a single value. Store it into the result. B.createStore(loc, payloadMV.forward(*this), resultData); } else if (payloadTL.isLoadable()) { // The payload of this specific enum case might be loadable // even if the overall enum is address-only. payloadMV = std::move(payload).getAsSingleValue(*this, origFormalType); B.createStore(loc, payloadMV.forward(*this), resultData); } else { // The payload is address-only. Evaluate it directly into // the enum. TemporaryInitialization dest(resultData, CleanupHandle::invalid()); std::move(payload).forwardInto(*this, origFormalType, &dest, payloadTL); } // The payload is initialized, now apply the tag. B.createInjectEnumAddr(loc, resultSlot, element); return manageBufferForExprResult(resultSlot, getTypeLowering(enumTy), C); } namespace { /// A structure for conveniently claiming sets of uncurried parameters. 
struct ParamLowering { ArrayRef Params; SILFunctionTypeRepresentation Rep; ParamLowering(CanSILFunctionType fnType) : Params(fnType->getParameters()), Rep(fnType->getRepresentation()) {} ArrayRef claimParams(AbstractionPattern origParamType, CanType substParamType, const Optional &foreignError) { unsigned count = getFlattenedValueCount(origParamType, substParamType); if (foreignError) count++; assert(count <= Params.size()); auto result = Params.slice(Params.size() - count, count); Params = Params.slice(0, Params.size() - count); return result; } ArrayRef claimCaptureParams(ArrayRef captures) { auto firstCapture = Params.size() - captures.size(); #ifndef NDEBUG assert(Params.size() >= captures.size() && "more captures than params?!"); for (unsigned i = 0; i < captures.size(); ++i) { assert(Params[i + firstCapture].getSILType() == captures[i].getType() && "capture doesn't match param type"); } #endif auto result = Params.slice(firstCapture, captures.size()); Params = Params.slice(0, firstCapture); return result; } ~ParamLowering() { assert(Params.empty() && "didn't consume all the parameters"); } }; /// An application of possibly unevaluated arguments in the form of an /// ArgumentSource to a Callee. class CallSite { public: SILLocation Loc; CanType SubstResultType; private: ArgumentSource ArgValue; bool Throws; public: CallSite(ApplyExpr *apply) : Loc(apply), SubstResultType(apply->getType()->getCanonicalType()), ArgValue(apply->getArg()), Throws(apply->throws()) { } CallSite(SILLocation loc, ArgumentSource &&value, CanType resultType, bool throws) : Loc(loc), SubstResultType(resultType), ArgValue(std::move(value)), Throws(throws) { } CallSite(SILLocation loc, ArgumentSource &&value, CanAnyFunctionType fnType) : CallSite(loc, std::move(value), fnType.getResult(), fnType->throws()) { } /// Return the substituted, unlowered AST type of the argument. 
CanType getSubstArgType() const { return ArgValue.getSubstType(); } /// Return the substituted, unlowered AST type of the result of /// this application. CanType getSubstResultType() const { return SubstResultType; } bool throws() const { return Throws; } /// Evaluate arguments and begin any inout formal accesses. void emit(SILGenFunction &gen, AbstractionPattern origParamType, ParamLowering &lowering, SmallVectorImpl &args, SmallVectorImpl &inoutArgs, const Optional &foreignError) && { auto params = lowering.claimParams(origParamType, getSubstArgType(), foreignError); ArgEmitter emitter(gen, lowering.Rep, params, args, inoutArgs, foreignError); emitter.emitTopLevel(std::move(ArgValue), origParamType); } /// Take the arguments for special processing, in place of the above. ArgumentSource &&forward() && { return std::move(ArgValue); } /// Returns true if the argument of this value is a single valued RValue /// that is passed either at plus zero or is trivial. bool isArgPlusZeroOrTrivialRValue() { if (!ArgValue.isRValue()) return false; return ArgValue.peekRValue().peekIsPlusZeroRValueOrTrivial(); } /// If callsite has an argument that is a plus zero or trivial rvalue, emit /// a retain so that the argument is at PlusOne. void convertToPlusOneFromPlusZero(SILGenFunction &gen) { assert(isArgPlusZeroOrTrivialRValue() && "Must have a plus zero or " "trivial rvalue as an argument."); SILValue ArgSILValue = ArgValue.peekRValue().peekScalarValue(); SILType ArgTy = ArgSILValue->getType(); // If we are trivial, there is no difference in between +1 and +0 since // a trivial object is not reference counted. if (ArgTy.isTrivial(gen.SGM.M)) return; // Grab the SILLocation and the new managed value. SILLocation ArgLoc = ArgValue.getKnownRValueLocation(); ManagedValue ArgManagedValue = gen.emitManagedRetain(ArgLoc, ArgSILValue); // Ok now we make our transformation. First set ArgValue to a used albeit // invalid, empty ArgumentSource. 
ArgValue = ArgumentSource(); // Reassign ArgValue. RValue NewRValue = RValue(gen, ArgLoc, ArgTy.getSwiftRValueType(), ArgManagedValue); ArgValue = ArgumentSource(ArgLoc, std::move(NewRValue)); } }; /// Once the Callee and CallSites have been prepared by SILGenApply, /// generate SIL for a fully-formed call. /// /// The lowered function type of the callee defines an abstraction pattern /// for evaluating argument values of tuple type directly into explosions of /// scalars where possible. /// /// If there are more call sites than the natural uncurry level, they are /// have to be applied recursively to each intermediate callee. /// /// Also inout formal access and parameter and result conventions are /// handled here, with some special logic required for calls with +0 self. class CallEmission { SILGenFunction &gen; std::vector uncurriedSites; std::vector extraSites; Callee callee; WritebackScope InitialWritebackScope; unsigned uncurries; bool applied; bool AssumedPlusZeroSelf; public: /// Create an emission for a call of the given callee. CallEmission(SILGenFunction &gen, Callee &&callee, WritebackScope &&writebackScope, bool assumedPlusZeroSelf = false) : gen(gen), callee(std::move(callee)), InitialWritebackScope(std::move(writebackScope)), uncurries(callee.getNaturalUncurryLevel() + 1), applied(false), AssumedPlusZeroSelf(assumedPlusZeroSelf) { // Subtract an uncurry level for captures, if any. // TODO: Encapsulate this better in Callee. if (this->callee.hasCaptures()) { assert(uncurries > 0 && "captures w/o uncurry level?"); --uncurries; } } /// Add a level of function application by passing in its possibly /// unevaluated arguments and their formal type. void addCallSite(CallSite &&site) { assert(!applied && "already applied!"); // Append to the main argument list if we have uncurry levels remaining. if (uncurries > 0) { --uncurries; uncurriedSites.push_back(std::move(site)); return; } // Otherwise, apply these arguments to the result of the previous call. 
extraSites.push_back(std::move(site)); } /// Add a level of function application by passing in its possibly /// unevaluated arguments and their formal type template void addCallSite(T &&...args) { addCallSite(CallSite{std::forward(args)...}); } /// If we assumed that self was being passed at +0 before we knew what the /// final uncurried level of the callee was, but given the final uncurried /// level of the callee, we are actually passing self at +1, add in a retain /// of self. void convertSelfToPlusOneFromPlusZero() { // Self is always the first callsite. if (!uncurriedSites[0].isArgPlusZeroOrTrivialRValue()) return; // Insert an invalid ArgumentSource into uncurriedSites[0] so it is. uncurriedSites[0].convertToPlusOneFromPlusZero(gen); } /// Is this a fully-applied enum element constructor call? bool isEnumElementConstructor() { return (callee.kind == Callee::Kind::EnumElement && uncurries == 0); } /// True if this is a completely unapplied super method call bool isPartiallyAppliedSuperMethod(unsigned uncurryLevel) { return (callee.kind == Callee::Kind::SuperMethod && uncurryLevel == 0); } /// Emit the fully-formed call. RValue apply(SGFContext C = SGFContext()) { assert(!applied && "already applied!"); applied = true; // Get the callee value at the needed uncurry level, uncurrying as // much as possible. If the number of calls is less than the natural // uncurry level, the callee emission might create a curry thunk. unsigned uncurryLevel = callee.getNaturalUncurryLevel() - uncurries; // Get either the specialized emitter for a known function, or the // function value for a normal callee. // Check for a specialized emitter. 
Optional specializedEmitter = callee.getSpecializedEmitter(gen.SGM, uncurryLevel); CanSILFunctionType substFnType; ManagedValue mv; Optional foreignError; ApplyOptions initialOptions = ApplyOptions::None; AbstractionPattern origFormalType(callee.getOrigFormalType()); CanAnyFunctionType formalType = callee.getSubstFormalType(); if (specializedEmitter || isPartiallyAppliedSuperMethod(uncurryLevel)) { // We want to emit the arguments as fully-substituted values // because that's what the specialized emitters expect. origFormalType = AbstractionPattern(formalType); substFnType = gen.getLoweredType(formalType, uncurryLevel) .castTo(); } else if (isEnumElementConstructor()) { // Enum payloads are always stored at the abstraction level // of the unsubstituted payload type. This means that unlike // with specialized emitters above, enum constructors use // the AST-level abstraction pattern, to ensure that function // types in payloads are re-abstracted correctly. assert(!AssumedPlusZeroSelf); substFnType = gen.getLoweredType(origFormalType, formalType, uncurryLevel) .castTo(); } else { std::tie(mv, substFnType, foreignError, initialOptions) = callee.getAtUncurryLevel(gen, uncurryLevel); } // Now that we know the substFnType, check if we assumed that we were // passing self at +0. If we did and self is not actually passed at +0, // retain Self. if (AssumedPlusZeroSelf) { // If the final emitted function does not have a self param or it does // have a self param that is consumed, convert what we think is self to // be plus zero. if (!substFnType->hasSelfParam() || substFnType->getSelfParameter().isConsumed()) { convertSelfToPlusOneFromPlusZero(); } } // Emit the first level of call. RValue result; // We use the context emit-into initialization only for the // outermost call. SGFContext uncurriedContext = (extraSites.empty() ? C : SGFContext()); // If we have an early emitter, just let it take over for the // uncurried call site. 
if (specializedEmitter && specializedEmitter->isEarlyEmitter()) { auto emitter = specializedEmitter->getEarlyEmitter(); assert(uncurriedSites.size() == 1); CanFunctionType formalApplyType = cast(formalType); assert(!formalApplyType->getExtInfo().throws()); CanType formalResultType = formalApplyType.getResult(); SILLocation uncurriedLoc = uncurriedSites[0].Loc; claimNextParamClause(origFormalType); claimNextParamClause(formalType); // We should be able to enforce that these arguments are // always still expressions. Expr *argument = std::move(uncurriedSites[0]).forward().asKnownExpr(); ManagedValue resultMV = emitter(gen, uncurriedLoc, callee.getSubstitutions(), argument, formalApplyType, uncurriedContext); result = RValue(gen, uncurriedLoc, formalResultType, resultMV); } else if (isEnumElementConstructor()) { // If we have a fully-applied enum element constructor, open-code // the construction. EnumElementDecl *element = callee.getEnumElementDecl(); SILLocation uncurriedLoc = uncurriedSites[0].Loc; CanType formalResultType = formalType.getResult(); // Ignore metatype argument claimNextParamClause(origFormalType); claimNextParamClause(formalType); std::move(uncurriedSites[0]).forward().getAsSingleValue(gen); // Get the payload argument. ArgumentSource payload; if (element->hasArgumentType()) { assert(uncurriedSites.size() == 2); formalResultType = formalType.getResult(); claimNextParamClause(origFormalType); claimNextParamClause(formalType); payload = std::move(uncurriedSites[1]).forward(); } else { assert(uncurriedSites.size() == 1); } assert(substFnType->getNumAllResults() == 1); ManagedValue resultMV = gen.emitInjectEnum(uncurriedLoc, std::move(payload), substFnType->getAllResults()[0].getSILType(), element, uncurriedContext); result = RValue(gen, uncurriedLoc, formalResultType, resultMV); // Otherwise, emit the uncurried arguments now and perform // the call. } else { // Emit the arguments. 
Optional uncurriedLoc; SmallVector, 2> args; SmallVector inoutArgs; CanFunctionType formalApplyType; args.reserve(uncurriedSites.size()); { ParamLowering paramLowering(substFnType); assert(!foreignError || uncurriedSites.size() == 1 || (uncurriedSites.size() == 2 && substFnType->hasSelfParam())); if (!uncurriedSites.back().throws()) { initialOptions |= ApplyOptions::DoesNotThrow; } // Collect the captures, if any. if (callee.hasCaptures()) { // The captures are represented as a placeholder curry level in the // formal type. // TODO: Remove this hack. paramLowering.claimCaptureParams(callee.getCaptures()); claimNextParamClause(origFormalType); claimNextParamClause(formalType); args.push_back({}); args.back().append(callee.getCaptures().begin(), callee.getCaptures().end()); } // Collect the arguments to the uncurried call. for (auto &site : uncurriedSites) { AbstractionPattern origParamType = claimNextParamClause(origFormalType); formalApplyType = cast(formalType); claimNextParamClause(formalType); uncurriedLoc = site.Loc; args.push_back({}); std::move(site).emit(gen, origParamType, paramLowering, args.back(), inoutArgs, &site == &uncurriedSites.back() ? foreignError : static_cast(None)); } } assert(uncurriedLoc); assert(formalApplyType); // Begin the formal accesses to any inout arguments we have. if (!inoutArgs.empty()) { beginInOutFormalAccesses(gen, inoutArgs, args); } // Uncurry the arguments in calling convention order. SmallVector uncurriedArgs; for (auto &argSet : reversed(args)) uncurriedArgs.append(argSet.begin(), argSet.end()); args = {}; // Emit the uncurried call. // Special case for superclass method calls. 
if (isPartiallyAppliedSuperMethod(uncurryLevel)) { assert(uncurriedArgs.size() == 1 && "Can only partially apply the self parameter of a super method call"); auto constant = callee.getMethodName(); auto loc = uncurriedLoc.getValue(); auto subs = callee.getSubstitutions(); auto upcastedSelf = uncurriedArgs.back(); auto self = cast(upcastedSelf.getValue())->getOperand(); auto constantInfo = gen.getConstantInfo(callee.getMethodName()); auto functionTy = constantInfo.getSILType(); SILValue superMethodVal = gen.B.createSuperMethod( loc, self, constant, functionTy, /*volatile*/ constant.isForeign); auto closureTy = SILGenBuilder::getPartialApplyResultType( constantInfo.getSILType(), 1, gen.B.getModule(), subs); auto &module = gen.getFunction().getModule(); auto partialApplyTy = functionTy; if (constantInfo.SILFnType->isPolymorphic() && !subs.empty()) partialApplyTy = partialApplyTy.substGenericArgs(module, subs); SILValue partialApply = gen.B.createPartialApply( loc, superMethodVal, partialApplyTy, subs, { upcastedSelf.forward(gen) }, closureTy); result = RValue(gen, loc, formalApplyType.getResult(), ManagedValue::forUnmanaged(partialApply)); // Handle a regular call. } else if (!specializedEmitter) { result = gen.emitApply(uncurriedLoc.getValue(), mv, callee.getSubstitutions(), uncurriedArgs, substFnType, origFormalType, uncurriedSites.back().getSubstResultType(), initialOptions, None, foreignError, uncurriedContext); // Handle a specialized emitter operating on evaluated arguments. } else if (specializedEmitter->isLateEmitter()) { auto emitter = specializedEmitter->getLateEmitter(); result = RValue(gen, *uncurriedLoc, formalApplyType.getResult(), emitter(gen, uncurriedLoc.getValue(), callee.getSubstitutions(), uncurriedArgs, formalApplyType, uncurriedContext)); // Builtins. 
} else { assert(specializedEmitter->isNamedBuiltin()); auto builtinName = specializedEmitter->getBuiltinName(); SmallVector consumedArgs; for (auto arg : uncurriedArgs) { consumedArgs.push_back(arg.forward(gen)); } auto resultVal = gen.B.createBuiltin(uncurriedLoc.getValue(), builtinName, substFnType->getSILResult(), callee.getSubstitutions(), consumedArgs); result = RValue(gen, *uncurriedLoc, formalApplyType.getResult(), gen.emitManagedRValueWithCleanup(resultVal)); } } // End the initial writeback scope. InitialWritebackScope.pop(); // If there are remaining call sites, apply them to the result function. // Each chained call gets its own writeback scope. for (unsigned i = 0, size = extraSites.size(); i < size; ++i) { WritebackScope writebackScope(gen); SILLocation loc = extraSites[i].Loc; auto functionMV = std::move(result).getAsSingleValue(gen, loc); auto substFnType = functionMV.getType().castTo(); ParamLowering paramLowering(substFnType); SmallVector siteArgs; SmallVector inoutArgs; // TODO: foreign errors for block or function pointer values? assert(substFnType->hasErrorResult() || !cast(formalType)->getExtInfo().throws()); foreignError = None; // The result function has already been reabstracted to the substituted // type, so use the substituted formal type as the abstraction pattern // for argument passing now. AbstractionPattern origResultType(formalType.getResult()); AbstractionPattern origParamType(claimNextParamClause(formalType)); std::move(extraSites[i]).emit(gen, origParamType, paramLowering, siteArgs, inoutArgs, foreignError); if (!inoutArgs.empty()) { beginInOutFormalAccesses(gen, inoutArgs, siteArgs); } SGFContext context = i == size - 1 ? 
C : SGFContext(); ApplyOptions options = ApplyOptions::None; result = gen.emitApply(loc, functionMV, {}, siteArgs, substFnType, origResultType, extraSites[i].getSubstResultType(), options, None, foreignError, context); } return result; } ~CallEmission() { assert(applied && "never applied!"); } // Movable, but not copyable. CallEmission(CallEmission &&e) : gen(e.gen), uncurriedSites(std::move(e.uncurriedSites)), extraSites(std::move(e.extraSites)), callee(std::move(e.callee)), InitialWritebackScope(std::move(e.InitialWritebackScope)), uncurries(e.uncurries), applied(e.applied) { e.applied = true; } private: CallEmission(const CallEmission &) = delete; CallEmission &operator=(const CallEmission &) = delete; }; } // end anonymous namespace static CallEmission prepareApplyExpr(SILGenFunction &gen, Expr *e) { // Set up writebacks for the call(s). WritebackScope writebacks(gen); SILGenApply apply(gen); // Decompose the call site. apply.decompose(e); // Evaluate and discard the side effect if present. if (apply.SideEffect) gen.emitRValue(apply.SideEffect); // Build the call. // Pass the writeback scope on to CallEmission so it can thread scopes through // nested calls. CallEmission emission(gen, apply.getCallee(), std::move(writebacks), apply.AssumedPlusZeroSelf); // Apply 'self' if provided. if (apply.SelfParam) emission.addCallSite(RegularLocation(e), std::move(apply.SelfParam), apply.SelfType->getCanonicalType(), /*throws*/ false); // Apply arguments from call sites, innermost to outermost. 
for (auto site = apply.CallSites.rbegin(), end = apply.CallSites.rend(); site != end; ++site) { emission.addCallSite(*site); } return emission; } RValue SILGenFunction::emitApplyExpr(Expr *e, SGFContext c) { return prepareApplyExpr(*this, e).apply(c); } RValue SILGenFunction::emitApplyOfLibraryIntrinsic(SILLocation loc, FuncDecl *fn, ArrayRef subs, ArrayRef args, SGFContext ctx) { auto origFormalType = cast(fn->getInterfaceType()->getCanonicalType()); auto substFormalType = origFormalType; if (!subs.empty()) { auto genericFnType = cast(substFormalType); auto applied = genericFnType->substGenericArgs(SGM.SwiftModule, subs); substFormalType = cast(applied->getCanonicalType()); } auto callee = Callee::forDirect(*this, SILDeclRef(fn), substFormalType, loc); callee.setSubstitutions(*this, loc, subs, 0); ManagedValue mv; CanSILFunctionType substFnType; Optional foreignError; ApplyOptions options; std::tie(mv, substFnType, foreignError, options) = callee.getAtUncurryLevel(*this, 0); assert(!foreignError); assert(substFnType->getExtInfo().getLanguage() == SILFunctionLanguage::Swift); return emitApply(loc, mv, subs, args, substFnType, AbstractionPattern(origFormalType).getFunctionResultType(), substFormalType.getResult(), options, None, None, ctx); } /// Allocate an uninitialized array of a given size, returning the array /// and a pointer to its uninitialized contents, which must be initialized /// before the array is valid. std::pair SILGenFunction::emitUninitializedArrayAllocation(Type ArrayTy, SILValue Length, SILLocation Loc) { auto &Ctx = getASTContext(); auto allocate = Ctx.getAllocateUninitializedArray(nullptr); auto arrayElementTy = ArrayTy->castTo() ->getGenericArgs()[0]; // Invoke the intrinsic, which returns a tuple. Substitution sub{arrayElementTy, {}}; auto result = emitApplyOfLibraryIntrinsic(Loc, allocate, sub, ManagedValue::forUnmanaged(Length), SGFContext()); // Explode the tuple. 
// NOTE(review): this region lost its original line breaks and its C++
// template-argument lists during extraction (e.g. "SmallVector resultElts;",
// bare "cast(...)", "dyn_cast(...)", "castTo()").  The angle-bracketed
// arguments must be restored from the upstream Swift sources before this
// file can compile; only the line structure and comments are reconstructed
// here.

// Tail of a function that begins above this chunk: splits 'result' into its
// two components and returns the (managed value, unmanaged value) pair.
SmallVector resultElts;
std::move(result).getAll(resultElts);
return {resultElts[0], resultElts[1].getUnmanagedValue()};
}

/// Deallocate an uninitialized array.
void SILGenFunction::emitUninitializedArrayDeallocation(SILLocation loc,
                                                        SILValue array) {
  auto &Ctx = getASTContext();
  auto deallocate = Ctx.getDeallocateUninitializedArray(nullptr);

  // The element type is the single generic argument of the array's
  // bound generic type.
  CanType arrayElementTy =
    array->getType().castTo().getGenericArgs()[0];

  // Invoke the intrinsic.
  Substitution sub{arrayElementTy, {}};
  emitApplyOfLibraryIntrinsic(loc, deallocate, sub,
                              ManagedValue::forUnmanaged(array),
                              SGFContext());
}

namespace {
  /// A cleanup that deallocates an uninitialized array.
  class DeallocateUninitializedArray: public Cleanup {
    SILValue Array;
  public:
    DeallocateUninitializedArray(SILValue array)
      : Array(array) {}

    void emit(SILGenFunction &gen, CleanupLocation l) override {
      gen.emitUninitializedArrayDeallocation(l, Array);
    }
  };
}

/// Push a cleanup that deallocates 'array' if it is still uninitialized
/// when the cleanup fires, and return a handle to it.
CleanupHandle
SILGenFunction::enterDeallocateUninitializedArrayCleanup(SILValue array) {
  Cleanups.pushCleanup(array);
  return Cleanups.getTopCleanup();
}

/// Pick the correct callee form for an accessor reference: protocol
/// (archetype) dispatch, direct, class-method dispatch, or super-method
/// dispatch, based on the declaration and the call context.
static Callee getBaseAccessorFunctionRef(SILGenFunction &gen,
                                         SILLocation loc,
                                         SILDeclRef constant,
                                         ArgumentSource &selfValue,
                                         bool isSuper,
                                         bool isDirectUse,
                                         CanAnyFunctionType substAccessorType,
                                         ArrayRef &substitutions){
  auto *decl = cast(constant.getDecl());

  // If this is a method in a protocol, generate it as a protocol call.
  if (isa(decl->getDeclContext())) {
    assert(!isDirectUse && "direct use of protocol accessor?");
    assert(!isSuper && "super call to protocol method?");

    return prepareArchetypeCallee(gen, loc, constant, selfValue,
                                  substAccessorType, substitutions);
  }

  bool isClassDispatch = false;
  if (!isDirectUse) {
    switch (getMethodDispatch(decl)) {
    case MethodDispatch::Class:
      isClassDispatch = true;
      break;
    case MethodDispatch::Static:
      isClassDispatch = false;
      break;
    }
  }

  // Dispatch in a struct/enum or to a final method is always direct.
  if (!isClassDispatch || decl->isFinal())
    return Callee::forDirect(gen, constant, substAccessorType, loc);

  // Otherwise, if we have a non-final class dispatch to a normal method,
  // perform a dynamic dispatch.
  auto self = selfValue.forceAndPeekRValue(gen).peekScalarValue();
  if (!isSuper)
    return Callee::forClassMethod(gen, self, constant, substAccessorType,
                                  loc);

  // If this is a "super." dispatch, we do a dynamic dispatch for objc methods
  // or non-final native Swift methods.
  // Strip any upcasts off of 'self' first so the super-method lookup sees
  // the underlying value.
  while (auto *upcast = dyn_cast(self))
    self = upcast->getOperand();

  if (constant.isForeign || !canUseStaticDispatch(gen, constant))
    return Callee::forSuperMethod(gen, self, constant, substAccessorType,loc);

  return Callee::forDirect(gen, constant, substAccessorType, loc);
}

/// Build a Callee for an accessor: apply substitutions to the formal type,
/// select the dispatch form, and collect local captures if any.
static Callee
emitSpecializedAccessorFunctionRef(SILGenFunction &gen,
                                   SILLocation loc,
                                   SILDeclRef constant,
                                   ArrayRef substitutions,
                                   ArgumentSource &selfValue,
                                   bool isSuper,
                                   bool isDirectUse)
{
  SILConstantInfo constantInfo = gen.getConstantInfo(constant);

  // Apply substitutions to the callee type.
  CanAnyFunctionType substAccessorType = constantInfo.FormalInterfaceType;
  if (!substitutions.empty()) {
    auto genericFn = cast(substAccessorType);
    auto substFn = genericFn->substGenericArgs(gen.SGM.SwiftModule,
                                               substitutions);
    substAccessorType = cast(substFn->getCanonicalType());
  }

  // Get the accessor function. The type will be a polymorphic function if
  // the Self type is generic.
  Callee callee = getBaseAccessorFunctionRef(gen, loc, constant, selfValue,
                                             isSuper, isDirectUse,
                                             substAccessorType, substitutions);

  // Collect captures if the accessor has them.
  auto accessorFn = cast(constant.getDecl());
  if (accessorFn->getCaptureInfo().hasLocalCaptures()) {
    assert(!selfValue && "local property has self param?!");
    SmallVector captures;
    gen.emitCaptures(loc, accessorFn, CaptureEmission::ImmediateApplication,
                     captures);
    callee.setCaptures(std::move(captures));
  }

  // If there are substitutions, specialize the generic accessor.
  // FIXME: Generic subscript operator could add another layer of
  // substitutions.
  if (!substitutions.empty()) {
    callee.setSubstitutions(gen, loc, substitutions, 0);
  }
  return callee;
}

/// Prepare the 'self' (base) argument for an accessor call, converting it
/// to the representation (address vs. scalar, +0 vs. +1) that the
/// accessor's self parameter convention requires.
ArgumentSource SILGenFunction::prepareAccessorBaseArg(SILLocation loc,
                                                      ManagedValue base,
                                                      CanType baseFormalType,
                                                      SILDeclRef accessor) {
  auto accessorType = SGM.Types.getConstantFunctionType(accessor);
  SILParameterInfo selfParam = accessorType->getParameters().back();

  assert(!base.isInContext());
  assert(!base.isLValue() || !base.hasCleanup());
  SILType baseLoweredType = base.getType();

  // If the base is a boxed existential, we will open it later.
  if (baseLoweredType.getPreferredExistentialRepresentation(SGM.M)
        == ExistentialRepresentation::Boxed) {
    assert(!baseLoweredType.isAddress()
           && "boxed existential should not be an address");
  } else if (baseLoweredType.isAddress()) {
    // If the base is currently an address, we may have to copy it.
    auto needsLoad = [&] {
      switch (selfParam.getConvention()) {
      // If the accessor wants the value 'inout', always pass the
      // address we were given.  This is semantically required.
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable:
        return false;

      // If the accessor wants the value 'in', we have to copy if the
      // base isn't a temporary.  We aren't allowed to pass aliased
      // memory to 'in', and we have pass at +1.
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Guaranteed:
        // TODO: We shouldn't be able to get an lvalue here, but the AST
        // sometimes produces an inout base for non-mutating accessors.
        // rdar://problem/19782170
        // assert(!base.isLValue());
        return base.isLValue() || base.isPlusZeroRValueOrTrivial();

      // If the accessor wants the value directly, we definitely have to
      // load.  TODO: don't load-and-retain if the value is passed at +0.
      case ParameterConvention::Direct_Owned:
      case ParameterConvention::Direct_Unowned:
      case ParameterConvention::Direct_Guaranteed:
      case ParameterConvention::Direct_Deallocating:
        return true;
      }
      llvm_unreachable("bad convention");
    };
    if (needsLoad()) {
      // The load can only be a take if the base is a +1 rvalue.
      auto shouldTake = IsTake_t(base.hasCleanup());

      base = emitLoad(loc, base.forward(*this),
                      getTypeLowering(baseLoweredType),
                      SGFContext(), shouldTake);

    // Handle inout bases specially here.
    } else if (selfParam.isIndirectInOut()) {
      // It sometimes happens that we get r-value bases here,
      // e.g. when calling a mutating setter on a materialized
      // temporary.  Just don't claim the value.
      if (!base.isLValue()) {
        base = ManagedValue::forLValue(base.getValue());
      }

      // FIXME: this assumes that there's never meaningful
      // reabstraction of self arguments.
      return ArgumentSource(loc,
                 LValue::forAddress(base, AbstractionPattern(baseFormalType),
                                    baseFormalType));
    }

  // If the base is currently scalar, we may have to drop it in
  // memory or copy it.
  } else {
    assert(!base.isLValue());

    // We need to produce the value at +1 if it's going to be consumed.
    if (selfParam.isConsumed() && !base.hasCleanup()) {
      base = base.copyUnmanaged(*this, loc);
    }

    // If the parameter is indirect, we need to drop the value into
    // temporary memory.
    if (selfParam.isIndirect()) {
      // It's usually a really bad idea to materialize when we're
      // about to pass a value to an inout argument, because it's a
      // really easy way to silently drop modifications (e.g. from a
      // mutating getter in a writeback pair).  Our caller should
      // always take responsibility for that decision (by doing the
      // materialization itself).
      //
      // However, when the base is a reference type and the target is
      // a non-class protocol, this is innocuous.
#ifndef NDEBUG
      auto isNonClassProtocolMember = [](Decl *d) {
        auto p = d->getDeclContext()->getAsProtocolOrProtocolExtensionContext();
        return (p && !p->requiresClass());
      };
#endif
      assert((!selfParam.isIndirectMutating() ||
              (baseFormalType->isAnyClassReferenceType() &&
               isNonClassProtocolMember(accessor.getDecl()))) &&
             "passing unmaterialized r-value as inout argument");

      base = emitMaterializeIntoTemporary(*this, loc, base);
      if (selfParam.isIndirectInOut()) {
        // Drop the cleanup if we have one.
        auto baseLV = ManagedValue::forLValue(base.getValue());
        return ArgumentSource(loc,
                   LValue::forAddress(baseLV,
                                      AbstractionPattern(baseFormalType),
                                      baseFormalType));
      }
    }
  }

  return ArgumentSource(loc, RValue(*this, loc,
                                    baseFormalType, base));
}

/// Return the SILDeclRef for the getter of 'storage', going through the
/// ObjC entry point when required and not a direct use.
SILDeclRef SILGenFunction::getGetterDeclRef(AbstractStorageDecl *storage,
                                            bool isDirectUse) {
  return SILDeclRef(storage->getGetter(), SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    !isDirectUse && storage->requiresObjCGetterAndSetter());
}

/// Emit a call to a getter.
RValue SILGenFunction::
emitGetAccessor(SILLocation loc, SILDeclRef get,
                ArrayRef substitutions,
                ArgumentSource &&selfValue,
                bool isSuper, bool isDirectUse,
                RValue &&subscripts, SGFContext c) {
  // Scope any further writeback just within this operation.
  WritebackScope writebackScope(*this);

  Callee getter = emitSpecializedAccessorFunctionRef(*this, loc, get,
                                                     substitutions, selfValue,
                                                     isSuper, isDirectUse);
  bool hasCaptures = getter.hasCaptures();
  bool hasSelf = (bool)selfValue;
  CanAnyFunctionType accessType = getter.getSubstFormalType();

  CallEmission emission(*this, std::move(getter), std::move(writebackScope));
  // Self ->
  if (hasSelf) {
    emission.addCallSite(loc, std::move(selfValue), accessType);
  }
  // TODO: Have Callee encapsulate the captures better.
  if (hasSelf || hasCaptures) {
    accessType = cast(accessType.getResult());
  }
  // Index or () if none.
  if (!subscripts)
    subscripts = emitEmptyTupleRValue(loc, SGFContext());

  emission.addCallSite(loc, ArgumentSource(loc, std::move(subscripts)),
                       accessType);

  // T
  return emission.apply(c);
}

/// Return the SILDeclRef for the setter of 'storage', going through the
/// ObjC entry point when required and not a direct use.
SILDeclRef SILGenFunction::getSetterDeclRef(AbstractStorageDecl *storage,
                                            bool isDirectUse) {
  return SILDeclRef(storage->getSetter(), SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    !isDirectUse && storage->requiresObjCGetterAndSetter());
}

/// Emit a call to a setter, passing the new value (and subscript indices,
/// if any) as the final argument tuple.
void SILGenFunction::emitSetAccessor(SILLocation loc, SILDeclRef set,
                                     ArrayRef substitutions,
                                     ArgumentSource &&selfValue,
                                     bool isSuper, bool isDirectUse,
                                     RValue &&subscripts, RValue &&setValue) {
  // Scope any further writeback just within this operation.
  WritebackScope writebackScope(*this);

  Callee setter = emitSpecializedAccessorFunctionRef(*this, loc, set,
                                                     substitutions, selfValue,
                                                     isSuper, isDirectUse);
  bool hasCaptures = setter.hasCaptures();
  bool hasSelf = (bool)selfValue;
  CanAnyFunctionType accessType = setter.getSubstFormalType();

  CallEmission emission(*this, std::move(setter), std::move(writebackScope));
  // Self ->
  if (hasSelf) {
    emission.addCallSite(loc, std::move(selfValue), accessType);
  }
  // TODO: Have Callee encapsulate the captures better.
  if (hasSelf || hasCaptures) {
    accessType = cast(accessType.getResult());
  }

  // (value)  or (value, indices)
  if (subscripts) {
    // If we have a value and index list, create a new rvalue to represent the
    // both of them together.  The value goes first.
    SmallVector Elts;
    std::move(setValue).getAll(Elts);
    std::move(subscripts).getAll(Elts);
    setValue = RValue(Elts, accessType.getInput());
  } else {
    setValue.rewriteType(accessType.getInput());
  }
  emission.addCallSite(loc, ArgumentSource(loc, std::move(setValue)),
                       accessType);
  // ()
  emission.apply();
}

/// Return the SILDeclRef for the materializeForSet function of 'storage'.
SILDeclRef
SILGenFunction::getMaterializeForSetDeclRef(AbstractStorageDecl *storage,
                                            bool isDirectUse) {
  return SILDeclRef(storage->getMaterializeForSetFunc(),
                    SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    /*foreign*/ false);
}

/// Emit a call to materializeForSet, returning the materialized address,
/// the optional writeback callback, and the callback storage.
MaterializedLValue SILGenFunction::
emitMaterializeForSetAccessor(SILLocation loc, SILDeclRef materializeForSet,
                              ArrayRef substitutions,
                              ArgumentSource &&selfValue,
                              bool isSuper, bool isDirectUse,
                              RValue &&subscripts, SILValue buffer,
                              SILValue callbackStorage) {
  // Scope any further writeback just within this operation.
  WritebackScope writebackScope(*this);

  assert(!materializeForSet.getDecl()
           ->getDeclContext()->getAsProtocolExtensionContext() &&
         "direct use of materializeForSet from a protocol extension is"
         " probably a miscompile");

  Callee callee = emitSpecializedAccessorFunctionRef(*this, loc,
                                                     materializeForSet,
                                                     substitutions, selfValue,
                                                     isSuper, isDirectUse);
  bool hasCaptures = callee.hasCaptures();
  bool hasSelf = (bool)selfValue;
  CanAnyFunctionType accessType = callee.getSubstFormalType();
  CanAnyFunctionType origAccessType = callee.getOrigFormalType();

  CallEmission emission(*this, std::move(callee), std::move(writebackScope));
  // Self ->
  if (hasSelf) {
    emission.addCallSite(loc, std::move(selfValue), accessType);
  }
  // TODO: Have Callee encapsulate the captures better.
  if (hasSelf || hasCaptures) {
    accessType = cast(accessType.getResult());
  }

  // (buffer, callbackStorage)  or (buffer, callbackStorage, indices) ->
  // Note that this "RValue" stores a mixed LValue/RValue tuple.
  RValue args = [&] {
    SmallVector elts;

    auto bufferPtr =
      B.createAddressToPointer(loc, buffer,
                               SILType::getRawPointerType(getASTContext()));
    elts.push_back(ManagedValue::forUnmanaged(bufferPtr));

    elts.push_back(ManagedValue::forLValue(callbackStorage));

    if (subscripts) {
      std::move(subscripts).getAll(elts);
    }
    return RValue(elts, accessType.getInput());
  }();
  emission.addCallSite(loc, ArgumentSource(loc, std::move(args)), accessType);
  // (buffer, optionalCallback)
  SmallVector results;
  emission.apply().getAll(results);

  // Project out the materialized address.
  SILValue address = results[0].getUnmanagedValue();
  address = B.createPointerToAddress(loc, address, buffer->getType());

  // Project out the optional callback.
  SILValue optionalCallback = results[1].getUnmanagedValue();

  CanType origSelfType = origAccessType->getInput()
      ->getRValueInstanceType()
      ->getCanonicalType();
  CanGenericSignature genericSig;
  if (auto genericFnType = dyn_cast(origAccessType))
    genericSig = genericFnType.getGenericSignature();

  return MaterializedLValue(ManagedValue::forUnmanaged(address),
                            origSelfType, genericSig,
                            optionalCallback, callbackStorage);
}

/// Return the SILDeclRef for the addressor of 'storage' appropriate to
/// the given access kind.
SILDeclRef SILGenFunction::getAddressorDeclRef(AbstractStorageDecl *storage,
                                               AccessKind accessKind,
                                               bool isDirectUse) {
  FuncDecl *addressorFunc = storage->getAddressorForAccess(accessKind);
  return SILDeclRef(addressorFunc, SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    /*foreign*/ false);
}

/// Emit a call to an addressor.
///
/// The first return value is the address, which will always be an
/// l-value managed value.  The second return value is the owner
/// pointer, if applicable.
std::pair SILGenFunction::
emitAddressorAccessor(SILLocation loc, SILDeclRef addressor,
                      ArrayRef substitutions,
                      ArgumentSource &&selfValue,
                      bool isSuper, bool isDirectUse,
                      RValue &&subscripts, SILType addressType) {
  // Scope any further writeback just within this operation.
  WritebackScope writebackScope(*this);

  Callee callee = emitSpecializedAccessorFunctionRef(*this, loc, addressor,
                                                     substitutions, selfValue,
                                                     isSuper, isDirectUse);
  bool hasCaptures = callee.hasCaptures();
  bool hasSelf = (bool)selfValue;
  CanAnyFunctionType accessType = callee.getSubstFormalType();

  CallEmission emission(*this, std::move(callee), std::move(writebackScope));
  // Self ->
  if (hasSelf) {
    emission.addCallSite(loc, std::move(selfValue), accessType);
  }
  // TODO: Have Callee encapsulate the captures better.
  if (hasSelf || hasCaptures) {
    accessType = cast(accessType.getResult());
  }

  // Index or () if none.
  if (!subscripts)
    subscripts = emitEmptyTupleRValue(loc, SGFContext());

  emission.addCallSite(loc, ArgumentSource(loc, std::move(subscripts)),
                       accessType);

  // Unsafe{Mutable}Pointer or
  // (Unsafe{Mutable}Pointer, Builtin.UnknownPointer) or
  // (Unsafe{Mutable}Pointer, Builtin.NativePointer) or
  // (Unsafe{Mutable}Pointer, Builtin.NativePointer?) or
  SmallVector results;
  emission.apply().getAll(results);

  SILValue pointer;
  ManagedValue owner;
  switch (cast(addressor.getDecl())->getAddressorKind()) {
  case AddressorKind::NotAddressor:
    llvm_unreachable("not an addressor!");
  case AddressorKind::Unsafe:
    assert(results.size() == 1);
    pointer = results[0].getUnmanagedValue();
    owner = ManagedValue();
    break;
  case AddressorKind::Owning:
  case AddressorKind::NativeOwning:
  case AddressorKind::NativePinning:
    assert(results.size() == 2);
    pointer = results[0].getUnmanagedValue();
    owner = results[1];
    break;
  }

  // Drill down to the raw pointer using intrinsic knowledge of those types.
  auto pointerType =
    pointer->getType().castTo()->getDecl();
  auto props = pointerType->getStoredProperties();
  assert(props.begin() != props.end());
  assert(std::next(props.begin()) == props.end());
  VarDecl *rawPointerField = *props.begin();
  pointer = B.createStructExtract(loc, pointer, rawPointerField,
                                  SILType::getRawPointerType(getASTContext()));

  // Convert to the appropriate address type and return.
  SILValue address = B.createPointerToAddress(loc, pointer, addressType);

  // Mark dependence as necessary.
  switch (cast(addressor.getDecl())->getAddressorKind()) {
  case AddressorKind::NotAddressor:
    llvm_unreachable("not an addressor!");
  case AddressorKind::Unsafe:
    // TODO: we should probably mark dependence on the base.
    break;
  case AddressorKind::Owning:
  case AddressorKind::NativeOwning:
  case AddressorKind::NativePinning:
    address = B.createMarkDependence(loc, address, owner.getValue());
    break;
  }

  return { ManagedValue::forLValue(address), owner };
}

/// Apply a function expression to a pre-built operand RValue, rewriting
/// the operand to the callee's input type first.
RValue SILGenFunction::emitApplyConversionFunction(SILLocation loc,
                                                   Expr *funcExpr,
                                                   Type resultType,
                                                   RValue &&operand) {
  // Walk the function expression, which should produce a reference to the
  // callee, leaving the final curry level unapplied.
  CallEmission emission = prepareApplyExpr(*this, funcExpr);
  // Rewrite the operand type to the expected argument type, to handle tuple
  // conversions etc.
  auto funcTy = cast(funcExpr->getType()->getCanonicalType());
  operand.rewriteType(funcTy.getInput());
  // Add the operand as the final callsite.
  emission.addCallSite(loc, ArgumentSource(loc, std::move(operand)),
                       resultType->getCanonicalType(), funcTy->throws());
  return emission.apply();
}

// Create a partial application of a dynamic method, applying bridging thunks
// if necessary.
static SILValue emitDynamicPartialApply(SILGenFunction &gen,
                                        SILLocation loc,
                                        SILValue method,
                                        SILValue self,
                                        CanFunctionType methodTy) {
  // Pop the self type off of the function type.
  // Just to be weird, partially applying an objc method produces a native
  // function (?!)
  auto fnTy = method->getType().castTo();

  // If the original method has an @unowned_inner_pointer return, the partial
  // application thunk will lifetime-extend 'self' for us, converting the
  // return value to @unowned.
  //
  // If the original method has an @autoreleased return, the partial
  // application thunk will retain it for us, converting the return value
  // to @owned.
  SmallVector results;
  results.append(fnTy->getAllResults().begin(), fnTy->getAllResults().end());
  for (auto &result : results) {
    if (result.getConvention() == ResultConvention::UnownedInnerPointer)
      result = SILResultInfo(result.getType(), ResultConvention::Unowned);
    else if (result.getConvention() == ResultConvention::Autoreleased)
      result = SILResultInfo(result.getType(), ResultConvention::Owned);
  }

  auto partialApplyTy = SILFunctionType::get(fnTy->getGenericSignature(),
                   fnTy->getExtInfo()
                     .withRepresentation(SILFunctionType::Representation::Thick),
                   ParameterConvention::Direct_Owned,
                   fnTy->getParameters()
                     .slice(0, fnTy->getParameters().size() - 1),
                   results, fnTy->getOptionalErrorResult(),
                   gen.getASTContext());

  // Retain 'self' because the partial apply will take ownership.
  // We can't simply forward 'self' because the partial apply is conditional.
#if 0
  auto CMV = ConsumableManagedValue(ManagedValue::forUnmanaged(self),
                                    CastConsumptionKind::CopyOnSuccess);
  self = gen.getManagedValue(loc, CMV).forward(gen);
#else
  if (!self->getType().isAddress())
    gen.B.emitRetainValueOperation(loc, self);
#endif

  SILValue result =
    gen.B.createPartialApply(loc, method, method->getType(), {},
                             self,
                             SILType::getPrimitiveObjectType(partialApplyTy));
  // If necessary, thunk to the native ownership conventions and bridged types.
  auto nativeTy =
    gen.getLoweredLoadableType(methodTy).castTo();

  if (nativeTy != partialApplyTy) {
    result = gen.emitBlockToFunc(loc, ManagedValue::forUnmanaged(result),
                                 nativeTy).forward(gen);
  }

  return result;
}

/// Emit a dynamic (AnyObject) member reference: branch on whether the
/// object responds to the member, partially apply the method on success,
/// and package the outcome as an optional.
RValue SILGenFunction::emitDynamicMemberRefExpr(DynamicMemberRefExpr *e,
                                                SGFContext c) {
  // Emit the operand.
  ManagedValue base = emitRValueAsSingleValue(e->getBase());

  SILValue operand = base.getValue();
  if (!e->getMember().getDecl()->isInstanceMember()) {
    // For a static member, convert the thick metatype operand to an
    // ObjC metatype.
    auto metatype = operand->getType().castTo();
    assert(metatype->getRepresentation() == MetatypeRepresentation::Thick);
    metatype = CanMetatypeType::get(metatype.getInstanceType(),
                                    MetatypeRepresentation::ObjC);
    operand = B.createThickToObjCMetatype(e, operand,
                                SILType::getPrimitiveObjectType(metatype));
  }

  // Create the continuation block.
  SILBasicBlock *contBB = createBasicBlock();

  // Create the no-member block.
  SILBasicBlock *noMemberBB = createBasicBlock();

  // Create the has-member block.
  SILBasicBlock *hasMemberBB = createBasicBlock();

  // The continuation block
  const TypeLowering &optTL = getTypeLowering(e->getType());
  auto loweredOptTy = optTL.getLoweredType();

  SILValue optTemp = emitTemporaryAllocation(e, loweredOptTy);

  // Create the branch.
  FuncDecl *memberFunc;
  if (auto *VD = dyn_cast(e->getMember().getDecl()))
    memberFunc = VD->getGetter();
  else
    memberFunc = cast(e->getMember().getDecl());
  SILDeclRef member(memberFunc, SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    /*isObjC=*/true);
  B.createDynamicMethodBranch(e, operand, member, hasMemberBB, noMemberBB);

  // Create the has-member branch.
  {
    B.emitBlock(hasMemberBB);

    FullExpr hasMemberScope(Cleanups, CleanupLocation(e));

    // The argument to the has-member block is the uncurried method.
    auto valueTy = e->getType()->getCanonicalType().getAnyOptionalObjectType();
    auto methodTy = valueTy;

    // For a computed variable, we want the getter.
    if (isa(e->getMember().getDecl()))
      methodTy = CanFunctionType::get(TupleType::getEmpty(getASTContext()),
                                      methodTy);

    auto dynamicMethodTy = getDynamicMethodLoweredType(*this, operand, member);
    auto loweredMethodTy = SILType::getPrimitiveObjectType(dynamicMethodTy);
    SILValue memberArg = new (F.getModule()) SILArgument(hasMemberBB,
                                                         loweredMethodTy);

    // Create the result value.
    SILValue result = emitDynamicPartialApply(*this, e, memberArg, operand,
                                              cast(methodTy));
    if (isa(e->getMember().getDecl())) {
      // For a variable, invoke the getter immediately.
      result = B.createApply(e, result, result->getType(),
                             getLoweredType(valueTy), {}, {});
    }

    // Package up the result in an optional.
    RValue resultRV = RValue(*this, e, valueTy,
                             emitManagedRValueWithCleanup(result));
    emitInjectOptionalValueInto(e, {e, std::move(resultRV)}, optTemp, optTL);

    // Branch to the continuation block.
    B.createBranch(e, contBB);
  }

  // Create the no-member branch.
  {
    B.emitBlock(noMemberBB);

    emitInjectOptionalNothingInto(e, optTemp, optTL);

    // Branch to the continuation block.
    B.createBranch(e, contBB);
  }

  // Emit the continuation block.
  B.emitBlock(contBB);

  // Package up the result.
  auto optResult = B.createLoad(e, optTemp);
  return RValue(*this, e, emitManagedRValueWithCleanup(optResult, optTL));
}

/// Emit a dynamic (AnyObject) subscript: branch on whether the object
/// responds to the subscript getter, apply it to the index on success,
/// and package the outcome as an optional.
RValue SILGenFunction::emitDynamicSubscriptExpr(DynamicSubscriptExpr *e,
                                                SGFContext c) {
  // Emit the base operand.
  ManagedValue managedBase = emitRValueAsSingleValue(e->getBase());

  SILValue base = managedBase.getValue();

  // Emit the index.
  RValue index = emitRValue(e->getIndex());

  // Create the continuation block.
  SILBasicBlock *contBB = createBasicBlock();

  // Create the no-member block.
  SILBasicBlock *noMemberBB = createBasicBlock();

  // Create the has-member block.
  SILBasicBlock *hasMemberBB = createBasicBlock();

  const TypeLowering &optTL = getTypeLowering(e->getType());
  auto loweredOptTy = optTL.getLoweredType();
  SILValue optTemp = emitTemporaryAllocation(e, loweredOptTy);

  // Create the branch.
  auto subscriptDecl = cast(e->getMember().getDecl());
  SILDeclRef member(subscriptDecl->getGetter(),
                    SILDeclRef::Kind::Func,
                    SILDeclRef::ConstructAtBestResilienceExpansion,
                    SILDeclRef::ConstructAtNaturalUncurryLevel,
                    /*isObjC=*/true);
  B.createDynamicMethodBranch(e, base, member, hasMemberBB, noMemberBB);

  // Create the has-member branch.
  {
    B.emitBlock(hasMemberBB);

    FullExpr hasMemberScope(Cleanups, CleanupLocation(e));

    // The argument to the has-member block is the uncurried method.
    auto valueTy = e->getType()->getCanonicalType().getAnyOptionalObjectType();
    auto methodTy = subscriptDecl->getGetter()->getType()->castTo()
                      ->getResult()->getCanonicalType();
    auto dynamicMethodTy = getDynamicMethodLoweredType(*this, base, member);
    auto loweredMethodTy = SILType::getPrimitiveObjectType(dynamicMethodTy);
    SILValue memberArg = new (F.getModule()) SILArgument(hasMemberBB,
                                                         loweredMethodTy);
    // Emit the application of 'self'.
    SILValue result = emitDynamicPartialApply(*this, e, memberArg, base,
                                              cast(methodTy));
    // Emit the index.
    llvm::SmallVector indexArgs;
    std::move(index).forwardAll(*this, indexArgs);
    auto &valueTL = getTypeLowering(valueTy);
    result = B.createApply(e, result, result->getType(),
                           valueTL.getLoweredType(), {}, indexArgs);

    // Package up the result in an optional.
    RValue resultRV = RValue(*this, e, valueTy,
                             emitManagedRValueWithCleanup(result, valueTL));
    emitInjectOptionalValueInto(e, {e, std::move(resultRV)}, optTemp, optTL);

    // Branch to the continuation block.
    B.createBranch(e, contBB);
  }

  // Create the no-member branch.
  {
    B.emitBlock(noMemberBB);

    emitInjectOptionalNothingInto(e, optTemp, optTL);

    // Branch to the continuation block.
    B.createBranch(e, contBB);
  }

  // Emit the continuation block.
  B.emitBlock(contBB);

  // Package up the result.
  auto optValue = B.createLoad(e, optTemp);
  return RValue(*this, e, emitManagedRValueWithCleanup(optValue, optTL));
}