//===--- GenMeta.cpp - IR generation for metadata constructs --------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file implements IR generation for type metadata constructs. // //===----------------------------------------------------------------------===// #define DEBUG_TYPE "type-metadata-layout" #include "swift/ABI/MetadataValues.h" #include "swift/ABI/TypeIdentity.h" #include "swift/AST/ASTContext.h" #include "swift/AST/ASTMangler.h" #include "swift/AST/Attr.h" #include "swift/AST/Decl.h" #include "swift/AST/DiagnosticsIRGen.h" #include "swift/AST/GenericEnvironment.h" #include "swift/AST/IRGenOptions.h" #include "swift/AST/PrettyStackTrace.h" #include "swift/AST/SubstitutionMap.h" #include "swift/AST/Types.h" #include "swift/Basic/Assertions.h" #include "swift/Basic/Mangler.h" #include "swift/ClangImporter/ClangModule.h" #include "swift/IRGen/Linking.h" #include "swift/Parse/Lexer.h" #include "swift/SIL/FormalLinkage.h" #include "swift/SIL/SILModule.h" #include "swift/SIL/TypeLowering.h" #include "swift/Strings.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/SmallString.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Module.h" #include "Address.h" #include "Callee.h" #include "ClassLayout.h" #include "ClassMetadataVisitor.h" #include "ClassTypeInfo.h" #include "ConstantBuilder.h" #include "EnumMetadataVisitor.h" #include "ExtendedExistential.h" #include "Field.h" #include "FixedTypeInfo.h" #include 
"ForeignClassMetadataVisitor.h" #include "GenArchetype.h" #include "GenClass.h" #include "GenDecl.h" #include "GenPointerAuth.h" #include "GenPoly.h" #include "GenStruct.h" #include "GenValueWitness.h" #include "GenericArguments.h" #include "HeapTypeInfo.h" #include "IRGenDebugInfo.h" #include "IRGenMangler.h" #include "IRGenModule.h" #include "MetadataLayout.h" #include "MetadataRequest.h" #include "ProtocolInfo.h" #include "ScalarTypeInfo.h" #include "StructLayout.h" #include "StructMetadataVisitor.h" #include "GenMeta.h" using namespace swift; using namespace irgen; static Address emitAddressOfMetadataSlotAtIndex(IRGenFunction &IGF, llvm::Value *metadata, int index, llvm::Type *objectTy) { // Require the metadata to be some type that we recognize as a // metadata pointer. assert(metadata->getType() == IGF.IGM.TypeMetadataPtrTy); return IGF.emitAddressAtOffset(metadata, Offset(index * IGF.IGM.getPointerSize()), objectTy, IGF.IGM.getPointerAlignment()); } /// Emit a load from the given metadata at a constant index. static llvm::LoadInst *emitLoadFromMetadataAtIndex(IRGenFunction &IGF, llvm::Value *metadata, llvm::Value **slotPtr, int index, llvm::Type *objectTy, const llvm::Twine &suffix = "") { Address slot = emitAddressOfMetadataSlotAtIndex(IGF, metadata, index, objectTy); if (slotPtr) *slotPtr = slot.getAddress(); // Load. 
return IGF.Builder.CreateLoad(slot, metadata->getName() + suffix); } static Address createPointerSizedGEP(IRGenFunction &IGF, Address base, Size offset) { return IGF.Builder.CreateConstArrayGEP(base, IGF.IGM.getOffsetInWords(offset), offset); } void IRGenModule::setTrueConstGlobal(llvm::GlobalVariable *var) { disableAddressSanitizer(*this, var); switch (TargetInfo.OutputObjectFormat) { case llvm::Triple::DXContainer: case llvm::Triple::GOFF: case llvm::Triple::SPIRV: case llvm::Triple::UnknownObjectFormat: llvm_unreachable("unknown object format"); case llvm::Triple::MachO: var->setSection("__TEXT,__const"); break; case llvm::Triple::ELF: case llvm::Triple::Wasm: var->setSection(".rodata"); break; case llvm::Triple::XCOFF: case llvm::Triple::COFF: var->setSection(".rdata"); break; } } /*****************************************************************************/ /** Metadata completion ******************************************************/ /*****************************************************************************/ /// Does the metadata for the given type, which we are currently emitting, /// require singleton metadata initialization structures and functions? static bool needsSingletonMetadataInitialization(IRGenModule &IGM, NominalTypeDecl *typeDecl) { // Generic types never have singleton metadata initialization. if (typeDecl->isGenericContext()) return false; // Non-generic classes use singleton initialization if they have anything // non-trivial about their metadata. if (auto *classDecl = dyn_cast(typeDecl)) { switch (IGM.getClassMetadataStrategy(classDecl)) { case ClassMetadataStrategy::Resilient: case ClassMetadataStrategy::Singleton: case ClassMetadataStrategy::Update: case ClassMetadataStrategy::FixedOrUpdate: return true; case ClassMetadataStrategy::Fixed: return false; } } assert(isa(typeDecl) || isa(typeDecl)); // If the type is known to be fixed-layout, we can emit its metadata such // that it doesn't need dynamic initialization. 
auto &ti = IGM.getTypeInfoForUnlowered(typeDecl->getDeclaredTypeInContext()); if (ti.isFixedSize(ResilienceExpansion::Maximal)) return false; return true; } using MetadataCompletionBodyEmitter = void (IRGenFunction &IGF, llvm::Value *metadata, MetadataDependencyCollector *collector); static void emitMetadataCompletionFunction(IRGenModule &IGM, NominalTypeDecl *typeDecl, llvm::function_ref body) { llvm::Function *f = IGM.getAddrOfTypeMetadataCompletionFunction(typeDecl, ForDefinition); f->setAttributes(IGM.constructInitialAttributes()); f->setDoesNotThrow(); IGM.setHasNoFramePointer(f); IGM.setColocateMetadataSection(f); IRGenFunction IGF(IGM, f); // Skip instrumentation when building for TSan to avoid false positives. // The synchronization for this happens in the Runtime and we do not see it. if (IGM.IRGen.Opts.Sanitizers & SanitizerKind::Thread) f->removeFnAttr(llvm::Attribute::SanitizeThread); if (IGM.DebugInfo) IGM.DebugInfo->emitArtificialFunction(IGF, f); Explosion params = IGF.collectParameters(); llvm::Value *metadata = params.claimNext(); llvm::Value *context = params.claimNext(); llvm::Value *templatePointer = params.claimNext(); // TODO: use these? (void) context; (void) templatePointer; MetadataDependencyCollector collector; body(IGF, metadata, &collector); // At the current insertion point, the metadata is now complete. // Merge with any metadata dependencies we may have collected. auto dependency = collector.finish(IGF); auto returnValue = dependency.combine(IGF); IGF.Builder.CreateRet(returnValue); } static bool needsForeignMetadataCompletionFunction(IRGenModule &IGM, StructDecl *decl) { // Currently, foreign structs never need a completion function. return false; } static bool needsForeignMetadataCompletionFunction(IRGenModule &IGM, EnumDecl *decl) { // Currently, foreign enums never need a completion function. 
return false; } static bool needsForeignMetadataCompletionFunction(IRGenModule &IGM, ClassDecl *decl) { return IGM.getOptions().LazyInitializeClassMetadata || decl->hasSuperclass(); } /*****************************************************************************/ /** Nominal Type Descriptor Emission *****************************************/ /*****************************************************************************/ template static Flags getMethodDescriptorFlags(ValueDecl *fn) { if (isa(fn)) { auto flags = Flags(Flags::Kind::Init); // 'init' is considered static if (auto *afd = dyn_cast(fn)) flags = flags.withIsAsync(afd->hasAsync()); return flags; } auto kindAndIsCalleeAllocatedCoroutine = [&]() -> std::pair { auto accessor = dyn_cast(fn); if (!accessor) return {Flags::Kind::Method, false}; switch (accessor->getAccessorKind()) { case AccessorKind::Get: return {Flags::Kind::Getter, false}; case AccessorKind::Set: return {Flags::Kind::Setter, false}; case AccessorKind::Read: return {Flags::Kind::ReadCoroutine, false}; case AccessorKind::Read2: return {Flags::Kind::ReadCoroutine, true}; case AccessorKind::Modify: return {Flags::Kind::ModifyCoroutine, false}; case AccessorKind::Modify2: return {Flags::Kind::ModifyCoroutine, true}; case AccessorKind::DistributedGet: return {Flags::Kind::Getter, false}; #define OPAQUE_ACCESSOR(ID, KEYWORD) #define ACCESSOR(ID, KEYWORD) case AccessorKind::ID: #include "swift/AST/AccessorKinds.def" llvm_unreachable("these accessors never appear in protocols or v-tables"); } llvm_unreachable("bad kind"); }(); auto kind = kindAndIsCalleeAllocatedCoroutine.first; // Because no async old-ABI accessor coroutines exist or presumably ever will // (if async coroutines accessors are added, they will presumably be new-ABI), // the pairs {Flags::Kind::ReadCoroutine, isAsync} and // {Flags::Kind::ModifyCoroutine, isAsync} can't mean "async old-ABI // accessor coroutine". As such, we repurpose that pair to mean "new-ABI // accessor coroutine". 
This has the important virtue of resulting in ptrauth // authing/signing coro function pointers as data on old OSes where the bit // means "async" and where adding new accessor kinds requires a back // deployment library. bool hasAsync = kindAndIsCalleeAllocatedCoroutine.second; if (auto *afd = dyn_cast(fn)) hasAsync |= afd->hasAsync(); return Flags(kind).withIsInstance(!fn->isStatic()).withIsAsync(hasAsync); } static void buildMethodDescriptorFields(IRGenModule &IGM, const SILVTable *VTable, SILDeclRef fn, ConstantStructBuilder &descriptor, ClassDecl *classDecl) { auto *func = cast(fn.getDecl()); // Classify the method. using Flags = MethodDescriptorFlags; auto flags = getMethodDescriptorFlags(func); // Remember if the declaration was dynamic. if (func->shouldUseObjCDispatch()) flags = flags.withIsDynamic(true); auto *accessor = dyn_cast(func); // Include the pointer-auth discriminator. if (auto &schema = func->hasAsync() ? IGM.getOptions().PointerAuth.AsyncSwiftClassMethods : accessor && requiresFeatureCoroutineAccessors(accessor->getAccessorKind()) ? IGM.getOptions().PointerAuth.CoroSwiftClassMethods : IGM.getOptions().PointerAuth.SwiftClassMethods) { auto discriminator = PointerAuthInfo::getOtherDiscriminator(IGM, schema, fn); flags = flags.withExtraDiscriminator(discriminator->getZExtValue()); } // TODO: final? open? 
descriptor.addInt(IGM.Int32Ty, flags.getIntValue()); if (auto entry = VTable->getEntry(IGM.getSILModule(), fn)) { assert(entry->getKind() == SILVTable::Entry::Kind::Normal); auto *impl = entry->getImplementation(); if (impl->isAsync()) { llvm::Constant *implFn = IGM.getAddrOfAsyncFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else if (impl->getLoweredFunctionType()->isCalleeAllocatedCoroutine()) { llvm::Constant *implFn = IGM.getAddrOfCoroFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else { llvm::Function *implFn = IGM.getAddrOfSILFunction(impl, NotForDefinition); if (IGM.getOptions().UseProfilingMarkerThunks && classDecl->getSelfNominalTypeDecl()->isGenericContext() && !impl->getLoweredFunctionType()->isCoroutine()) { implFn = IGM.getAddrOfVTableProfilingThunk(implFn, classDecl); } descriptor.addCompactFunctionReference(implFn); } } else { // The method is removed by dead method elimination. // It should be never called. We add a pointer to an error function. 
descriptor.addRelativeAddressOrNull(nullptr); } } void IRGenModule::emitNonoverriddenMethodDescriptor(const SILVTable *VTable, SILDeclRef declRef, ClassDecl *classDecl) { auto entity = LinkEntity::forMethodDescriptor(declRef); auto *var = cast( getAddrOfLLVMVariable(entity, ConstantInit(), DebugTypeInfo())); if (!var->isDeclaration()) { assert(IRGen.isLazilyReemittingNominalTypeDescriptor(VTable->getClass())); return; } var->setConstant(true); setTrueConstGlobal(var); ConstantInitBuilder ib(*this); ConstantStructBuilder sb(ib.beginStruct(MethodDescriptorStructTy)); buildMethodDescriptorFields(*this, VTable, declRef, sb, classDecl); auto init = sb.finishAndCreateFuture(); getAddrOfLLVMVariable(entity, init, DebugTypeInfo()); } void IRGenModule::setVCallVisibility(llvm::GlobalVariable *var, llvm::GlobalObject::VCallVisibility vis, std::pair range) { // Insert attachment of !vcall_visibility !{ vis, range.first, range.second } var->addMetadata( llvm::LLVMContext::MD_vcall_visibility, *llvm::MDNode::get(getLLVMContext(), { llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(Int64Ty, vis)), llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(Int64Ty, range.first)), llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(Int64Ty, range.second)), })); // Insert attachment of !typed_global_not_for_cfi !{} var->addMetadata("typed_global_not_for_cfi", *llvm::MDNode::get(getLLVMContext(), {})); } void IRGenModule::addVTableTypeMetadata( ClassDecl *decl, llvm::GlobalVariable *var, SmallVector, 8> vtableEntries) { if (vtableEntries.empty()) return; uint64_t minOffset = UINT64_MAX; uint64_t maxOffset = 0; for (auto ventry : vtableEntries) { auto method = ventry.second; auto offset = ventry.first.getValue(); var->addTypeMetadata(offset, typeIdForMethod(*this, method)); minOffset = std::min(minOffset, offset); maxOffset = std::max(maxOffset, offset); } using VCallVisibility = llvm::GlobalObject::VCallVisibility; VCallVisibility vis = VCallVisibility::VCallVisibilityPublic; auto 
AS = decl->getFormalAccessScope(); if (decl->isObjC()) { // Swift methods are called from Objective-C via objc_MsgSend // and thus such call sites are not taken into consideration // by VFE in GlobalDCE. We cannot for the timebeing at least // safely eliminate a virtual function that might be called from // Objective-C. Setting vcall_visibility to public ensures this is // prevented. vis = VCallVisibility::VCallVisibilityPublic; } else if (AS.isFileScope()) { vis = VCallVisibility::VCallVisibilityTranslationUnit; } else if (AS.isPrivate() || AS.isInternal()) { vis = VCallVisibility::VCallVisibilityLinkageUnit; } else if (getOptions().InternalizeAtLink) { vis = VCallVisibility::VCallVisibilityLinkageUnit; } auto relptrSize = DataLayout.getTypeAllocSize(Int32Ty).getKnownMinValue(); setVCallVisibility(var, vis, std::make_pair(minOffset, maxOffset + relptrSize)); } static void addPaddingAfterGenericParamDescriptors(IRGenModule &IGM, ConstantStructBuilder &b, unsigned numDescriptors) { unsigned padding = (unsigned) -numDescriptors & 3; for (unsigned i = 0; i < padding; ++i) b.addInt(IGM.Int8Ty, 0); } namespace { struct GenericSignatureHeaderBuilder { using PlaceholderPosition = ConstantAggregateBuilderBase::PlaceholderPosition; PlaceholderPosition NumParamsPP; PlaceholderPosition NumRequirementsPP; PlaceholderPosition NumGenericKeyArgumentsPP; PlaceholderPosition FlagsPP; unsigned NumParams = 0; unsigned NumRequirements = 0; unsigned NumGenericKeyArguments = 0; SmallVector ShapeClasses; SmallVector GenericPackArguments; InvertibleProtocolSet ConditionalInvertedProtocols; SmallVector GenericValueArguments; GenericSignatureHeaderBuilder(IRGenModule &IGM, ConstantStructBuilder &builder) : NumParamsPP(builder.addPlaceholderWithSize(IGM.Int16Ty)), NumRequirementsPP(builder.addPlaceholderWithSize(IGM.Int16Ty)), NumGenericKeyArgumentsPP(builder.addPlaceholderWithSize(IGM.Int16Ty)), FlagsPP(builder.addPlaceholderWithSize(IGM.Int16Ty)) {} void add(const GenericArgumentMetadata 
&info) { ShapeClasses.append(info.ShapeClasses.begin(), info.ShapeClasses.end()); NumParams += info.NumParams; NumRequirements += info.NumRequirements; for (auto pack : info.GenericPackArguments) { // Compute the final index. pack.Index += NumGenericKeyArguments + ShapeClasses.size(); GenericPackArguments.push_back(pack); } NumGenericKeyArguments += info.NumGenericKeyArguments; for (auto value : info.GenericValueArguments) { GenericValueArguments.push_back(value); } } void finish(IRGenModule &IGM, ConstantStructBuilder &b) { assert(GenericPackArguments.empty() == ShapeClasses.empty() && "Can't have one without the other"); assert(NumParams <= UINT16_MAX && "way too generic"); b.fillPlaceholderWithInt(NumParamsPP, IGM.Int16Ty, NumParams); assert(NumRequirements <= UINT16_MAX && "way too generic"); b.fillPlaceholderWithInt(NumRequirementsPP, IGM.Int16Ty, NumRequirements); assert(NumGenericKeyArguments <= UINT16_MAX && "way too generic"); b.fillPlaceholderWithInt(NumGenericKeyArgumentsPP, IGM.Int16Ty, NumGenericKeyArguments + ShapeClasses.size()); bool hasTypePacks = !GenericPackArguments.empty(); bool hasConditionalInvertedProtocols = !ConditionalInvertedProtocols.empty(); bool hasValues = !GenericValueArguments.empty(); GenericContextDescriptorFlags flags( hasTypePacks, hasConditionalInvertedProtocols, hasValues); b.fillPlaceholderWithInt(FlagsPP, IGM.Int16Ty, flags.getIntValue()); } }; template class ContextDescriptorBuilderBase { protected: Impl &asImpl() { return *static_cast(this); } IRGenModule &IGM; private: ConstantInitBuilder InitBuilder; protected: ConstantStructBuilder B; std::optional SignatureHeader; ContextDescriptorBuilderBase(IRGenModule &IGM) : IGM(IGM), InitBuilder(IGM), B(InitBuilder.beginStruct()) { B.setPacked(true); } public: void layout() { asImpl().addFlags(); asImpl().addParent(); } void addFlags() { B.addInt32( ContextDescriptorFlags(asImpl().getContextKind(), !asImpl().getGenericSignature().isNull(), asImpl().isUniqueDescriptor(), 
!asImpl().getInvertedProtocols().empty(), asImpl().getKindSpecificFlags()) .getIntValue()); } void addParent() { ConstantReference parent = asImpl().getParent(); if (parent.getValue()) { B.addRelativeAddress(parent); } else { B.addInt32(0); // null offset } } void addGenericSignature() { if (!asImpl().getGenericSignature()) return; asImpl().addGenericParametersHeader(); asImpl().addGenericParameters(); asImpl().addGenericRequirements(); asImpl().addGenericPackShapeDescriptors(); asImpl().addConditionalInvertedProtocols(); asImpl().addGenericValueDescriptors(); asImpl().finishGenericParameters(); } void addGenericParametersHeader() { // Drop placeholders for the counts. We'll fill these in when we emit // the related sections. SignatureHeader.emplace(IGM, B); } void addGenericParameters() { GenericSignature sig = asImpl().getGenericSignature(); auto metadata = irgen::addGenericParameters(IGM, B, sig, /*implicit=*/false); assert(metadata.NumParams == metadata.NumParamsEmitted && "We can't use implicit GenericParamDescriptors here"); SignatureHeader->add(metadata); // Pad the structure up to four bytes for the following requirements. addPaddingAfterGenericParamDescriptors(IGM, B, SignatureHeader->NumParams); } void addGenericRequirements() { auto metadata = irgen::addGenericRequirements(IGM, B, asImpl().getGenericSignature()); SignatureHeader->add(metadata); } /// Adds the set of suppressed protocols, which must be explicitly called /// by the concrete subclasses. 
void addInvertedProtocols() { auto protocols = asImpl().getInvertedProtocols(); if (protocols.empty()) return; B.addInt(IGM.Int16Ty, protocols.rawBits()); } InvertibleProtocolSet getConditionalInvertedProtocols() { return InvertibleProtocolSet(); } void addConditionalInvertedProtocols() { assert(asImpl().getConditionalInvertedProtocols().empty() && "Subclass must implement this operation"); } void finishGenericParameters() { SignatureHeader->finish(IGM, B); } void addGenericPackShapeDescriptors() { const auto &shapes = SignatureHeader->ShapeClasses; const auto &packArgs = SignatureHeader->GenericPackArguments; assert(shapes.empty() == packArgs.empty() && "Can't have one without the other"); // If we don't have any pack arguments, there is nothing to emit. if (packArgs.empty()) return; // Emit the GenericPackShapeHeader first. // NumPacks B.addInt(IGM.Int16Ty, packArgs.size()); // NumShapes B.addInt(IGM.Int16Ty, shapes.size()); // Emit each GenericPackShapeDescriptor collected previously. irgen::addGenericPackShapeDescriptors(IGM, B, shapes, packArgs); } void addGenericValueDescriptors() { auto values = SignatureHeader->GenericValueArguments; // If we don't have any value arguments, there is nothing to emit. if (values.empty()) return; // NumValues B.addInt(IGM.Int32Ty, values.size()); // Emit each GenericValueDescriptor collected previously. irgen::addGenericValueDescriptors(IGM, B, values); } /// Retrieve the set of protocols that are suppressed in this context. 
InvertibleProtocolSet getInvertedProtocols() { return InvertibleProtocolSet(); } uint16_t getKindSpecificFlags() { return 0; } // Subclasses should provide: // // bool isUniqueDescriptor(); // llvm::Constant *getParent(); // ContextDescriptorKind getContextKind(); // GenericSignature getGenericSignature(); // void emit(); }; class ModuleContextDescriptorBuilder : public ContextDescriptorBuilderBase { using super = ContextDescriptorBuilderBase; ModuleDecl *M; public: ModuleContextDescriptorBuilder(IRGenModule &IGM, ModuleDecl *M) : super(IGM), M(M) {} void layout() { super::layout(); addName(); } void addName() { B.addRelativeAddress(IGM.getAddrOfGlobalIdentifierString( M->getABIName().str(), /*willBeRelativelyAddressed*/ true)); } bool isUniqueDescriptor() { return false; } ConstantReference getParent() { return {nullptr, ConstantReference::Direct}; } ContextDescriptorKind getContextKind() { return ContextDescriptorKind::Module; } GenericSignature getGenericSignature() { return nullptr; } void emit() { asImpl().layout(); auto addr = IGM.getAddrOfModuleContextDescriptor(M, B.finishAndCreateFuture()); auto var = cast(addr); var->setConstant(true); IGM.setColocateTypeDescriptorSection(var); } }; class ExtensionContextDescriptorBuilder : public ContextDescriptorBuilderBase { using super = ContextDescriptorBuilderBase; ExtensionDecl *E; public: ExtensionContextDescriptorBuilder(IRGenModule &IGM, ExtensionDecl *E) : super(IGM), E(E) {} void layout() { super::layout(); addExtendedContext(); addGenericSignature(); } void addExtendedContext() { auto string = IGM.getTypeRef(E->getSelfInterfaceType(), E->getGenericSignature(), MangledTypeRefRole::Metadata).first; B.addRelativeAddress(string); } ConstantReference getParent() { return {IGM.getAddrOfModuleContextDescriptor(E->getParentModule()), ConstantReference::Direct}; } bool isUniqueDescriptor() { // Extensions generated by the Clang importer will be emitted into any // binary that uses the Clang module. 
Otherwise, we can guarantee that // an extension (and any of its possible sub-contexts) belong to one // translation unit. return !isa(E->getModuleScopeContext()); } ContextDescriptorKind getContextKind() { return ContextDescriptorKind::Extension; } GenericSignature getGenericSignature() { return E->getGenericSignature(); } void emit() { asImpl().layout(); auto addr = IGM.getAddrOfExtensionContextDescriptor(E, B.finishAndCreateFuture()); auto var = cast(addr); var->setConstant(true); IGM.setColocateTypeDescriptorSection(var); } }; class AnonymousContextDescriptorBuilder : public ContextDescriptorBuilderBase { using super = ContextDescriptorBuilderBase; PointerUnion Name; DeclContext *getInnermostDeclContext() { if (auto DC = Name.dyn_cast()) { return DC; } if (auto VD = Name.dyn_cast()) { return VD->getInnermostDeclContext(); } llvm_unreachable("unknown name kind"); } public: AnonymousContextDescriptorBuilder(IRGenModule &IGM, PointerUnion Name) : super(IGM), Name(Name) { } void layout() { super::layout(); asImpl().addGenericSignature(); asImpl().addMangledName(); } ConstantReference getParent() { return IGM.getAddrOfParentContextDescriptor( getInnermostDeclContext(), /*fromAnonymousContext=*/true); } ContextDescriptorKind getContextKind() { return ContextDescriptorKind::Anonymous; } GenericSignature getGenericSignature() { return getInnermostDeclContext()->getGenericSignatureOfContext(); } bool isUniqueDescriptor() { return true; } uint16_t getKindSpecificFlags() { AnonymousContextDescriptorFlags flags{}; flags.setHasMangledName( IGM.IRGen.Opts.EnableAnonymousContextMangledNames); return flags.getOpaqueValue(); } void addMangledName() { if (!IGM.IRGen.Opts.EnableAnonymousContextMangledNames) return; IRGenMangler mangler(IGM.Context); auto mangledName = mangler.mangleAnonymousDescriptorName(Name); auto mangledNameConstant = IGM.getAddrOfGlobalString(mangledName, CStringSectionType::Default, /*willBeRelativelyAddressed*/ true); 
B.addRelativeAddress(mangledNameConstant); } void emit() { asImpl().layout(); auto addr = IGM.getAddrOfAnonymousContextDescriptor(Name, B.finishAndCreateFuture()); auto var = cast(addr); var->setConstant(true); IGM.setColocateTypeDescriptorSection(var); } }; class ProtocolDescriptorBuilder : public ContextDescriptorBuilderBase { using super = ContextDescriptorBuilderBase; ProtocolDecl *Proto; SILDefaultWitnessTable *DefaultWitnesses; std::optional NumRequirementsInSignature, NumRequirements; bool Resilient; public: ProtocolDescriptorBuilder(IRGenModule &IGM, ProtocolDecl *Proto, SILDefaultWitnessTable *defaultWitnesses) : super(IGM), Proto(Proto), DefaultWitnesses(defaultWitnesses), Resilient(IGM.getSwiftModule()->isResilient()) {} void layout() { super::layout(); } ConstantReference getParent() { return IGM.getAddrOfParentContextDescriptor( Proto, /*fromAnonymousContext=*/false); } ContextDescriptorKind getContextKind() { return ContextDescriptorKind::Protocol; } GenericSignature getGenericSignature() { return nullptr; } bool isUniqueDescriptor() { return true; } uint16_t getKindSpecificFlags() { ProtocolContextDescriptorFlags flags; flags.setClassConstraint(Proto->requiresClass() ? 
ProtocolClassConstraint::Class : ProtocolClassConstraint::Any); flags.setSpecialProtocol(getSpecialProtocolID(Proto)); flags.setIsResilient(DefaultWitnesses != nullptr); return flags.getOpaqueValue(); } void emit() { asImpl().layout(); asImpl().addName(); NumRequirementsInSignature = B.addPlaceholderWithSize(IGM.Int32Ty); NumRequirements = B.addPlaceholderWithSize(IGM.Int32Ty); asImpl().addAssociatedTypeNames(); asImpl().addRequirementSignature(); asImpl().addRequirements(); auto addr = IGM.getAddrOfProtocolDescriptor(Proto, B.finishAndCreateFuture()); auto var = cast(addr); var->setConstant(true); IGM.setColocateTypeDescriptorSection(var); } void addName() { auto nameStr = IGM.getAddrOfGlobalIdentifierString(Proto->getName().str(), /*willBeRelativelyAddressed*/ true); B.addRelativeAddress(nameStr); } void addRequirementSignature() { SmallVector requirements; SmallVector inverses; Proto->getRequirementSignature().getRequirementsWithInverses( Proto, requirements, inverses); auto metadata = irgen::addGenericRequirements(IGM, B, Proto->getGenericSignature(), requirements, inverses); B.fillPlaceholderWithInt(*NumRequirementsInSignature, IGM.Int32Ty, metadata.NumRequirements); } struct RequirementInfo { ProtocolRequirementFlags Flags; llvm::Constant *DefaultImpl; }; /// Build the information which will go into a ProtocolRequirement entry. RequirementInfo getRequirementInfo(const WitnessTableEntry &entry) { using Flags = ProtocolRequirementFlags; if (entry.isBase()) { assert(entry.isOutOfLineBase()); auto flags = Flags(Flags::Kind::BaseProtocol); return { flags, nullptr }; } if (entry.isAssociatedType()) { auto flags = Flags(Flags::Kind::AssociatedTypeAccessFunction); if (auto &schema = IGM.getOptions().PointerAuth .ProtocolAssociatedTypeAccessFunctions) { addDiscriminator(flags, schema, entry.getAssociatedType()); } // Look for a default witness. 
llvm::Constant *defaultImpl = findDefaultTypeWitness(entry.getAssociatedType()); return { flags, defaultImpl }; } if (entry.isAssociatedConformance()) { auto flags = Flags(Flags::Kind::AssociatedConformanceAccessFunction); if (auto &schema = IGM.getOptions().PointerAuth .ProtocolAssociatedTypeWitnessTableAccessFunctions) { addDiscriminator(flags, schema, AssociatedConformance(Proto, entry.getAssociatedConformancePath(), entry.getAssociatedConformanceRequirement())); } // Look for a default witness. llvm::Constant *defaultImpl = findDefaultAssociatedConformanceWitness( entry.getAssociatedConformancePath(), entry.getAssociatedConformanceRequirement()); return { flags, defaultImpl }; } assert(entry.isFunction()); SILDeclRef func(entry.getFunction()); // Emit the dispatch thunk. auto shouldEmitDispatchThunk = Resilient || IGM.getOptions().WitnessMethodElimination; if (shouldEmitDispatchThunk) { IGM.emitDispatchThunk(func); } { auto *requirement = cast(func.getDecl()); if (requirement->isDistributedThunk()) { // when thunk, because in protocol we want access of for the thunk IGM.emitDistributedTargetAccessor(requirement); } } // Classify the function. auto flags = getMethodDescriptorFlags(func.getDecl()); if (auto &schema = IGM.getOptions().PointerAuth.ProtocolWitnesses) { SILDeclRef declRef(func.getDecl(), isa(func.getDecl()) ? SILDeclRef::Kind::Allocator : SILDeclRef::Kind::Func); if (entry.getFunction().isAutoDiffDerivativeFunction()) declRef = declRef.asAutoDiffDerivativeFunction( entry.getFunction().getAutoDiffDerivativeFunctionIdentifier()); if (entry.getFunction().isDistributedThunk()) { flags = flags.withIsAsync(true); declRef = declRef.asDistributed(); } addDiscriminator(flags, schema, declRef); } // Look for a default witness. 
llvm::Constant *defaultImpl = findDefaultWitness(func); return { flags, defaultImpl }; } void addDiscriminator(ProtocolRequirementFlags &flags, const PointerAuthSchema &schema, const PointerAuthEntity &entity) { assert(schema); auto discriminator = PointerAuthInfo::getOtherDiscriminator(IGM, schema, entity); flags = flags.withExtraDiscriminator(discriminator->getZExtValue()); } void addRequirements() { auto &pi = IGM.getProtocolInfo(Proto, ProtocolInfoKind::Full); B.fillPlaceholderWithInt(*NumRequirements, IGM.Int32Ty, pi.getNumWitnesses()); if (pi.getNumWitnesses() > 0) { // Define the protocol requirements "base" descriptor, which references // the beginning of the protocol requirements, offset so that // subtracting this address from the address of a given protocol // requirements gives the corresponding offset into the witness // table. auto address = B.getAddrOfCurrentPosition(IGM.ProtocolRequirementStructTy); int offset = WitnessTableFirstRequirementOffset; auto firstReqAdjustment = llvm::ConstantInt::get(IGM.Int32Ty, -offset); address = llvm::ConstantExpr::getGetElementPtr( IGM.ProtocolRequirementStructTy, address, firstReqAdjustment); IGM.defineProtocolRequirementsBaseDescriptor(Proto, address); } for (auto &entry : pi.getWitnessEntries()) { if (Resilient) { if (entry.isFunction()) { // Define the method descriptor. SILDeclRef func(entry.getFunction()); auto *descriptor = B.getAddrOfCurrentPosition(IGM.ProtocolRequirementStructTy); IGM.defineMethodDescriptor(func, Proto, descriptor, IGM.ProtocolRequirementStructTy); } } if (entry.isAssociatedType()) { auto assocType = entry.getAssociatedType(); // Define the associated type descriptor to point to the current // position in the protocol descriptor. IGM.defineAssociatedTypeDescriptor( assocType, B.getAddrOfCurrentPosition(IGM.ProtocolRequirementStructTy)); } if (entry.isAssociatedConformance()) { // Define the associated conformance descriptor to point to the // current position in the protocol descriptor. 
AssociatedConformance conformance( Proto, entry.getAssociatedConformancePath(), entry.getAssociatedConformanceRequirement()); IGM.defineAssociatedConformanceDescriptor( conformance, B.getAddrOfCurrentPosition(IGM.ProtocolRequirementStructTy)); } if (entry.isBase()) { // Define a base conformance descriptor, which is just an associated // conformance descriptor for a base protocol. BaseConformance conformance(Proto, entry.getBase()); IGM.defineBaseConformanceDescriptor( conformance, B.getAddrOfCurrentPosition(IGM.ProtocolRequirementStructTy)); } auto reqt = B.beginStruct(IGM.ProtocolRequirementStructTy); auto info = getRequirementInfo(entry); // Flags. reqt.addInt32(info.Flags.getIntValue()); // Default implementation. if (info.DefaultImpl) { if (auto *fn = llvm::dyn_cast(info.DefaultImpl)) { reqt.addCompactFunctionReference(fn); } else { reqt.addRelativeAddress(info.DefaultImpl); } } else { reqt.addRelativeAddressOrNull(nullptr); } reqt.finishAndAddTo(B); } } llvm::Constant *findDefaultWitness(SILDeclRef func) { if (!DefaultWitnesses) return nullptr; for (auto &entry : DefaultWitnesses->getEntries()) { if (!entry.isValid() || entry.getKind() != SILWitnessTable::Method || entry.getMethodWitness().Requirement != func) continue; auto silFunc = entry.getMethodWitness().Witness; if (silFunc->isAsync()) { return IGM.getAddrOfAsyncFunctionPointer(silFunc); } if (silFunc->getLoweredFunctionType()->isCalleeAllocatedCoroutine()) { return IGM.getAddrOfCoroFunctionPointer(silFunc); } return IGM.getAddrOfSILFunction(entry.getMethodWitness().Witness, NotForDefinition); } return nullptr; } llvm::Constant *findDefaultTypeWitness(AssociatedTypeDecl *assocType) { if (!DefaultWitnesses) return nullptr; for (auto &entry : DefaultWitnesses->getEntries()) { if (!entry.isValid() || entry.getKind() != SILWitnessTable::AssociatedType || entry.getAssociatedTypeWitness().Requirement != assocType) continue; auto witness = entry.getAssociatedTypeWitness().Witness->mapTypeOutOfContext(); return 
        IGM.getAssociatedTypeWitness(witness, Proto->getGenericSignature(),
                                     /*inProtocolContext=*/true);
  }
  return nullptr;
}

/// Look up the default associated-conformance witness for the given
/// (association, requirement) pair in the protocol's default witness table.
/// If one exists, also define its accessor function and return the mangled
/// associated-conformance reference.
llvm::Constant *findDefaultAssociatedConformanceWitness(
    CanType association, ProtocolDecl *requirement) {
  if (!DefaultWitnesses) return nullptr;

  for (auto &entry : DefaultWitnesses->getEntries()) {
    if (!entry.isValid() ||
        entry.getKind() != SILWitnessTable::AssociatedConformance)
      continue;
    auto assocConf = entry.getAssociatedConformanceWitness();
    if (assocConf.Requirement != association ||
        assocConf.Witness.getProtocol() != requirement)
      continue;
    AssociatedConformance conformance(Proto, association, requirement);
    defineDefaultAssociatedConformanceAccessFunction(
        conformance, assocConf.Witness);
    return IGM.getMangledAssociatedConformance(nullptr, conformance);
  }
  return nullptr;
}

/// Emit the body of the accessor function that produces the witness table
/// for a defaulted associated conformance.
void defineDefaultAssociatedConformanceAccessFunction(
    AssociatedConformance requirement, ProtocolConformanceRef conformance) {
  auto accessor =
      IGM.getAddrOfDefaultAssociatedConformanceAccessor(requirement);

  IRGenFunction IGF(IGM, accessor);
  if (IGM.DebugInfo)
    IGM.DebugInfo->emitArtificialFunction(IGF, accessor);

  // The accessor receives the associated type's metadata, the Self
  // metadata, and Self's witness table, in that order.
  Explosion parameters = IGF.collectParameters();

  llvm::Value *associatedTypeMetadata = parameters.claimNext();
  llvm::Value *self = parameters.claimNext();
  llvm::Value *wtable = parameters.claimNext();

  // Only bind local type data when the conformance can actually depend on
  // the arguments (abstract conformance, or a concrete type containing
  // archetypes).
  bool hasArchetype =
      !conformance.isConcrete() ||
      conformance.getConcrete()->getType()->hasArchetype();
  if (hasArchetype) {
    // Bind local Self type data from the metadata argument.
    auto selfInContext = Proto->getSelfTypeInContext()->getCanonicalType();
    IGF.bindLocalTypeDataFromTypeMetadata(selfInContext, IsExact, self,
                                          MetadataState::Abstract);
    IGF.setUnscopedLocalTypeData(
        selfInContext,
        LocalTypeDataKind::forAbstractProtocolWitnessTable(Proto),
        wtable);

    // Bind the associated type metadata.
    IGF.bindLocalTypeDataFromTypeMetadata(requirement.getAssociation(),
                                          IsExact,
                                          associatedTypeMetadata,
                                          MetadataState::Abstract);
  }

  // For a concrete witness table, call it.
  ProtocolDecl *associatedProtocol = requirement.getAssociatedRequirement();
  if (conformance.isConcrete()) {
    auto conformanceI =
        &IGM.getConformanceInfo(associatedProtocol, conformance.getConcrete());
    auto returnValue = conformanceI->getTable(IGF, &associatedTypeMetadata);
    IGF.Builder.CreateRet(returnValue);
    return;
  }

  // For an abstract table, emit a reference to the witness table.
  // NOTE(review): the template argument of `cast` below appears to have been
  // stripped during extraction; non-comment tokens are preserved as found.
  CanType associatedTypeInContext =
      Proto->mapTypeIntoContext(requirement.getAssociation())
          ->getCanonicalType();
  auto returnValue = emitArchetypeWitnessTableRef(
      IGF, cast(associatedTypeInContext), associatedProtocol);
  IGF.Builder.CreateRet(returnValue);
  return;
}

/// Emit the space-separated list of associated type names (escaping raw
/// identifiers for the runtime) and add a relative reference to the string,
/// or null when there are no associated types.
void addAssociatedTypeNames() {
  llvm::SmallString<256> AssociatedTypeNames;

  auto &pi = IGM.getProtocolInfo(Proto,
                                 ProtocolInfoKind::RequirementSignature);
  for (auto &entry : pi.getWitnessEntries()) {
    // Add the associated type name to the list.
    if (entry.isAssociatedType()) {
      if (!AssociatedTypeNames.empty())
        AssociatedTypeNames += ' ';
      Identifier name = entry.getAssociatedType()->getName();
      if (name.mustAlwaysBeEscaped()) {
        // Raw identifiers need runtime-compatible escaping.
        Mangle::Mangler::appendRawIdentifierForRuntime(name.str(),
                                                       AssociatedTypeNames);
      } else {
        AssociatedTypeNames += name.str();
      }
    }
  }

  llvm::Constant *global = nullptr;
  if (!AssociatedTypeNames.empty()) {
    global = IGM.getAddrOfGlobalString(AssociatedTypeNames,
                                       CStringSectionType::Default,
                                       /*willBeRelativelyAddressed=*/true);
  }
  B.addRelativeAddressOrNull(global);
}
};

// CRTP base for nominal type context descriptor builders (struct/enum/class).
// NOTE(review): the template parameter list and several template arguments
// below appear to have been stripped during extraction; non-comment tokens
// are preserved as found.
template
class TypeContextDescriptorBuilderBase
    : public ContextDescriptorBuilderBase {
  using super = ContextDescriptorBuilderBase;

protected:
  DeclType *Type;                 // the nominal type declaration
  RequireMetadata_t HasMetadata;  // whether metadata is required
  TypeContextDescriptorFlags::MetadataInitializationKind
      MetadataInitialization;

  StringRef UserFacingName;       // user-visible name recorded by computeIdentity()
  bool IsCxxSpecializedTemplate;
  // Extra Clang-import identity info, created lazily by getMutableImportInfo().
  std::optional> ImportInfo;

  using super::IGM;
  using super::B;
  using super::asImpl;

public:
  using super::addGenericSignature;

  TypeContextDescriptorBuilderBase(IRGenModule &IGM, DeclType *Type,
                                   RequireMetadata_t requireMetadata) :
    super(IGM), Type(Type), HasMetadata(requireMetadata),
    MetadataInitialization(computeMetadataInitialization()) {
  }

  /// Lay out the common structure of a type context descriptor; subclasses
  /// hook in via the asImpl() customization points.
  void layout() {
    asImpl().computeIdentity();

    super::layout();
    asImpl().addName();
    asImpl().addAccessFunction();
    asImpl().addReflectionFieldDescriptor();
    asImpl().addLayoutInfo();
    asImpl().addGenericSignature();
    asImpl().maybeAddResilientSuperclass();
    asImpl().maybeAddMetadataInitialization();
  }

  /// Retrieve the set of protocols that are suppressed by this type.
  InvertibleProtocolSet getInvertedProtocols() {
    InvertibleProtocolSet result;
    auto nominal = dyn_cast(Type);
    if (!nominal)
      return result;

    auto checkProtocol = [&](InvertibleProtocolKind kind) {
      switch (nominal->canConformTo(kind)) {
      case TypeDecl::CanBeInvertible::Never:
      case TypeDecl::CanBeInvertible::Conditionally:
        // A conditional conformance is still treated as inverted here; the
        // conditional set below records the requirements separately.
        result.insert(kind);
        break;

      case TypeDecl::CanBeInvertible::Always:
        break;
      }
    };

    for (auto kind : InvertibleProtocolSet::allKnown())
      checkProtocol(kind);

    return result;
  }

  /// Retrieve the set of invertible protocols to which this type
  /// conditionally conforms.
  InvertibleProtocolSet getConditionalInvertedProtocols() {
    InvertibleProtocolSet result;
    auto nominal = dyn_cast(Type);
    if (!nominal)
      return result;

    auto checkProtocol = [&](InvertibleProtocolKind kind) {
      switch (nominal->canConformTo(kind)) {
      case TypeDecl::CanBeInvertible::Never:
      case TypeDecl::CanBeInvertible::Always:
        break;

      case TypeDecl::CanBeInvertible::Conditionally:
        result.insert(kind);
        break;
      }
    };

    for (auto kind : InvertibleProtocolSet::allKnown())
      checkProtocol(kind);

    return result;
  }

  /// Emit the conditionally-inverted protocol set and, for each such
  /// protocol, the generic requirements of its conditional conformance.
  void addConditionalInvertedProtocols() {
    auto protocols = asImpl().getConditionalInvertedProtocols();
    if (protocols.empty())
      return;

    // Note the conditional suppressed protocols.
    this->SignatureHeader->ConditionalInvertedProtocols = protocols;

    // The suppressed protocols with conditional conformances.
    B.addInt(IGM.Int16Ty, protocols.rawBits());

    // Create placeholders for the counts of the conditional requirements
    // for each conditional conformance to a supressible protocol.
    unsigned numProtocols = 0;
    using PlaceholderPosition =
        ConstantAggregateBuilderBase::PlaceholderPosition;
    SmallVector countPlaceholders;
    for (auto kind : protocols) {
      (void)kind;
      numProtocols++;
      countPlaceholders.push_back(
          B.addPlaceholderWithSize(IGM.Int16Ty));
    }

    // The conditional invertible protocol set is alone as a 16 bit slot, so
    // an even amount of conditional invertible protocols will cause an uneven
    // alignment.
    if ((numProtocols & 1) == 0) {
      B.addInt16(0);
    }

    // Emit the generic requirements for the conditional conformance
    // to each invertible protocol.
    auto nominal = cast(Type);
    auto genericSig = nominal->getGenericSignatureOfContext();
    ASTContext &ctx = nominal->getASTContext();
    unsigned index = 0;
    unsigned totalNumRequirements = 0;
    for (auto kind : protocols) {
      auto proto = ctx.getProtocol(getKnownProtocolKind(kind));
      SmallVector conformances;
      (void)nominal->lookupConformance(proto, conformances);
      auto conformance = conformances.front();

      // Collect the inverses from the conformance context's signature so
      // they can be filtered out of the emitted requirements.
      SmallVector inverses;
      if (auto conformanceSig =
              conformance->getDeclContext()->getGenericSignatureOfContext()) {
        SmallVector scratchReqs;
        conformanceSig->getRequirementsWithInverses(scratchReqs, inverses);
      }
      auto metadata = irgen::addGenericRequirements(
          IGM, B, genericSig, conformance->getConditionalRequirements(),
          inverses);

      // Backpatch this protocol's placeholder with the running requirement
      // count.
      totalNumRequirements += metadata.NumRequirements;
      B.fillPlaceholderWithInt(countPlaceholders[index++], IGM.Int16Ty,
                               totalNumRequirements);
    }
  }

  /// Fill out all the aspects of the type identity.
  void computeIdentity() {
    // Remember the user-facing name.
    UserFacingName = Type->getName().str();
    IsCxxSpecializedTemplate = false;

    // For related entities, set the original type name as the ABI name
    // and remember the related entity tag.
    std::string abiName;
    // NOTE(review): several template arguments in this method (getAttribute,
    // dyn_cast, isa) appear to have been stripped during extraction;
    // non-comment tokens are preserved as found.
    if (auto *synthesizedTypeAttr =
            Type->getAttrs()
                .template getAttribute()) {
      abiName = synthesizedTypeAttr->originalTypeName;

      getMutableImportInfo().RelatedEntityName =
          std::string(synthesizedTypeAttr->getManglingName());

      // Otherwise, if this was imported from a Clang declaration, use that
      // declaration's name as the ABI name.
    } else if (auto clangDecl =
                   Mangle::ASTMangler::getClangDeclForMangling(Type)) {
      // Class template specializations need to use their mangled name so
      // that each specialization gets its own metadata. A class template
      // specialization's Swift name will always be the mangled name, so just
      // use that.
      if (auto spec = dyn_cast(clangDecl)) {
        abiName = Type->getName().str();
        IsCxxSpecializedTemplate = true;
      } else
        abiName = clangDecl->getQualifiedNameAsString();

      // Typedefs and compatibility aliases that have been promoted to
      // their own nominal types need to be marked specially.
      if (isa(clangDecl) || isa(clangDecl)) {
        getMutableImportInfo().SymbolNamespace =
            TypeImportSymbolNamespace::CTypedef;
      }
    }

    // If the ABI name differs from the user-facing name, add it as
    // an override.
    if (!abiName.empty() && abiName != UserFacingName) {
      getMutableImportInfo().ABIName = abiName;
    }
  }

  /// Get the mutable import info. Note that calling this method itself
  /// changes the code to cause it to be used, so don't set it unless
  /// you're about to write something into it.
  TypeImportInfo &getMutableImportInfo() {
    if (!ImportInfo)
      ImportInfo.emplace();
    return *ImportInfo;
  }

  /// Emit the type's name (escaping raw identifiers for the runtime) plus
  /// any import info, and add a relative reference to the string.
  void addName() {
    SmallString<32> name;
    if (!IsCxxSpecializedTemplate &&
        Lexer::identifierMustAlwaysBeEscaped(UserFacingName)) {
      Mangle::Mangler::appendRawIdentifierForRuntime(UserFacingName, name);
    } else {
      name += UserFacingName;
    }

    // Collect the import info if present.
    if (ImportInfo) {
      name += '\0';
      ImportInfo->appendTo(name);

      // getAddrOfGlobalString will add its own null terminator, so pop
      // off the second one.
      assert(name.back() == '\0');
      name.pop_back();
      // The import info's own terminator remains as the string's separator.
      assert(name.back() == '\0');
    }

    auto nameStr = IGM.getAddrOfGlobalString(name, CStringSectionType::Default,
                                             /*willBeRelativelyAddressed*/ true);
    B.addRelativeAddress(nameStr);
  }

  /// Add a compact reference to the metadata access function, or a null
  /// slot when no metadata is being emitted.
  void addAccessFunction() {
    llvm::Function *accessor;

    // Don't include an access function if we're emitting the context
    // descriptor without metadata.
    if (!HasMetadata) {
      accessor = nullptr;

      // If it's a generic type, use the generic access function.
      // This has a different prototype from an ordinary function, but
      // the runtime knows to check for that.
    } else if (Type->isGenericContext()) {
      accessor = getGenericTypeMetadataAccessFunction(IGM, Type,
                                                      NotForDefinition);

      // Otherwise, use the ordinary access function, which we'll define
      // when we emit the metadata.
    } else {
      CanType type = Type->getDeclaredType()->getCanonicalType();
      accessor = getOtherwiseDefinedTypeMetadataAccessFunction(IGM, type);
    }

    B.addCompactFunctionReferenceOrNull(accessor);
  }

  ConstantReference getParent() {
    return IGM.getAddrOfParentContextDescriptor(
        Type, /*fromAnonymousContext=*/false);
  }

  GenericSignature getGenericSignature() {
    return Type->getGenericSignature();
  }

  /// Whether the generic signature carries any inverse requirements.
  bool hasInvertibleProtocols() {
    auto genericSig = asImpl().getGenericSignature();
    if (!genericSig)
      return false;

    SmallVector requirements;
    SmallVector inverses;
    genericSig->getRequirementsWithInverses(requirements, inverses);
    return !inverses.empty();
  }

  /// Fill in the fields of a TypeGenericContextDescriptorHeader.
  void addGenericParametersHeader() {
    // The instantiation cache and pattern precede the generic parameter
    // header proper.
    asImpl().addMetadataInstantiationCache();
    asImpl().addMetadataInstantiationPattern();
    super::addGenericParametersHeader();
  }

  /// Add a relative reference to the metadata instantiation pattern, or a
  /// null slot when no metadata is emitted.
  void addMetadataInstantiationPattern() {
    if (!HasMetadata) {
      B.addInt32(0);
      return;
    }

    auto pattern = IGM.getAddrOfTypeMetadataPattern(Type);
    B.addRelativeAddress(pattern);
  }

  /// Add a relative reference to the metadata instantiation cache, or a
  /// null slot when disabled or when no metadata is emitted.
  void addMetadataInstantiationCache() {
    if (!HasMetadata || IGM.getOptions().NoPreallocatedInstantiationCaches) {
      B.addInt32(0);
      return;
    }

    auto cache =
        IGM.getAddrOfTypeMetadataInstantiationCache(Type, NotForDefinition);
    B.addRelativeAddress(cache);
  }

  bool isUniqueDescriptor() {
    return !isa(Type->getModuleScopeContext());
  }

  /// Build the descriptor, mark it constant, place it in the colocated
  /// type-descriptor section, and return the global variable.
  llvm::Constant *emit() {
    asImpl().layout();
    auto addr = IGM.getAddrOfTypeContextDescriptor(Type, HasMetadata,
                                                   B.finishAndCreateFuture());
    auto var = cast(addr);

    if (IGM.getOptions().VirtualFunctionElimination) {
      asImpl().addVTableTypeMetadata(var);
    }

    var->setConstant(true);
    IGM.setColocateTypeDescriptorSection(var);
    return var;
  }

  void setCommonFlags(TypeContextDescriptorFlags &flags) {
    setClangImportedFlags(flags);
    setMetadataInitializationKind(flags);
    setHasCanonicalMetadataPrespecializationsOrSingletonMetadataPointer(
        flags);
  }

  void setClangImportedFlags(TypeContextDescriptorFlags &flags) {
    if (ImportInfo) {
      flags.setHasImportInfo(true);
    }
  }

  /// Determine which metadata-initialization strategy this type needs.
  TypeContextDescriptorFlags::MetadataInitializationKind
  computeMetadataInitialization() {
    // Not if we don't have metadata.
    if (!HasMetadata)
      return TypeContextDescriptorFlags::NoMetadataInitialization;

    // Generic types use their own system.
    if (Type->isGenericContext())
      return TypeContextDescriptorFlags::NoMetadataInitialization;

    // Check for foreign metadata.
    if (requiresForeignTypeMetadata(Type))
      return TypeContextDescriptorFlags::ForeignMetadataInitialization;

    // The only other option is singleton initialization.
    if (needsSingletonMetadataInitialization(IGM, Type))
      return TypeContextDescriptorFlags::SingletonMetadataInitialization;

    return TypeContextDescriptorFlags::NoMetadataInitialization;
  }

  void setMetadataInitializationKind(TypeContextDescriptorFlags &flags) {
    flags.setMetadataInitialization(MetadataInitialization);
  }

  void setHasCanonicalMetadataPrespecializationsOrSingletonMetadataPointer(
      TypeContextDescriptorFlags &flags) {
    flags.setHasCanonicalMetadataPrespecializationsOrSingletonMetadataPointer(
        hasCanonicalMetadataPrespecializations() ||
        hasSingletonMetadataPointer());
  }

  /// Whether any canonical metadata prespecializations exist for this type.
  bool hasCanonicalMetadataPrespecializations() {
    return IGM.shouldPrespecializeGenericMetadata() &&
           llvm::any_of(IGM.IRGen.metadataPrespecializationsForType(Type),
                        [](auto pair) {
                          return pair.second ==
                                 TypeMetadataCanonicality::Canonical;
                        });
  }

  /// Whether the descriptor should carry a direct pointer to the type's
  /// singleton metadata (see the criteria listed below).
  bool hasSingletonMetadataPointer() {
    if (!IGM.IRGen.Opts.EmitSingletonMetadataPointers)
      return false;

    bool isGeneric = Type->isGenericContext();
    bool noInitialization =
        MetadataInitialization ==
        TypeContextDescriptorFlags::NoMetadataInitialization;
    auto isPublic = Type->getFormalAccessScope().isPublic();
    auto kind = asImpl().getContextKind();
    auto isSupportedKind = kind == ContextDescriptorKind::Class ||
                           kind == ContextDescriptorKind::Struct ||
                           kind == ContextDescriptorKind::Enum;

    // Only emit a singleton metadata pointer if:
    //   The type is not generic (there's no single metadata if it's generic).
    //   The metadata doesn't require runtime initialization. (The metadata
    //   can't safely be accessed directly if it does.)
    //   The type is not public. (If it's public it can be found by symbol.)
    //   It's a class, struct, or enum.
    return !isGeneric && noInitialization && !isPublic && isSupportedKind;
  }

  /// Add the metadata-initialization trailer matching the kind computed in
  /// the constructor, if any.
  void maybeAddMetadataInitialization() {
    switch (MetadataInitialization) {
    case TypeContextDescriptorFlags::NoMetadataInitialization:
      return;

    case TypeContextDescriptorFlags::ForeignMetadataInitialization:
      addForeignMetadataInitialization();
      return;

    case TypeContextDescriptorFlags::SingletonMetadataInitialization:
      addSingletonMetadataInitialization();
      return;
    }
    llvm_unreachable("bad kind");
  }

  /// Add a ForeignMetadataInitialization structure to the descriptor.
  void addForeignMetadataInitialization() {
    llvm::Function *completionFunction = nullptr;
    if (asImpl().needsForeignMetadataCompletionFunction()) {
      completionFunction =
          IGM.getAddrOfTypeMetadataCompletionFunction(Type, NotForDefinition);
    }
    B.addCompactFunctionReferenceOrNull(completionFunction);
  }

  bool needsForeignMetadataCompletionFunction() {
    return ::needsForeignMetadataCompletionFunction(IGM, Type);
  }

  /// Add an SingletonMetadataInitialization structure to the descriptor.
  void addSingletonMetadataInitialization() {
    // Relative pointer to the initialization cache.
    // Note that we trigger the definition of it when emitting the
    // completion function.
    auto cache = IGM.getAddrOfTypeMetadataSingletonInitializationCache(
        Type, NotForDefinition);
    B.addRelativeAddress(cache);

    asImpl().addIncompleteMetadataOrRelocationFunction();

    // Completion function.
    auto completionFunction =
        IGM.getAddrOfTypeMetadataCompletionFunction(Type, NotForDefinition);
    B.addCompactFunctionReference(completionFunction);
  }

  /// Add a relative pointer to the (not-yet-completed) metadata record.
  void addIncompleteMetadata() {
    // Relative pointer to the metadata.
    auto type = Type->getDeclaredTypeInContext()->getCanonicalType();
    auto metadata = IGM.getAddrOfTypeMetadata(type);
    B.addRelativeAddress(metadata);
  }

  /// Customization point for ClassContextDescriptorBuilder.
  void addIncompleteMetadataOrRelocationFunction() {
    addIncompleteMetadata();
  }

  void maybeAddCanonicalMetadataPrespecializations() {
    if (Type->isGenericContext() && hasCanonicalMetadataPrespecializations()) {
      asImpl().addCanonicalMetadataPrespecializations();
      asImpl().addCanonicalMetadataPrespecializationCachingOnceToken();
    }
  }

  /// Emit the count of canonical prespecializations followed by a relative
  /// reference to each one's metadata.
  void addCanonicalMetadataPrespecializations() {
    auto specializations = IGM.IRGen.metadataPrespecializationsForType(Type);
    auto count = llvm::count_if(specializations, [](auto pair) {
      return pair.second == TypeMetadataCanonicality::Canonical;
    });
    B.addInt32(count);
    for (auto pair : specializations) {
      if (pair.second != TypeMetadataCanonicality::Canonical) {
        continue;
      }
      auto specialization = pair.first;
      auto *metadata = IGM.getAddrOfTypeMetadata(specialization);
      B.addRelativeAddress(metadata);
    }
  }

  void addCanonicalMetadataPrespecializationCachingOnceToken() {
    auto *cachingOnceToken =
        IGM.getAddrOfCanonicalPrespecializedGenericTypeCachingOnceToken(Type);
    B.addRelativeAddress(cachingOnceToken);
  }

  void maybeAddSingletonMetadataPointer() {
    if (hasSingletonMetadataPointer()) {
      auto type = Type->getDeclaredTypeInContext()->getCanonicalType();
      auto metadata = IGM.getAddrOfTypeMetadata(type);
      B.addRelativeAddress(metadata);
    }
  }

  // Subclasses should provide:
  // ContextDescriptorKind getContextKind();
  // void addLayoutInfo();
  // void addReflectionFieldDescriptor();
};

// Builds the context descriptor for a struct.
// NOTE(review): base-class and cast template arguments appear to have been
// stripped during extraction; non-comment tokens are preserved as found.
class StructContextDescriptorBuilder
    : public TypeContextDescriptorBuilderBase {
  using super = TypeContextDescriptorBuilderBase;

  StructDecl *getType() {
    return cast(Type);
  }

  Size FieldVectorOffset;  // static offset of the field offset vector
  bool hasLayoutString;    // whether a layout string is emitted for this type

public:
  StructContextDescriptorBuilder(IRGenModule &IGM, StructDecl *Type,
                                 RequireMetadata_t requireMetadata,
                                 bool hasLayoutString)
      : super(IGM, Type, requireMetadata),
        hasLayoutString(hasLayoutString) {
    auto &layout = IGM.getMetadataLayout(getType());
    FieldVectorOffset = layout.getFieldOffsetVectorOffset().getStatic();
  }

  void layout() {
    super::layout();
    maybeAddCanonicalMetadataPrespecializations();
    addInvertedProtocols();
    maybeAddSingletonMetadataPointer();
  }

  ContextDescriptorKind getContextKind() {
    return ContextDescriptorKind::Struct;
  }

  /// Emit the struct-specific layout trailer: field count and the offset
  /// (in words) of the field offset vector.
  void addLayoutInfo() {
    // uint32_t NumFields;
    B.addInt32(countExportableFields(IGM, getType()));

    // uint32_t FieldOffsetVectorOffset;
    B.addInt32(FieldVectorOffset / IGM.getPointerSize());
  }

  uint16_t getKindSpecificFlags() {
    TypeContextDescriptorFlags flags;
    setCommonFlags(flags);
    flags.setHasLayoutString(hasLayoutString);
    return flags.getOpaqueValue();
  }

  // Structs never have a resilient superclass.
  void maybeAddResilientSuperclass() { }

  void addReflectionFieldDescriptor() {
    if (IGM.IRGen.Opts.ReflectionMetadata != ReflectionMetadataMode::Runtime) {
      B.addInt32(0);
      return;
    }

    IGM.IRGen.noteUseOfFieldDescriptor(getType());

    B.addRelativeAddress(IGM.getAddrOfReflectionFieldDescriptor(
        getType()->getDeclaredType()->getCanonicalType()));
  }

  void addVTableTypeMetadata(llvm::GlobalVariable *var) {
    // Structs don't have vtables.
  }
};

// Builds the context descriptor for an enum.
// NOTE(review): base-class and cast template arguments appear to have been
// stripped during extraction; non-comment tokens are preserved as found.
class EnumContextDescriptorBuilder
    : public TypeContextDescriptorBuilderBase {
  using super = TypeContextDescriptorBuilderBase;

  EnumDecl *getType() {
    return cast(Type);
  }

  // Static offset of the payload-size slot, if the metadata layout has one.
  Size PayloadSizeOffset;
  const EnumImplStrategy &Strategy;
  bool hasLayoutString;

public:
  EnumContextDescriptorBuilder(IRGenModule &IGM, EnumDecl *Type,
                               RequireMetadata_t requireMetadata,
                               bool hasLayoutString)
      : super(IGM, Type, requireMetadata),
        Strategy(getEnumImplStrategy(
            IGM, getType()->getDeclaredTypeInContext()->getCanonicalType())),
        hasLayoutString(hasLayoutString) {
    auto &layout = IGM.getMetadataLayout(getType());
    if (layout.hasPayloadSizeOffset())
      PayloadSizeOffset = layout.getPayloadSizeOffset().getStatic();
  }

  void layout() {
    super::layout();
    maybeAddCanonicalMetadataPrespecializations();
    addInvertedProtocols();
    maybeAddSingletonMetadataPointer();
  }

  ContextDescriptorKind getContextKind() {
    return ContextDescriptorKind::Enum;
  }

  void addLayoutInfo() {
    // # payload cases in the low 24 bits, payload size offset in the high 8.
    unsigned numPayloads = Strategy.getElementsWithPayload().size();
    assert(numPayloads < (1<<24) && "too many payload elements for runtime");
    assert(PayloadSizeOffset % IGM.getPointerAlignment() == Size(0)
           && "payload size not word-aligned");
    unsigned PayloadSizeOffsetInWords
      = PayloadSizeOffset / IGM.getPointerSize();
    assert(PayloadSizeOffsetInWords < 0x100 &&
           "payload size offset too far from address point for runtime");

    // uint32_t NumPayloadCasesAndPayloadSizeOffset;
    B.addInt32(numPayloads | (PayloadSizeOffsetInWords << 24));

    // uint32_t NumEmptyCases;
    B.addInt32(Strategy.getElementsWithNoPayload().size());
  }

  uint16_t getKindSpecificFlags() {
    TypeContextDescriptorFlags flags;
    setCommonFlags(flags);
    flags.setHasLayoutString(hasLayoutString);
    return flags.getOpaqueValue();
  }

  // Enums never have a resilient superclass.
  void maybeAddResilientSuperclass() { }

  void addReflectionFieldDescriptor() {
    if (IGM.IRGen.Opts.ReflectionMetadata != ReflectionMetadataMode::Runtime) {
      B.addInt32(0);
      return;
    }

    // Force the emission of the field descriptor or fixed descriptor.
    IGM.IRGen.noteUseOfFieldDescriptor(getType());

    // Some enum layout strategies (viz. C compatible layout) aren't
    // supported by reflection.
    if (!Strategy.isReflectable()) {
      B.addInt32(0);
      return;
    }

    B.addRelativeAddress(IGM.getAddrOfReflectionFieldDescriptor(
        getType()->getDeclaredType()->getCanonicalType()));
  }

  void addVTableTypeMetadata(llvm::GlobalVariable *var) {
    // Enums don't have vtables.
  }
};

// Builds the context descriptor for a class, including its vtable and
// override table.
// NOTE(review): base-class, SILVTableVisitor, and cast template arguments
// appear to have been stripped during extraction; non-comment tokens are
// preserved as found.
class ClassContextDescriptorBuilder
    : public TypeContextDescriptorBuilderBase,
      public SILVTableVisitor {
  using super = TypeContextDescriptorBuilderBase;

  ClassDecl *getType() {
    return cast(Type);
  }

  // Non-null unless the type is foreign.
  ClassMetadataLayout *MetadataLayout = nullptr;

  // Reference to the resilient superclass, if there is one.
  std::optional ResilientSuperClassRef;

  SILVTable *VTable;   // the class's SIL vtable (may be null)
  bool Resilient;      // whether the class has resilient metadata
  bool HasNonoverriddenMethods = false;

  SmallVector VTableEntries;
  SmallVector, 8> OverrideTableEntries;

  // As we're constructing the vtable, VTableEntriesForVFE stores the offset
  // (from the beginning of the global) for each vtable slot. The offsets are
  // later turned into !type metadata attributes.
  SmallVector, 8> VTableEntriesForVFE;

public:
  ClassContextDescriptorBuilder(IRGenModule &IGM, ClassDecl *Type,
                                RequireMetadata_t requireMetadata)
      : super(IGM, Type, requireMetadata),
        VTable(IGM.getSILModule().lookUpVTable(getType())),
        Resilient(IGM.hasResilientMetadata(Type,
                                           ResilienceExpansion::Minimal)) {
    // Foreign classes have no metadata layout, vtable, or superclass ref.
    if (getType()->isForeign())
      return;

    MetadataLayout = &IGM.getClassMetadataLayout(Type);

    if (auto superclassDecl = getType()->getSuperclassDecl()) {
      if (MetadataLayout && MetadataLayout->hasResilientSuperclass()) {
        assert(!getType()->isRootDefaultActor() &&
               "root default actor has a resilient superclass?");
        ResilientSuperClassRef = IGM.getTypeEntityReference(superclassDecl);
      }
    }

    // Collect vtable and override entries via the SILVTableVisitor callbacks
    // (addMethod / addMethodOverride below).
    addVTableEntries(getType());
  }

  void addMethod(SILDeclRef fn) {
    if (!VTable || methodRequiresReifiedVTableEntry(IGM, VTable, fn)) {
      VTableEntries.push_back(fn);
    } else {
      // Emit a stub method descriptor and lookup function for nonoverridden
      // methods so that resilient code sequences can still use them.
      emitNonoverriddenMethod(fn);
    }
  }

  void addMethodOverride(SILDeclRef baseRef, SILDeclRef declRef) {
    OverrideTableEntries.emplace_back(baseRef, declRef);
  }

  void layout() {
    super::layout();
    addVTable();
    addOverrideTable();
    addObjCResilientClassStubInfo();
    maybeAddCanonicalMetadataPrespecializations();
    addInvertedProtocols();
    maybeAddSingletonMetadataPointer();
    maybeAddDefaultOverrideTable();
  }

  /// Classes with a resilient superclass reference their metadata pattern
  /// instead of an incomplete metadata record.
  void addIncompleteMetadataOrRelocationFunction() {
    if (MetadataLayout == nullptr ||
        !MetadataLayout->hasResilientSuperclass()) {
      addIncompleteMetadata();
      return;
    }

    auto *pattern = IGM.getAddrOfTypeMetadataPattern(Type);
    B.addRelativeAddress(pattern);
  }

  ContextDescriptorKind getContextKind() {
    return ContextDescriptorKind::Class;
  }

  uint16_t getKindSpecificFlags() {
    TypeContextDescriptorFlags flags;

    setCommonFlags(flags);

    // Class-specific flags only apply to non-foreign classes.
    if (!getType()->isForeign()) {
      if (MetadataLayout->areImmediateMembersNegative())
        flags.class_setAreImmediateMembersNegative(true);

      if (!VTableEntries.empty())
        flags.class_setHasVTable(true);

      if (!OverrideTableEntries.empty())
        flags.class_setHasOverrideTable(true);

      if (MetadataLayout->hasResilientSuperclass())
        flags.class_setHasResilientSuperclass(true);

      if (getType()->isActor())
        flags.class_setIsActor(true);

      if (getType()->isDefaultActor(IGM.getSwiftModule(),
                                    ResilienceExpansion::Maximal))
        flags.class_setIsDefaultActor(true);

      if (getDefaultOverrideTable())
        flags.class_setHasDefaultOverrideTable(true);
    }

    if (ResilientSuperClassRef) {
      flags.class_setResilientSuperclassReferenceKind(
          ResilientSuperClassRef->getKind());
    }

    return flags.getOpaqueValue();
  }

  void maybeAddResilientSuperclass() {
    // RelativeDirectPointer SuperClass;
    if (ResilientSuperClassRef) {
      B.addRelativeAddress(ResilientSuperClassRef->getValue());
    }
  }

  void addReflectionFieldDescriptor() {
    // Classes are always reflectable, unless reflection is disabled or this
    // is a foreign class.
    if ((IGM.IRGen.Opts.ReflectionMetadata !=
         ReflectionMetadataMode::Runtime) ||
        getType()->isForeign()) {
      B.addInt32(0);
      return;
    }

    B.addRelativeAddress(IGM.getAddrOfReflectionFieldDescriptor(
        getType()->getDeclaredType()->getCanonicalType()));
  }

  Size getFieldVectorOffset() {
    if (!MetadataLayout) return Size(0);
    // A resilient superclass makes the offset relative rather than static.
    return (MetadataLayout->hasResilientSuperclass()
              ? MetadataLayout->getRelativeFieldOffsetVectorOffset()
              : MetadataLayout->getStaticFieldOffsetVectorOffset());
  }

  /// Emit the vtable header (offset and size, in words) followed by one
  /// method descriptor per reified vtable entry.
  void addVTable() {
    LLVM_DEBUG(
      llvm::dbgs() << "VTable entries for " << getType()->getName() << ":\n";
      for (auto entry : VTableEntries) {
        llvm::dbgs() << " ";
        entry.print(llvm::dbgs());
        llvm::dbgs() << '\n';
      }
    );

    // Only emit a method lookup function if the class is resilient
    // and has a non-empty vtable, as well as no elided methods.
    if (IGM.hasResilientMetadata(getType(), ResilienceExpansion::Minimal) &&
        (HasNonoverriddenMethods || !VTableEntries.empty()))
      IGM.emitMethodLookupFunction(getType());

    if (VTableEntries.empty())
      return;

    auto offset = MetadataLayout->hasResilientSuperclass()
                    ? MetadataLayout->getRelativeVTableOffset()
                    : MetadataLayout->getStaticVTableOffset();
    B.addInt32(offset / IGM.getPointerSize());
    B.addInt32(VTableEntries.size());

    for (auto fn : VTableEntries)
      emitMethodDescriptor(fn);
  }

  void emitMethodDescriptor(SILDeclRef fn) {
    // Define the method descriptor to point to the current position in the
    // nominal type descriptor, if it has a well-defined symbol name.
    IGM.defineMethodDescriptor(
        fn, Type, B.getAddrOfCurrentPosition(IGM.MethodDescriptorStructTy),
        IGM.MethodDescriptorStructTy);

    if (IGM.getOptions().VirtualFunctionElimination) {
      // Record the slot's offset (past the flags word) for later !type
      // metadata attributes.
      auto offset = B.getNextOffsetFromGlobal() +
                    // 1st field of MethodDescriptorStructTy
                    Size(IGM.DataLayout.getTypeAllocSize(IGM.Int32Ty));
      VTableEntriesForVFE.push_back(std::pair(offset, fn));
    }

    // Actually build the descriptor.
auto descriptor = B.beginStruct(IGM.MethodDescriptorStructTy); buildMethodDescriptorFields(IGM, VTable, fn, descriptor, getType()); descriptor.finishAndAddTo(B); // Emit method dispatch thunk if the class is resilient. auto *func = cast(fn.getDecl()); if ((Resilient && func->getEffectiveAccess() >= AccessLevel::Package) || IGM.getOptions().VirtualFunctionElimination) { IGM.emitDispatchThunk(fn); } } void addVTableTypeMetadata(llvm::GlobalVariable *var) { if (!IGM.getOptions().VirtualFunctionElimination) return; assert(VTable && "no vtable?!"); IGM.addVTableTypeMetadata(getType(), var, VTableEntriesForVFE); } void emitNonoverriddenMethod(SILDeclRef fn) { // TODO: Derivative functions do not distinguish themselves in the mangled // names of method descriptor symbols yet, causing symbol name collisions. if (fn.getDerivativeFunctionIdentifier()) return; HasNonoverriddenMethods = true; // Although this method is non-overridden and therefore left out of the // vtable, we still need to maintain the ABI of a potentially-overridden // method for external clients. // Emit method dispatch thunk. if (hasPublicVisibility(fn.getLinkage(NotForDefinition)) || IGM.getOptions().VirtualFunctionElimination) { IGM.emitDispatchThunk(fn); } if (IGM.getOptions().VirtualFunctionElimination) { auto offset = B.getNextOffsetFromGlobal() + // 1st field of MethodDescriptorStructTy Size(IGM.DataLayout.getTypeAllocSize(IGM.Int32Ty)); VTableEntriesForVFE.push_back(std::pair(offset, fn)); } // Emit a freestanding method descriptor structure. This doesn't have to // exist in the table in the class's context descriptor since it isn't // in the vtable, but external clients need to be able to link against the // symbol. 
IGM.emitNonoverriddenMethodDescriptor(VTable, fn, getType()); } void addOverrideTable() { LLVM_DEBUG( llvm::dbgs() << "Override Table entries for " << getType()->getName() << ":\n"; for (auto entry : OverrideTableEntries) { llvm::dbgs() << " "; entry.first.print(llvm::dbgs()); llvm::dbgs() << " -> "; entry.second.print(llvm::dbgs()); llvm::dbgs() << '\n'; } ); if (OverrideTableEntries.empty()) return; B.addInt32(OverrideTableEntries.size()); for (auto pair : OverrideTableEntries) emitMethodOverrideDescriptor(pair.first, pair.second); } void emitMethodOverrideDescriptor(SILDeclRef baseRef, SILDeclRef declRef) { if (IGM.getOptions().VirtualFunctionElimination) { auto offset = B.getNextOffsetFromGlobal() + // 1st field of MethodOverrideDescriptorStructTy Size(IGM.DataLayout.getTypeAllocSize(IGM.RelativeAddressTy)) + // 2nd field of MethodOverrideDescriptorStructTy Size(IGM.DataLayout.getTypeAllocSize(IGM.RelativeAddressTy)); VTableEntriesForVFE.push_back( std::pair(offset, baseRef)); } auto descriptor = B.beginStruct(IGM.MethodOverrideDescriptorStructTy); // The class containing the base method. auto *baseClass = cast(baseRef.getDecl()->getDeclContext()); IGM.IRGen.noteUseOfTypeContextDescriptor(baseClass, DontRequireMetadata); auto baseClassEntity = LinkEntity::forNominalTypeDescriptor(baseClass); auto baseClassDescriptor = IGM.getAddrOfLLVMVariableOrGOTEquivalent(baseClassEntity); descriptor.addRelativeAddress(baseClassDescriptor); // The base method. auto baseMethodEntity = LinkEntity::forMethodDescriptor(baseRef); auto baseMethodDescriptor = IGM.getAddrOfLLVMVariableOrGOTEquivalent(baseMethodEntity); descriptor.addRelativeAddress(baseMethodDescriptor); // The implementation of the override. 
if (auto entry = VTable->getEntry(IGM.getSILModule(), baseRef)) { assert(entry->getKind() == SILVTable::Entry::Kind::Override); auto *impl = entry->getImplementation(); if (impl->isAsync()) { llvm::Constant *implFn = IGM.getAddrOfAsyncFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else if (impl->getLoweredFunctionType() ->isCalleeAllocatedCoroutine()) { llvm::Constant *implFn = IGM.getAddrOfCoroFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else { llvm::Function *implFn = IGM.getAddrOfSILFunction(impl, NotForDefinition); descriptor.addCompactFunctionReference(implFn); } } else { // The method is removed by dead method elimination. // It should be never called. We add a pointer to an error function. descriptor.addRelativeAddressOrNull(nullptr); } descriptor.finishAndAddTo(B); } SILDefaultOverrideTable *getDefaultOverrideTable() { auto *table = IGM.getSILModule().lookUpDefaultOverrideTable(getType()); if (!table) return nullptr; if (table->getEntries().size() == 0) return nullptr; return table; } void maybeAddDefaultOverrideTable() { auto *table = getDefaultOverrideTable(); if (!table) return; LLVM_DEBUG(llvm::dbgs() << "Default Override Table entries for " << getType()->getName() << ":\n"; for (auto entry : table->getEntries()) { llvm::dbgs() << " "; llvm::dbgs() << "original(" << entry.original << ")"; llvm::dbgs() << " -> "; llvm::dbgs() << "replacement(" << entry.method << ")"; llvm::dbgs() << " -> "; llvm::dbgs() << "impl(" << entry.impl->getName() << ")"; llvm::dbgs() << '\n'; }); B.addInt32(table->getEntries().size()); for (auto entry : table->getEntries()) emitDefaultOverrideDescriptor(entry.method, entry.original, entry.impl); } void emitDefaultOverrideDescriptor(SILDeclRef replacement, SILDeclRef original, SILFunction *impl) { auto descriptor = B.beginStruct(IGM.MethodDefaultOverrideDescriptorStructTy); auto replacementEntity = LinkEntity::forMethodDescriptor(replacement); auto replacementDescriptor = 
IGM.getAddrOfLLVMVariableOrGOTEquivalent(replacementEntity); descriptor.addRelativeAddress(replacementDescriptor); auto originalEntity = LinkEntity::forMethodDescriptor(original); auto originalDescriptor = IGM.getAddrOfLLVMVariableOrGOTEquivalent(originalEntity); descriptor.addRelativeAddress(originalDescriptor); if (impl->isAsync()) { llvm::Constant *implFn = IGM.getAddrOfAsyncFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else if (impl->getLoweredFunctionType()->isCalleeAllocatedCoroutine()) { llvm::Constant *implFn = IGM.getAddrOfCoroFunctionPointer(impl); descriptor.addRelativeAddress(implFn); } else { llvm::Function *implFn = IGM.getAddrOfSILFunction(impl, NotForDefinition); descriptor.addCompactFunctionReference(implFn); } descriptor.finishAndAddTo(B); } void addPlaceholder(MissingMemberDecl *MMD) { llvm_unreachable("cannot generate metadata with placeholders in it"); } void addLayoutInfo() { // TargetRelativeDirectPointer SuperclassType; if (auto superclassType = getSuperclassForMetadata(IGM, getType())) { GenericSignature genericSig = getType()->getGenericSignature(); B.addRelativeAddress(IGM.getTypeRef(superclassType, genericSig, MangledTypeRefRole::Metadata) .first); } else { B.addInt32(0); } // union { // uint32_t MetadataNegativeSizeInWords; // RelativeDirectPointer // ResilientMetadataBounds; // }; if (!MetadataLayout) { // FIXME: do something meaningful for foreign classes? B.addInt32(0); } else if (!MetadataLayout->hasResilientSuperclass()) { B.addInt32(MetadataLayout->getSize().AddressPoint / IGM.getPointerSize()); } else { B.addRelativeAddress( IGM.getAddrOfClassMetadataBounds(getType(), NotForDefinition)); } // union { // uint32_t MetadataPositiveSizeInWords; // ExtraClassContextFlags ExtraClassFlags; // }; if (!MetadataLayout) { // FIXME: do something meaningful for foreign classes? 
B.addInt32(0); } else if (!MetadataLayout->hasResilientSuperclass()) { B.addInt32(MetadataLayout->getSize().getOffsetToEnd() / IGM.getPointerSize()); } else { ExtraClassDescriptorFlags flags; if (IGM.hasObjCResilientClassStub(getType())) flags.setObjCResilientClassStub(true); B.addInt32(flags.getOpaqueValue()); } // uint32_t NumImmediateMembers; auto numImmediateMembers = (MetadataLayout ? MetadataLayout->getNumImmediateMembers() : 0); B.addInt32(numImmediateMembers); // uint32_t NumFields; B.addInt32(countExportableFields(IGM, getType())); // uint32_t FieldOffsetVectorOffset; B.addInt32(getFieldVectorOffset() / IGM.getPointerSize()); } void addObjCResilientClassStubInfo() { if (IGM.getClassMetadataStrategy(getType()) != ClassMetadataStrategy::Resilient) return; if (!IGM.hasObjCResilientClassStub(getType())) return; B.addRelativeAddress( IGM.getAddrOfObjCResilientClassStub( getType(), NotForDefinition, TypeMetadataAddress::AddressPoint)); } void addCanonicalMetadataPrespecializations() { super::addCanonicalMetadataPrespecializations(); auto specializations = IGM.IRGen.metadataPrespecializationsForType(Type); for (auto pair : specializations) { if (pair.second != TypeMetadataCanonicality::Canonical) { continue; } auto specialization = pair.first; auto *function = IGM.getAddrOfCanonicalSpecializedGenericTypeMetadataAccessFunction(specialization, NotForDefinition); B.addCompactFunctionReference(function); } } }; class OpaqueTypeDescriptorBuilder : public ContextDescriptorBuilderBase { using super = ContextDescriptorBuilderBase; OpaqueTypeDecl *O; /// Whether the given requirement is a conformance requirement that /// requires a witness table in the opaque type descriptor. /// /// When it does, returns the protocol. 
  // NOTE(review): as elsewhere in this chunk, template arguments have been
  // stripped by text extraction (e.g. `dyn_cast(...)`, `ArrayRef`,
  // `std::optional`, `SmallVector` without element types). Tokens kept
  // verbatim; restore against the original file.

  /// Delegates to the free function irgen::opaqueTypeRequiresWitnessTable
  /// (defined below, after the anonymous namespace closes).
  ProtocolDecl *requiresWitnessTable(const Requirement &req) const {
    return opaqueTypeRequiresWitnessTable(O, req);
  }

public:
  OpaqueTypeDescriptorBuilder(IRGenModule &IGM, OpaqueTypeDecl *O)
    : super(IGM), O(O)
  {}

  /// Layout: common prefix, generic signature, then underlying type refs and
  /// conformance witness table refs.
  void layout() {
    super::layout();
    addGenericSignature();
    addUnderlyingTypeAndConformances();
  }

  /// One relative type ref per opaque generic parameter, followed by one
  /// witness table ref per conformance requirement that needs one.
  void addUnderlyingTypeAndConformances() {
    for (unsigned index : indices(O->getOpaqueGenericParams())) {
      B.addRelativeAddress(getUnderlyingTypeRef(index));
    }

    auto sig = O->getOpaqueInterfaceGenericSignature();
    for (const auto &req : sig.getRequirements()) {
      if (auto *proto = requiresWitnessTable(req))
        B.addRelativeAddress(getWitnessTableRef(req, proto));
    }
  }

  /// A descriptor is "unique" unless its linkage is one of the shared /
  /// non-ABI kinds, which may be emitted in multiple translation units.
  bool isUniqueDescriptor() {
    switch (LinkEntity::forOpaqueTypeDescriptor(O)
              .getLinkage(NotForDefinition)) {
    case SILLinkage::Public:
    case SILLinkage::PublicExternal:
    case SILLinkage::Package:
    case SILLinkage::PackageExternal:
    case SILLinkage::Hidden:
    case SILLinkage::HiddenExternal:
    case SILLinkage::Private:
      return true;

    case SILLinkage::Shared:
    case SILLinkage::PublicNonABI:
    case SILLinkage::PackageNonABI:
      return false;
    }
    llvm_unreachable("covered switch");
  }

  GenericSignature getGenericSignature() {
    return O->getOpaqueInterfaceGenericSignature();
  }

  /// Parent context of the descriptor: an anonymous context for the naming
  /// VarDecl (debug mode), the innermost generic decl context, or the module.
  ConstantReference getParent() {
    // VarDecls aren't normally contexts, but we still want to mangle
    // an anonymous context for one.
    if (IGM.IRGen.Opts.EnableAnonymousContextMangledNames) {
      // NOTE(review): `dyn_cast(...)` is missing its stripped template arg.
      if (auto namingVar = dyn_cast(O->getNamingDecl())) {
        return ConstantReference(
          IGM.getAddrOfAnonymousContextDescriptor(namingVar),
          ConstantReference::Direct);
      }
    }

    DeclContext *parent = O->getNamingDecl()->getInnermostDeclContext();

    // If we have debug mangled names enabled for anonymous contexts, nest
    // the opaque type descriptor inside an anonymous context for the
    // defining function. This will let type reconstruction in the debugger
    // match the opaque context back into the AST.
    //
    // Otherwise, we can use the module context for nongeneric contexts.
    if (!IGM.IRGen.Opts.EnableAnonymousContextMangledNames &&
        !parent->isGenericContext()) {
      parent = parent->getParentModule();
    }

    return IGM.getAddrOfContextDescriptorForParent(parent, parent,
                                                   /*fromAnonymous*/ false);
  }

  ContextDescriptorKind getContextKind() {
    return ContextDescriptorKind::OpaqueType;
  }

  /// Emit the descriptor global (constant, colocated in the type-descriptor
  /// section) and its accessor function.
  void emit() {
    asImpl().layout();

    auto addr = IGM.getAddrOfOpaqueTypeDescriptor(O,
                                                  B.finishAndCreateFuture());
    // NOTE(review): `cast(addr)` is missing its stripped template argument.
    auto var = cast(addr);
    var->setConstant(true);
    IGM.setColocateTypeDescriptorSection(var);
    IGM.emitOpaqueTypeDescriptorAccessor(O);
  }

  /// Flags encode the total count of underlying types plus witness tables.
  uint16_t getKindSpecificFlags() {
    // Store the number of types and witness tables in the flags.
    unsigned numWitnessTables = llvm::count_if(
        O->getOpaqueInterfaceGenericSignature().getRequirements(),
        [&](const Requirement &req) {
          return requiresWitnessTable(req) != nullptr;
        });

    return O->getOpaqueGenericParams().size() + numWitnessTables;
  }

private:
  /// Type ref for one opaque generic parameter: a direct mangled-name ref
  /// when the substitutions are unique, otherwise a runtime metadata
  /// accessor keyed on availability.
  llvm::Constant *getUnderlyingTypeRef(unsigned opaqueParamIdx) const {

    // If this opaque declaration has a unique set of substitutions,
    // we can simply emit a direct type reference.
    if (auto unique = O->getUniqueUnderlyingTypeSubstitutions()) {
      auto sig = O->getOpaqueInterfaceGenericSignature();
      auto contextSig = O->getGenericSignature().getCanonicalSignature();

      auto *genericParam = O->getOpaqueGenericParams()[opaqueParamIdx];
      auto underlyingType =
          Type(genericParam).subst(*unique)->getReducedType(sig);
      return IGM
          .getTypeRef(underlyingType, contextSig,
                      MangledTypeRefRole::Metadata)
          .first;
    }

    // Otherwise, we have to go through a metadata accessor to
    // fetch underlying type at runtime.

    // There are one or more underlying types with limited
    // availability and one universally available one. This
    // requires us to build a metadata accessor.
    auto substitutionSet = O->getConditionallyAvailableSubstitutions();
    assert(!substitutionSet.empty());

    UnderlyingTypeAccessor accessor(IGM, O, opaqueParamIdx);
    return accessor.emit(substitutionSet);
  }

  /// Witness table ref for one conformance requirement: direct string when
  /// substitutions are unique, otherwise a runtime accessor.
  llvm::Constant *getWitnessTableRef(const Requirement &req,
                                     ProtocolDecl *protocol) {
    auto contextSig = O->getGenericSignature().getCanonicalSignature();
    auto underlyingDependentType = req.getFirstType()->getCanonicalType();

    if (auto unique = O->getUniqueUnderlyingTypeSubstitutions()) {
      auto underlyingType =
          underlyingDependentType.subst(*unique)->getCanonicalType();
      auto underlyingConformance =
          unique->lookupConformance(underlyingDependentType, protocol);

      return IGM.emitWitnessTableRefString(underlyingType,
                                           underlyingConformance, contextSig,
                                           /*setLowBit*/ false);
    }

    WitnessTableAccessor accessor(IGM, O, req, protocol);
    return accessor.emit(O->getConditionallyAvailableSubstitutions());
  }

  /// Common machinery for emitting a runtime accessor that selects among
  /// conditionally-available underlying-type substitutions via
  /// OS-version checks and returns metadata or a witness table.
  class AbstractMetadataAccessor {
  protected:
    IRGenModule &IGM;

    /// The opaque type declaration for this accessor.
    OpaqueTypeDecl *O;

  public:
    AbstractMetadataAccessor(IRGenModule &IGM, OpaqueTypeDecl *O)
        : IGM(IGM), O(O) {}

    virtual ~AbstractMetadataAccessor() {}

    /// The unique symbol this accessor would be reachable by at runtime.
    virtual std::string getSymbol() const = 0;

    /// The result type for this accessor. This type would have
    /// to match a type produced by \c getResultValue.
    virtual llvm::Type *getResultType() const = 0;

    /// Produce a result value based on the given set of substitutions.
    virtual llvm::Value *
    getResultValue(IRGenFunction &IGF, GenericEnvironment *genericEnv,
                   SubstitutionMap substitutions) const = 0;

    /// Build the accessor function: bind generic requirements from the
    /// argument buffer, then emit a chain of availability-condition blocks
    /// (one per conditional substitution, ending in the universal one), each
    /// returning the appropriate result value.
    /// NOTE(review): `ArrayRef` below is missing its stripped element type.
    llvm::Constant *
    emit(ArrayRef substitutionSet) {
      auto getInt32Constant =
          // NOTE(review): `std::optional` is missing its stripped argument.
          [&](std::optional value) -> llvm::ConstantInt * {
        return llvm::ConstantInt::get(IGM.Int32Ty, value.value_or(0));
      };

      auto symbol = getSymbol();

      auto *accessor = getAccessorFn(symbol);
      {
        IRGenFunction IGF(IGM, accessor);

        if (IGM.DebugInfo)
          IGM.DebugInfo->emitArtificialFunction(IGF, accessor);

        auto signature = O->getGenericSignature().getCanonicalSignature();
        auto *genericEnv = signature.getGenericEnvironment();

        // Prepare contextual replacements.
        {
          // NOTE(review): `SmallVector` is missing its stripped element type.
          SmallVector requirements;
          enumerateGenericSignatureRequirements(
              signature,
              [&](GenericRequirement req) { requirements.push_back(req); });

          auto bindingsBufPtr = IGF.collectParameters().claimNext();

          bindFromGenericRequirementsBuffer(
              IGF, requirements,
              Address(bindingsBufPtr, IGM.Int8Ty, IGM.getPointerAlignment()),
              MetadataState::Complete,
              (genericEnv
                 ? genericEnv->getForwardingSubstitutionMap()
                 : SubstitutionMap()));
        }

        // NOTE(review): `SmallVector` is missing its stripped element type.
        SmallVector conditionalTypes;

        // Pre-allocate a basic block per condition, so that it's
        // possible to jump between conditions.
        for (unsigned index : indices(substitutionSet)) {
          conditionalTypes.push_back(
              IGF.createBasicBlock((index < substitutionSet.size() - 1)
                                       ? "conditional-" + llvm::utostr(index)
                                       : "universal"));
        }

        // Jump straight to the first conditional type block.
        IGF.Builder.CreateBr(conditionalTypes.front());

        // For each conditionally available substitution
        // (the last one is universal):
        //  - check all of the conditions via `isOSVersionAtLeast`
        //  - if all checks are true - emit a return of a result value.
        for (unsigned i = 0; i < substitutionSet.size() - 1; ++i) {
          auto *underlyingTy = substitutionSet[i];

          IGF.Builder.emitBlock(conditionalTypes[i]);

          auto returnTypeBB =
              IGF.createBasicBlock("result-" + llvm::utostr(i));

          // Emit a #available condition check, if it's `false` -
          // jump to the next conditionally available type.
          auto queries = underlyingTy->getAvailabilityQueries();

          // NOTE(review): stripped element type.
          SmallVector conditionBlocks;
          for (unsigned queryIndex : indices(queries)) {
            // cond--
            conditionBlocks.push_back(IGF.createBasicBlock(
                "cond-" + llvm::utostr(i) + "-" + llvm::utostr(queryIndex)));
          }

          // Jump to the first condition.
          IGF.Builder.CreateBr(conditionBlocks.front());

          for (unsigned queryIndex : indices(queries)) {
            const auto &query = queries[queryIndex];
            assert(query.getPrimaryArgument());
            bool isUnavailability = query.isUnavailability();
            auto version = query.getPrimaryArgument().value();
            auto *major = getInt32Constant(version.getMajor());
            auto *minor = getInt32Constant(version.getMinor());
            auto *patch = getInt32Constant(version.getSubminor());

            IGF.Builder.emitBlock(conditionBlocks[queryIndex]);

            auto isAtLeast =
                IGF.emitTargetOSVersionAtLeastCall(major, minor, patch);

            auto success = IGF.Builder.CreateICmpNE(
                isAtLeast, llvm::Constant::getNullValue(IGM.Int32Ty));

            if (isUnavailability) {
              // Invert the result of "at least" check by xor'ing resulting
              // boolean with `-1`.
              success =
                  IGF.Builder.CreateXor(success, IGF.Builder.getIntN(1, -1));
            }

            auto nextCondOrRet = queryIndex == queries.size() - 1
                                     ? returnTypeBB
                                     : conditionBlocks[queryIndex + 1];

            IGF.Builder.CreateCondBr(success, nextCondOrRet,
                                     conditionalTypes[i + 1]);
          }

          {
            IGF.Builder.emitBlock(returnTypeBB);
            ConditionalDominanceScope domScope(IGF);
            IGF.Builder.CreateRet(getResultValue(
                IGF, genericEnv, underlyingTy->getSubstitutions()));
          }
        }

        IGF.Builder.emitBlock(conditionalTypes.back());
        auto universal = substitutionSet.back();

        assert(universal->getAvailabilityQueries().size() == 1 &&
               universal->getAvailabilityQueries()[0].isConstant());

        IGF.Builder.CreateRet(
            getResultValue(IGF, genericEnv, universal->getSubstitutions()));
      }

      return getAddrOfMetadataAccessor(symbol, accessor);
    }

  private:
    /// Create the private accessor function: result type per subclass, one
    /// i8* parameter (the generic-requirements buffer).
    llvm::Function *getAccessorFn(std::string symbol) const {
      auto fnTy = llvm::FunctionType::get(getResultType(), {IGM.Int8PtrTy},
                                          /*vararg*/ false);

      auto *accessor = llvm::Function::Create(
          fnTy, llvm::GlobalValue::PrivateLinkage, symbol, IGM.getModule());

      accessor->setAttributes(IGM.constructInitialAttributes());

      return accessor;
    }

    /// Wrap the accessor in a mangled-name blob: 0xFF marker, kind byte 9,
    /// relative address of the accessor, null terminator.
    llvm::Constant *
    getAddrOfMetadataAccessor(std::string symbol,
                              llvm::Function *accessor) const {
      return IGM.getAddrOfStringForMetadataRef(
          symbol, /*align*/ 2,
          /*low bit*/ false, [&](ConstantInitBuilder &B) {
            // Form the mangled name with its relative reference.
            auto S = B.beginStruct();
            S.setPacked(true);
            S.add(llvm::ConstantInt::get(IGM.Int8Ty, 255));
            S.add(llvm::ConstantInt::get(IGM.Int8Ty, 9));
            S.addRelativeAddress(accessor);

            // And a null terminator!
            S.addInt(IGM.Int8Ty, 0);

            return S.finishAndCreateFuture();
          });
    }
  };

  /// Accessor returning the runtime type metadata of one opaque generic
  /// parameter's underlying type.
  class UnderlyingTypeAccessor final : public AbstractMetadataAccessor {
    /// The index of the generic parameter accessor is going
    /// to retrieve the underlying type for.
    unsigned OpaqueParamIndex;

  public:
    UnderlyingTypeAccessor(IRGenModule &IGM, OpaqueTypeDecl *O,
                           unsigned opaqueParamIndex)
        : AbstractMetadataAccessor(IGM, O),
          OpaqueParamIndex(opaqueParamIndex) {}

    std::string getSymbol() const override {
      IRGenMangler mangler(IGM.Context);
      return mangler.mangleSymbolNameForUnderlyingTypeAccessorString(
          O, OpaqueParamIndex);
    }

    llvm::Type *getResultType() const override {
      return IGM.TypeMetadataPtrTy;
    }

    llvm::Value *
    getResultValue(IRGenFunction &IGF, GenericEnvironment *genericEnv,
                   SubstitutionMap substitutions) const override {
      auto type =
          Type(O->getOpaqueGenericParams()[OpaqueParamIndex])
              .subst(substitutions)
              ->getReducedType(O->getOpaqueInterfaceGenericSignature());

      type = genericEnv
                 ? genericEnv->mapTypeIntoContext(type)->getCanonicalType()
                 : type;

      return IGF.emitTypeMetadataRef(type);
    }
  };

  /// Accessor returning the witness table for one conformance requirement of
  /// the underlying type.
  class WitnessTableAccessor final : public AbstractMetadataAccessor {
    /// The requirement itself.
    const Requirement &R;

    /// Protocol requirement.
    ProtocolDecl *P;

  public:
    WitnessTableAccessor(IRGenModule &IGM, OpaqueTypeDecl *O,
                         const Requirement &requirement, ProtocolDecl *P)
        : AbstractMetadataAccessor(IGM, O), R(requirement), P(P) {}

    std::string getSymbol() const override {
      IRGenMangler mangler(IGM.Context);
      return mangler.mangleSymbolNameForUnderlyingWitnessTableAccessorString(
          O, R, P);
    }

    llvm::Type *getResultType() const override {
      return IGM.WitnessTablePtrTy;
    }

    llvm::Value *
    getResultValue(IRGenFunction &IGF, GenericEnvironment *genericEnv,
                   SubstitutionMap substitutions) const override {
      auto underlyingDependentType = R.getFirstType()->getCanonicalType();

      auto underlyingType = underlyingDependentType.subst(substitutions);
      auto underlyingConformance =
          substitutions.lookupConformance(underlyingDependentType, P);

      if (underlyingType->hasTypeParameter()) {
        underlyingType = genericEnv->mapTypeIntoContext(
            underlyingType);
        underlyingConformance = underlyingConformance.subst(
            genericEnv->getForwardingSubstitutionMap());
      }

      return emitWitnessTableRef(IGF, underlyingType->getCanonicalType(),
                                 underlyingConformance);
    }
  };
};
} // end anonymous namespace

/// Returns the protocol when \p req is a conformance requirement that (a)
/// needs a witness table and (b) is rooted at one of the opaque type's own
/// generic parameters (not an outer context); nullptr otherwise.
ProtocolDecl *irgen::opaqueTypeRequiresWitnessTable(
    OpaqueTypeDecl *opaque, const Requirement &req) {
  // We only care about conformance requirements.
  if (req.getKind() != RequirementKind::Conformance)
    return nullptr;

  // The protocol must require a witness table.
  auto proto = req.getProtocolDecl();
  if (!Lowering::TypeConverter::protocolRequiresWitnessTable(proto))
    return nullptr;

  // The type itself must be anchored on one of the generic parameters of
  // the opaque type (not an outer context).
  auto *genericParam = req.getFirstType()->getRootGenericParam();
  unsigned opaqueDepth = opaque->getOpaqueGenericParams().front()->getDepth();
  if (genericParam->getDepth() == opaqueDepth) {
    return proto;
  }

  return nullptr;
}

// (continued on the next block) Clears a previously-emitted partial type
// context descriptor so a complete one can be regenerated.
static void eraseExistingTypeContextDescriptor(IRGenModule &IGM,
                                               NominalTypeDecl *type) {
  // We may have emitted a partial type context descriptor with some empty
  // fields, and then later discovered we're emitting complete metadata.
  // Remove existing definitions of the type context so that we can regenerate
  // a complete descriptor.
// NOTE(review): template arguments remain stripped throughout (e.g.
// `dyn_cast(entity)`, `cast(existing->second)`, `llvm::function_ref emit`,
// `PointerUnion DC`), and `¶ms` below is mojibake for `&params`
// (`&para` was decoded as the pilcrow character). Tokens kept verbatim.

// (continuation) Body of eraseExistingTypeContextDescriptor, whose signature
// and leading comment appear at the end of the previous span: drop the
// initializer of any already-defined descriptor global for this type.
auto entity = IGM.getAddrOfTypeContextDescriptor(type, DontRequireMetadata);
  entity = entity->stripPointerCasts();
  auto existingContext = dyn_cast(entity);
  if (existingContext && !existingContext->isDeclaration()) {
    existingContext->setInitializer(nullptr);
  }
}

/// Emit (or re-emit) the type context descriptor for a nominal type,
/// dispatching to the struct/enum/class descriptor builders. Computes
/// whether a layout string is available for value-witness instantiation.
/// No-op under the Embedded feature.
void irgen::emitLazyTypeContextDescriptor(IRGenModule &IGM,
                                          NominalTypeDecl *type,
                                          RequireMetadata_t requireMetadata) {
  if (type->getASTContext().LangOpts.hasFeature(Feature::Embedded)) {
    return;
  }
  eraseExistingTypeContextDescriptor(IGM, type);

  bool hasLayoutString = false;
  auto lowered = getLoweredTypeInPrimaryContext(IGM, type);
  auto &ti = IGM.getTypeInfo(lowered);
  auto *typeLayoutEntry = ti.buildTypeLayoutEntry(IGM, lowered,
                                                  /*useStructLayouts*/ true);
  if (layoutStringsEnabled(IGM)) {
    auto genericSig =
        lowered.getNominalOrBoundGenericNominal()->getGenericSignature();
    hasLayoutString = !!typeLayoutEntry->layoutString(IGM, genericSig);

    if (!hasLayoutString &&
        IGM.Context.LangOpts.hasFeature(
            Feature::LayoutStringValueWitnessesInstantiation) &&
        IGM.getOptions().EnableLayoutStringValueWitnessesInstantiation) {
      // NOTE(review): `isa(ti)` below is missing its stripped template arg.
      hasLayoutString |= needsSingletonMetadataInitialization(IGM, type) ||
                         (type->isGenericContext() && !isa(ti));
    }
  }

  if (auto sd = dyn_cast(type)) {
    StructContextDescriptorBuilder(IGM, sd, requireMetadata,
                                   hasLayoutString).emit();
  } else if (auto ed = dyn_cast(type)) {
    EnumContextDescriptorBuilder(IGM, ed, requireMetadata, hasLayoutString)
        .emit();
  } else if (auto cd = dyn_cast(type)) {
    ClassContextDescriptorBuilder(IGM, cd, requireMetadata).emit();
  } else {
    llvm_unreachable("type does not have a context descriptor");
  }
}

/// Emit type metadata for a lazily-emitted nominal type: foreign metadata,
/// struct or enum metadata, or a protocol decl. Classes are handled
/// elsewhere and must not be enqueued here.
void irgen::emitLazyTypeMetadata(IRGenModule &IGM, NominalTypeDecl *type) {
  eraseExistingTypeContextDescriptor(IGM, type);

  if (requiresForeignTypeMetadata(type)) {
    emitForeignTypeMetadata(IGM, type);
  } else if (auto sd = dyn_cast(type)) {
    emitStructMetadata(IGM, sd);
  } else if (auto ed = dyn_cast(type)) {
    emitEnumMetadata(IGM, ed);
  } else if (auto pd = dyn_cast(type)) {
    IGM.emitProtocolDecl(pd);
  } else {
    llvm_unreachable("should not have enqueued a class decl here!");
  }
}

/// Emit the generic type metadata access function for \p nominal, marked
/// read-none when all generic arguments fit in direct parameters and there
/// are no parameter packs.
void irgen::emitLazyMetadataAccessor(IRGenModule &IGM,
                                     NominalTypeDecl *nominal) {
  GenericArguments genericArgs;
  genericArgs.collectTypes(IGM, nominal);

  llvm::Function *accessor = IGM.getAddrOfGenericTypeMetadataAccessFunction(
      nominal, genericArgs.Types, ForDefinition);

  if (IGM.getOptions().optimizeForSize())
    accessor->addFnAttr(llvm::Attribute::NoInline);

  bool isReadNone = !genericArgs.hasPacks &&
                    (genericArgs.Types.size() <=
                     NumDirectGenericTypeMetadataAccessFunctionArgs);

  emitCacheAccessFunction(
      IGM, accessor, /*cache*/ nullptr, /*cache type*/ nullptr,
      CacheStrategy::None,
      // NOTE(review): `¶ms` is mojibake for `&params`.
      [&](IRGenFunction &IGF, Explosion ¶ms) {
        return emitGenericTypeMetadataAccessFunction(IGF, params, nominal,
                                                     genericArgs);
      },
      isReadNone);
}

/// Emit the access function for a canonical specialized generic type's
/// metadata (always read-none; no cache).
void irgen::emitLazyCanonicalSpecializedMetadataAccessor(IRGenModule &IGM,
                                                         CanType theType) {
  llvm::Function *accessor =
      IGM.getAddrOfCanonicalSpecializedGenericTypeMetadataAccessFunction(
          theType, ForDefinition);

  if (IGM.getOptions().optimizeForSize()) {
    accessor->addFnAttr(llvm::Attribute::NoInline);
  }

  emitCacheAccessFunction(
      IGM, accessor, /*cache=*/nullptr, /*cache type*/ nullptr,
      CacheStrategy::None,
      // NOTE(review): `¶ms` is mojibake for `&params`.
      [&](IRGenFunction &IGF, Explosion ¶ms) {
        return emitCanonicalSpecializedGenericTypeMetadataAccessFunction(
            IGF, params, theType);
      },
      /*isReadNone=*/true);
}

/// Statically emit specialized metadata for a generic struct, enum, or class
/// instantiation; other kinds are unsupported.
void irgen::emitLazySpecializedGenericTypeMetadata(IRGenModule &IGM,
                                                   CanType type) {
  switch (type->getKind()) {
  case TypeKind::Struct:
  case TypeKind::BoundGenericStruct:
    emitSpecializedGenericStructMetadata(IGM, type,
                                         *type.getStructOrBoundGenericStruct());
    break;
  case TypeKind::Enum:
  case TypeKind::BoundGenericEnum:
    emitSpecializedGenericEnumMetadata(IGM, type,
                                       *type.getEnumOrBoundGenericEnum());
    break;
  case TypeKind::Class:
  case TypeKind::BoundGenericClass:
    emitSpecializedGenericClassMetadata(IGM, type,
                                        *type.getClassOrBoundGenericClass());
    break;
  default:
    llvm_unreachable(
        "Cannot statically specialize metadata for generic types of"
        "kind other than struct, enum, and class.");
  }
}

/// Get-or-create a context descriptor that may be shared between several AST
/// declarations with the same mangling (e.g. a Clang module and its overlay);
/// emits via \p emit only when no usable definition exists yet.
llvm::Constant *
IRGenModule::getAddrOfSharedContextDescriptor(LinkEntity entity,
                                              ConstantInit definition,
                                              // NOTE(review): missing
                                              // stripped template argument.
                                              llvm::function_ref emit) {
  if (!definition) {
    // Generate the definition if it hasn't been generated yet.
    auto existing = GlobalVars.find(entity);
    if (existing == GlobalVars.end() ||
        !existing->second ||
        cast(existing->second)->isDeclaration()) {

      // In some cases we have multiple declarations in the AST that end up
      // with the same context mangling (a clang module and its overlay,
      // equivalent extensions, etc.). These can share a context descriptor
      // at runtime.
      auto mangledName = entity.mangleAsString(Context);
      if (auto otherDefinition = Module.getGlobalVariable(mangledName)) {
        if (!otherDefinition->isDeclaration() ||
            !entity.isAlwaysSharedLinkage()) {
          GlobalVars.insert({entity, otherDefinition});
          return otherDefinition;
        }
      }

      // Otherwise, emit the descriptor.
      emit();
    }
  }

  return getAddrOfLLVMVariable(entity, definition, DebugTypeInfo());
}

/// Descriptor for a module context.
llvm::Constant *
IRGenModule::getAddrOfModuleContextDescriptor(ModuleDecl *D,
                                              ConstantInit definition) {
  auto entity = LinkEntity::forModuleDescriptor(D);
  return getAddrOfSharedContextDescriptor(entity, definition,
   [&]{ ModuleContextDescriptorBuilder(*this, D).emit(); });
}

/// Descriptor for the synthetic "__ObjC" module (created lazily).
llvm::Constant *
IRGenModule::getAddrOfObjCModuleContextDescriptor() {
  if (!ObjCModule)
    ObjCModule = ModuleDecl::createEmpty(
        Context.getIdentifier(MANGLING_MODULE_OBJC), Context);
  return getAddrOfModuleContextDescriptor(ObjCModule);
}

/// Descriptor for the synthetic ClangImporter module (created lazily).
llvm::Constant *
IRGenModule::getAddrOfClangImporterModuleContextDescriptor() {
  if (!ClangImporterModule)
    ClangImporterModule = ModuleDecl::createEmpty(
        Context.getIdentifier(MANGLING_MODULE_CLANG_IMPORTER), Context);
  return getAddrOfModuleContextDescriptor(ClangImporterModule);
}

/// Descriptor for an extension context.
llvm::Constant *
IRGenModule::getAddrOfExtensionContextDescriptor(ExtensionDecl *ED,
                                                 ConstantInit definition) {
  auto entity = LinkEntity::forExtensionDescriptor(ED);
  return getAddrOfSharedContextDescriptor(entity, definition,
   [&]{ ExtensionContextDescriptorBuilder(*this, ED).emit(); });
}

/// Descriptor for an anonymous context (e.g. a function body).
/// NOTE(review): `PointerUnion DC` is missing its stripped template args.
llvm::Constant *
IRGenModule::getAddrOfAnonymousContextDescriptor(
                                     PointerUnion DC,
                                     ConstantInit definition) {
  auto entity = LinkEntity::forAnonymousDescriptor(DC);
  return getAddrOfSharedContextDescriptor(entity, definition,
    [&]{ AnonymousContextDescriptorBuilder(*this, DC).emit(); });
}

/// Descriptor for a named "original" module, creating and caching an empty
/// ModuleDecl for the name on first use.
llvm::Constant *
IRGenModule::getAddrOfOriginalModuleContextDescriptor(StringRef Name) {
  auto *M = ModuleDecl::createEmpty(Context.getIdentifier(Name), Context);
  return getAddrOfModuleContextDescriptor(
      OriginalModules.insert({Name, M}).first->getValue());
}

// (continued in the next span) Initializes the field offset vector of a
// struct or class metadata record by calling into the runtime.
void IRGenFunction::
emitInitializeFieldOffsetVector(SILType T,
                                llvm::Value *metadata,
                                bool isVWTMutable,
                                MetadataDependencyCollector *collector) {
  auto *target = T.getNominalOrBoundGenericNominal();
  llvm::Value *fieldVector = nullptr;

  // @objc @implementation classes don't actually have a field vector; for them,
  // we're just trying to update the direct field offsets.
  // NOTE(review): `isa(...)`/`cast(...)` are missing stripped template args.
  if (!isa(target) ||
      !cast(target)->getObjCImplementationDecl()) {
    fieldVector = emitAddressOfFieldOffsetVector(*this, metadata, target)
                      .getAddress();
  }

  // Collect the stored properties of the type.
  unsigned numFields = countExportableFields(IGM, target);

  // Fill out an array with the field type metadata records.
Address fields = createAlloca( llvm::ArrayType::get(IGM.Int8PtrPtrTy, numFields), IGM.getPointerAlignment(), "classFields"); Builder.CreateLifetimeStart(fields, IGM.getPointerSize() * numFields); fields = Builder.CreateStructGEP(fields, 0, Size(0)); unsigned index = 0; forEachField(IGM, target, [&](Field field) { assert(field.isConcrete() && "initializing offset vector for type with missing member?"); if (!isExportableField(field)) return; SILType propTy = field.getType(IGM, T); llvm::Value *fieldLayout = emitTypeLayoutRef(*this, propTy, collector); Address fieldLayoutAddr = Builder.CreateConstArrayGEP(fields, index, IGM.getPointerSize()); Builder.CreateStore(fieldLayout, fieldLayoutAddr); ++index; }); assert(index == numFields); // Ask the runtime to lay out the struct or class. auto numFieldsV = IGM.getSize(Size(numFields)); if (auto *classDecl = dyn_cast(target)) { // Compute class layout flags. ClassLayoutFlags flags = ClassLayoutFlags::Swift5Algorithm; switch (IGM.getClassMetadataStrategy(classDecl)) { case ClassMetadataStrategy::Resilient: break; case ClassMetadataStrategy::Singleton: case ClassMetadataStrategy::Update: case ClassMetadataStrategy::FixedOrUpdate: flags |= ClassLayoutFlags::HasStaticVTable; break; case ClassMetadataStrategy::Fixed: llvm_unreachable("Emitting metadata init for fixed class metadata?"); } llvm::Value *dependency = nullptr; switch (IGM.getClassMetadataStrategy(classDecl)) { case ClassMetadataStrategy::Resilient: case ClassMetadataStrategy::Singleton: // Call swift_initClassMetadata(). 
assert(fieldVector && "Singleton/Resilient strategies not supported for " "objcImplementation"); dependency = Builder.CreateCall( IGM.getInitClassMetadata2FunctionPointer(), {metadata, IGM.getSize(Size(uintptr_t(flags))), numFieldsV, fields.getAddress(), fieldVector}); break; case ClassMetadataStrategy::Update: case ClassMetadataStrategy::FixedOrUpdate: assert(IGM.Context.LangOpts.EnableObjCInterop); if (fieldVector) { // Call swift_updateClassMetadata(). Note that the static metadata // already references the superclass in this case, but we still want // to ensure the superclass metadata is initialized first. dependency = Builder.CreateCall( IGM.getUpdateClassMetadata2FunctionPointer(), {metadata, IGM.getSize(Size(uintptr_t(flags))), numFieldsV, fields.getAddress(), fieldVector}); } else { // If we don't have a field vector, we must be updating an // @objc @implementation class layout. Call // swift_updatePureObjCClassMetadata() instead. Builder.CreateCall( IGM.getUpdatePureObjCClassMetadataFunctionPointer(), {metadata, IGM.getSize(Size(uintptr_t(flags))), numFieldsV, fields.getAddress()}); } break; case ClassMetadataStrategy::Fixed: llvm_unreachable("Emitting metadata init for fixed class metadata?"); } // Collect any possible dependency from initializing the class; generally // this involves the superclass. if (collector && dependency) collector->collect(*this, dependency); } else { assert(isa(target)); // Compute struct layout flags. StructLayoutFlags flags = StructLayoutFlags::Swift5Algorithm; if (isVWTMutable) flags |= StructLayoutFlags::IsVWTMutable; // Call swift_initStructMetadata(). 
Builder.CreateCall(IGM.getInitStructMetadataFunctionPointer(), {metadata, IGM.getSize(Size(uintptr_t(flags))), numFieldsV, fields.getAddress(), fieldVector}); } Builder.CreateLifetimeEnd(fields, IGM.getPointerSize() * numFields); } static void emitInitializeFieldOffsetVectorWithLayoutString( IRGenFunction &IGF, SILType T, llvm::Value *metadata, bool isVWTMutable, MetadataDependencyCollector *collector) { auto &IGM = IGF.IGM; assert(IGM.Context.LangOpts.hasFeature( Feature::LayoutStringValueWitnessesInstantiation) && IGM.getOptions().EnableLayoutStringValueWitnesses); auto *target = T.getStructOrBoundGenericStruct(); llvm::Value *fieldVector = emitAddressOfFieldOffsetVector(IGF, metadata, target).getAddress(); // Collect the stored properties of the type. unsigned numFields = countExportableFields(IGM, target); // Ask the runtime to lay out the struct or class. auto numFieldsV = IGM.getSize(Size(numFields)); // Fill out an array with the field type metadata records. Address fieldsMetadata = IGF.createAlloca(llvm::ArrayType::get(IGM.Int8PtrPtrTy, numFields), IGM.getPointerAlignment(), "fieldsMetadata"); IGF.Builder.CreateLifetimeStart(fieldsMetadata, IGM.getPointerSize() * numFields); fieldsMetadata = IGF.Builder.CreateStructGEP(fieldsMetadata, 0, Size(0)); Address fieldTags = IGF.createAlloca(llvm::ArrayType::get(IGM.Int8Ty, numFields), Alignment(1), "fieldTags"); IGF.Builder.CreateLifetimeStart(fieldTags, Size(numFields)); fieldTags = IGF.Builder.CreateStructGEP(fieldTags, 0, Size(0)); unsigned index = 0; forEachField(IGM, target, [&](Field field) { assert(field.isConcrete() && "initializing offset vector for type with missing member?"); if (!isExportableField(field)) return; SILType propTy = field.getType(IGM, T); llvm::Value *fieldMetatype; llvm::Value *fieldTag; if (auto ownership = propTy.getReferenceStorageOwnership()) { auto &ti = IGF.getTypeInfo(propTy.getObjectType()); auto *fixedTI = dyn_cast(&ti); assert(fixedTI && "Reference should have fixed layout"); 
auto fixedSize = fixedTI->getFixedSize();
fieldMetatype = emitTypeLayoutRef(IGF, propTy, collector);
// Encode the reference-storage kind, and whether the reference is exactly
// pointer-sized, as a one-byte tag for the runtime.
switch (*ownership) {
case ReferenceOwnership::Unowned:
  fieldTag = llvm::Constant::getIntegerValue(
      IGM.Int8Ty, APInt(IGM.Int8Ty->getBitWidth(),
                        fixedSize == IGM.getPointerSize() ? 0x1 : 0x2));
  break;
case ReferenceOwnership::Weak:
  fieldTag = llvm::Constant::getIntegerValue(
      IGM.Int8Ty, APInt(IGM.Int8Ty->getBitWidth(),
                        fixedSize == IGM.getPointerSize() ? 0x3 : 0x4));
  break;
case ReferenceOwnership::Unmanaged:
  fieldTag = llvm::Constant::getIntegerValue(
      IGM.Int8Ty, APInt(IGM.Int8Ty->getBitWidth(),
                        fixedSize == IGM.getPointerSize() ? 0x5 : 0x6));
  break;
case ReferenceOwnership::Strong:
  llvm_unreachable("Strong reference should have been lowered");
  break;
}
} else {
  // Plain field: tag 0x0, and the field's metadata only needs to be
  // layout-complete for the runtime to compute offsets.
  fieldTag = llvm::Constant::getIntegerValue(
      IGM.Int8Ty, APInt(IGM.Int8Ty->getBitWidth(), 0x0));
  auto request = DynamicMetadataRequest::getNonBlocking(
      MetadataState::LayoutComplete, collector);

  fieldMetatype = IGF.emitTypeMetadataRefForLayout(propTy, request);
  fieldMetatype = IGF.Builder.CreateBitCast(fieldMetatype, IGM.Int8PtrPtrTy);
}

// Store the tag and the metadata/layout pointer into the parallel arrays.
Address fieldTagAddr = IGF.Builder.CreateConstArrayGEP(
    fieldTags, index, Size::forBits(IGM.Int8Ty->getBitWidth()));
IGF.Builder.CreateStore(fieldTag, fieldTagAddr);

Address fieldMetatypeAddr = IGF.Builder.CreateConstArrayGEP(
    fieldsMetadata, index, IGM.getPointerSize());
IGF.Builder.CreateStore(fieldMetatype, fieldMetatypeAddr);

++index;
});
assert(index == numFields);

// Compute struct layout flags.
StructLayoutFlags flags = StructLayoutFlags::Swift5Algorithm;
if (isVWTMutable)
  flags |= StructLayoutFlags::IsVWTMutable;

// Call swift_initStructMetadataWithLayoutString().
IGF.Builder.CreateCall(
    IGM.getInitStructMetadataWithLayoutStringFunctionPointer(),
    {metadata, IGM.getSize(Size(uintptr_t(flags))), numFieldsV,
     fieldsMetadata.getAddress(), fieldTags.getAddress(), fieldVector});

IGF.Builder.CreateLifetimeEnd(fieldTags, IGM.getPointerSize() * numFields);
IGF.Builder.CreateLifetimeEnd(fieldsMetadata,
                              IGM.getPointerSize() * numFields);
}

/// Oldest fallback for @_rawLayout struct metadata initialization: build a
/// synthetic single-field type layout mirroring the "like" type and reuse
/// swift_initStructMetadata(), for runtimes that predate
/// swift_initRawStructMetadata().
static void emitInitializeRawLayoutOldOld(IRGenFunction &IGF, SILType likeType,
                                          llvm::Value *count, SILType T,
                                          llvm::Value *metadata,
                                          MetadataDependencyCollector *collector) {
  auto &IGM = IGF.IGM;

  // This is the list of field type layouts that we're going to pass to the init
  // function. This will only ever hold 1 field which is the temporary one we're
  // going to build up from our like type's layout.
  auto fieldLayouts = IGF.createAlloca(llvm::ArrayType::get(IGM.PtrTy, 1),
                                       IGM.getPointerAlignment(),
                                       "fieldLayouts");
  IGF.Builder.CreateLifetimeStart(fieldLayouts, IGM.getPointerSize());

  // We're going to pretend that this is our field offset vector for the init to
  // write to. We don't actually have fields, so we don't want to write a field
  // offset in our metadata.
  auto fieldOffsets = IGF.createAlloca(IGM.Int32Ty, Alignment(4),
                                       "fieldOffsets");
  IGF.Builder.CreateLifetimeStart(fieldOffsets, Size(4));

  // We need to make a temporary type layout with most of the same information
  // from the type we're like.
  auto ourTypeLayout = IGF.createAlloca(IGM.TypeLayoutTy,
                                        IGM.getPointerAlignment(),
                                        "ourTypeLayout");
  IGF.Builder.CreateLifetimeStart(ourTypeLayout, IGM.getPointerSize());

  // Put our temporary type layout in the list of layouts we're using to
  // initialize.
  IGF.Builder.CreateStore(ourTypeLayout.getAddress(), fieldLayouts);

  // Get the like type's type layout.
  auto likeTypeLayout = emitTypeLayoutRef(IGF, likeType, collector);

  // Grab the size, stride, and alignmentMask out of the layout.
auto loadedTyLayout = IGF.Builder.CreateLoad(
    Address(likeTypeLayout, IGM.TypeLayoutTy, IGM.getPointerAlignment()),
    "typeLayout");
// TypeLayout fields by index: 0 = size, 1 = stride, 2 = flags,
// 3 = extra-inhabitant count.
auto size = IGF.Builder.CreateExtractValue(loadedTyLayout, 0, "size");
auto stride = IGF.Builder.CreateExtractValue(loadedTyLayout, 1, "stride");
auto flags = IGF.Builder.CreateExtractValue(loadedTyLayout, 2, "flags");
auto xi = IGF.Builder.CreateExtractValue(loadedTyLayout, 3, "xi");

// This will zero out the other bits.
auto alignMask = IGF.Builder.CreateAnd(flags, ValueWitnessFlags::AlignmentMask,
                                       "alignMask");

// Set the isNonPOD bit. This is important because older runtimes will attempt
// to replace various vwt functions with more optimized ones. In this case, we
// want to preserve the fact that noncopyable types have unreachable copy vwt
// functions.
auto vwtFlags = IGF.Builder.CreateOr(alignMask, ValueWitnessFlags::IsNonPOD,
                                     "vwtFlags");

// Count is only ever null if we're not an array like layout.
if (count != nullptr) {
  // Array variant: total size and stride scale with the element count.
  stride = IGF.Builder.CreateMul(stride, count);
  size = stride;
}

// Reassemble a TypeLayout aggregate with the adjusted fields.
llvm::Value *resultAgg = llvm::UndefValue::get(IGM.TypeLayoutTy);
resultAgg = IGF.Builder.CreateInsertValue(resultAgg, size, 0);
resultAgg = IGF.Builder.CreateInsertValue(resultAgg, stride, 1);
resultAgg = IGF.Builder.CreateInsertValue(resultAgg, vwtFlags, 2);
resultAgg = IGF.Builder.CreateInsertValue(resultAgg, xi, 3);

IGF.Builder.CreateStore(resultAgg, ourTypeLayout);

StructLayoutFlags fnFlags = StructLayoutFlags::Swift5Algorithm;

// Call swift_initStructMetadata().
IGF.Builder.CreateCall(IGM.getInitStructMetadataFunctionPointer(),
                       {metadata, IGM.getSize(Size(uintptr_t(fnFlags))),
                        IGM.getSize(Size(1)), fieldLayouts.getAddress(),
                        fieldOffsets.getAddress()});

IGF.Builder.CreateLifetimeEnd(ourTypeLayout, IGM.getPointerSize());
IGF.Builder.CreateLifetimeEnd(fieldOffsets, Size(4));
IGF.Builder.CreateLifetimeEnd(fieldLayouts, IGM.getPointerSize());
}

/// Fallback for @_rawLayout struct metadata initialization via
/// swift_initRawStructMetadata(), used when the deployment target predates
/// swift_initRawStructMetadata2(); itself falls back further when even the
/// older entry point is unavailable.
static void emitInitializeRawLayoutOld(IRGenFunction &IGF, SILType likeType,
                                       llvm::Value *count, SILType T,
                                       llvm::Value *metadata,
                                       MetadataDependencyCollector *collector) {
  // If our deployment target doesn't contain the swift_initRawStructMetadata,
  // emit a call to the swift_initStructMetadata tricking it into thinking
  // we have a single field.
  auto deploymentAvailability =
      AvailabilityRange::forDeploymentTarget(IGF.IGM.Context);
  auto initRawAvail = IGF.IGM.Context.getInitRawStructMetadataAvailability();

  if (!IGF.IGM.Context.LangOpts.DisableAvailabilityChecking &&
      !deploymentAvailability.isContainedIn(initRawAvail) &&
      !IGF.IGM.getSwiftModule()->isStdlibModule()) {
    emitInitializeRawLayoutOldOld(IGF, likeType, count, T, metadata, collector);
    return;
  }

  auto &IGM = IGF.IGM;
  auto likeTypeLayout = emitTypeLayoutRef(IGF, likeType, collector);
  StructLayoutFlags flags = StructLayoutFlags::Swift5Algorithm;

  // If we don't have a count, then we're the 'like:' variant and we need to
  // pass '-1' to the runtime call.
  if (!count) {
    count = llvm::ConstantInt::get(IGF.IGM.Int32Ty, -1);
  }

  // Call swift_initRawStructMetadata().
  IGF.Builder.CreateCall(IGM.getInitRawStructMetadataFunctionPointer(),
                         {metadata, IGM.getSize(Size(uintptr_t(flags))),
                          likeTypeLayout, count});
}

/// Initialize the metadata of a @_rawLayout struct, dispatching to older
/// runtime entry points when required by the deployment target.
static void emitInitializeRawLayout(IRGenFunction &IGF, SILType likeType,
                                    llvm::Value *count, SILType T,
                                    llvm::Value *metadata,
                                    MetadataDependencyCollector *collector) {
  // If our deployment target doesn't contain the swift_initRawStructMetadata2,
  // emit a call to the older swift_initRawStructMetadata.
auto deploymentAvailability =
    AvailabilityRange::forDeploymentTarget(IGF.IGM.Context);
auto initRaw2Avail = IGF.IGM.Context.getInitRawStructMetadata2Availability();

if (!IGF.IGM.Context.LangOpts.DisableAvailabilityChecking &&
    !deploymentAvailability.isContainedIn(initRaw2Avail) &&
    !IGF.IGM.getSwiftModule()->isStdlibModule()) {
  emitInitializeRawLayoutOld(IGF, likeType, count, T, metadata, collector);
  return;
}

auto &IGM = IGF.IGM;
auto rawLayout = T.getRawLayout();
auto likeTypeLayout = emitTypeLayoutRef(IGF, likeType, collector);
auto structLayoutflags = StructLayoutFlags::Swift5Algorithm;
auto rawLayoutFlags = (RawLayoutFlags) 0;

if (rawLayout->shouldMoveAsLikeType())
  rawLayoutFlags |= RawLayoutFlags::MovesAsLike;

// If we don't have a count, then we're the 'like:' variant so just pass some
// 0 to the runtime call.
if (!count) {
  count = IGM.getSize(Size(0));
} else {
  rawLayoutFlags |= RawLayoutFlags::IsArray;
}

// Call swift_initRawStructMetadata2().
IGF.Builder.CreateCall(IGM.getInitRawStructMetadata2FunctionPointer(),
                       {metadata,
                        IGM.getSize(Size(uintptr_t(structLayoutflags))),
                        likeTypeLayout, count,
                        IGM.getSize(Size(uintptr_t(rawLayoutFlags)))});
}

/// Initialize the metadata of a value type (struct or enum), selecting the
/// raw-layout, layout-string, or classic field-offset-vector path.
static void emitInitializeValueMetadata(IRGenFunction &IGF,
                                        NominalTypeDecl *nominalDecl,
                                        llvm::Value *metadata,
                                        bool isVWTMutable,
                                        MetadataDependencyCollector *collector) {
  auto &IGM = IGF.IGM;
  auto loweredTy = IGM.getLoweredType(nominalDecl->getDeclaredTypeInContext());
  auto &concreteTI = IGM.getTypeInfo(loweredTy);

  // Layout strings are only usable when instantiation support is enabled
  // and the type is copyable.
  bool useLayoutStrings =
      layoutStringsEnabled(IGM) &&
      IGM.Context.LangOpts.hasFeature(
          Feature::LayoutStringValueWitnessesInstantiation) &&
      IGM.getOptions().EnableLayoutStringValueWitnessesInstantiation &&
      concreteTI.isCopyable(ResilienceExpansion::Maximal);

  // NOTE(review): template arguments were dropped from this snippet
  // (dyn_cast/isa without type arguments); verify against upstream.
  if (auto sd = dyn_cast(nominalDecl)) {
    if (isa(concreteTI))
      return;

    // Use a different runtime function to initialize the value witness table
    // if the struct has a raw layout.
// The existing swift_initStructMetadata
// is the wrong thing for these types.
if (auto rawLayout = nominalDecl->getAttrs().getAttribute()) {
  SILType loweredLikeType;
  llvm::Value *count = nullptr;

  if (auto likeType = rawLayout->getResolvedScalarLikeType(sd)) {
    loweredLikeType = IGM.getLoweredType(AbstractionPattern::getOpaque(),
                                         *likeType);
  } else if (auto likeArray = rawLayout->getResolvedArrayLikeTypeAndCount(sd)) {
    auto likeType = likeArray->first;
    auto countType = likeArray->second;
    loweredLikeType = IGM.getLoweredType(AbstractionPattern::getOpaque(),
                                         likeType);
    // The count is a value generic; materialize it as a runtime value.
    count = IGF.emitValueGenericRef(countType->getCanonicalType());
  }

  emitInitializeRawLayout(IGF, loweredLikeType, count, loweredTy, metadata,
                          collector);
  return;
}

if (useLayoutStrings) {
  emitInitializeFieldOffsetVectorWithLayoutString(IGF, loweredTy, metadata,
                                                  isVWTMutable, collector);
} else {
  IGF.emitInitializeFieldOffsetVector(loweredTy, metadata, isVWTMutable,
                                      collector);
}
} else {
  // Enum case: delegate to the enum implementation strategy.
  // NOTE(review): template argument dropped from isa; verify upstream.
  assert(isa(nominalDecl));
  auto &strategy = getEnumImplStrategy(IGM, loweredTy);
  if (useLayoutStrings) {
    strategy.initializeMetadataWithLayoutString(IGF, metadata, isVWTMutable,
                                                loweredTy, collector);
  } else {
    strategy.initializeMetadata(IGF, metadata, isVWTMutable, loweredTy,
                                collector);
  }
}
}

/// Runtime-initialize class metadata: set the superclass and fill out the
/// field offset vector (only valid for non-Fixed metadata strategies).
static void emitInitializeClassMetadata(IRGenFunction &IGF,
                                        ClassDecl *classDecl,
                                        const ClassLayout &fieldLayout,
                                        llvm::Value *metadata,
                                        MetadataDependencyCollector *collector) {
  auto &IGM = IGF.IGM;

  assert(IGM.getClassMetadataStrategy(classDecl) !=
         ClassMetadataStrategy::Fixed);

  auto loweredTy = IGM.getLoweredType(classDecl->getDeclaredTypeInContext());

  // Set the superclass, fill out the field offset vector, and copy vtable
  // entries, generic requirements and field offsets from superclasses.
IGF.emitInitializeFieldOffsetVector(loweredTy, metadata,
                                    /*VWT is mutable*/ false, collector);

// Realizing the class with the ObjC runtime will copy back to the
// field offset globals for us; but if ObjC interop is disabled, we
// have to do that ourselves, assuming we didn't just emit them all
// correctly in the first place.
// FIXME: make the runtime do this in all cases, because there's no
// good reason it shouldn't
if (!IGM.ObjCInterop) {
  forEachField(IGM, classDecl, [&](Field field) {
    // FIXME: should we handle the other cases here?
    if (field.getKind() != Field::Var)
      return;
    auto prop = field.getVarDecl();
    auto fieldInfo = fieldLayout.getFieldAccessAndElement(prop);
    if (fieldInfo.first == FieldAccess::NonConstantDirect) {
      Address offsetA = IGM.getAddrOfFieldOffset(prop, ForDefinition);

      // We can't use emitClassFieldOffset() here because that creates
      // an invariant load, which could be hoisted above the point
      // where the metadata becomes fully initialized
      auto slot =
          emitAddressOfClassFieldOffset(IGF, metadata, classDecl, prop);
      auto offsetVal = IGF.emitInvariantLoad(slot);
      IGF.Builder.CreateStore(offsetVal, offsetA);
    }
  });
}
}

/// Map a value-type declaration to its runtime MetadataKind
/// (Struct, Enum, or Optional).
static MetadataKind getMetadataKind(NominalTypeDecl *nominalDecl) {
  // NOTE(review): template arguments were dropped from this snippet
  // (isa without a type argument); verify against upstream.
  if (isa(nominalDecl))
    return MetadataKind::Struct;

  assert(isa(nominalDecl));
  return (nominalDecl->isOptionalDecl() ? MetadataKind::Optional
                                        : MetadataKind::Enum);
}

/*****************************************************************************/
/** Metadata Emission ********************************************************/
/*****************************************************************************/

namespace {
  /// An adapter class which turns a metadata layout class into a
  /// generic metadata layout class.
  // NOTE(review): the template parameter list was lost in this snippet
  // (likely <class Impl, class DeclType>); verify against upstream.
  template class GenericMetadataBuilderBase {
  protected:
    IRGenModule &IGM;
    DeclType *Target;
    ConstantStructBuilder &B;

    /// Set to true if the metadata record for the generic type has fields
    /// outside of the generic parameter vector.
bool HasDependentMetadata = false; /// Set to true if the value witness table for the generic type is dependent /// on its generic parameters. Implies HasDependentMetadata. bool HasDependentVWT = false; GenericMetadataBuilderBase(IRGenModule &IGM, DeclType *Target, ConstantStructBuilder &B) : IGM(IGM), Target(Target), B(B) {} /// Emit the instantiation cache variable for the template. void emitInstantiationCache() { if (IGM.IRGen.Opts.NoPreallocatedInstantiationCaches) return; auto cache = cast( IGM.getAddrOfTypeMetadataInstantiationCache(Target, ForDefinition)); auto init = llvm::ConstantAggregateZero::get(cache->getValueType()); cache->setInitializer(init); } SILType getLoweredType() { return IGM.getLoweredType(Target->getDeclaredTypeInContext()); } Impl &asImpl() { return *static_cast(this); } llvm::Constant *emitLayoutString() { if (!layoutStringsEnabled(IGM)) return nullptr; auto lowered = getLoweredTypeInPrimaryContext(IGM, Target); auto &ti = IGM.getTypeInfo(lowered); auto *typeLayoutEntry = ti.buildTypeLayoutEntry(IGM, lowered, /*useStructLayouts*/ true); auto genericSig = lowered.getNominalOrBoundGenericNominal()->getGenericSignature(); return typeLayoutEntry->layoutString(IGM, genericSig); } llvm::Constant *getLayoutString() { return emitLayoutString(); } /// Emit the create function for the template. void emitInstantiationFunction() { // using MetadataInstantiator = // Metadata *(TypeContextDescriptor *type, // const void * const *arguments, // const GenericMetadataPattern *pattern); llvm::Function *f = IGM.getAddrOfTypeMetadataInstantiationFunction(Target, ForDefinition); f->setAttributes(IGM.constructInitialAttributes()); f->setDoesNotThrow(); IGM.setHasNoFramePointer(f); IGM.setColocateMetadataSection(f); IRGenFunction IGF(IGM, f); // Skip instrumentation when building for TSan to avoid false positives. // The synchronization for this happens in the Runtime and we do not see it. 
if (IGM.IRGen.Opts.Sanitizers & SanitizerKind::Thread)
  f->removeFnAttr(llvm::Attribute::SanitizeThread);

if (IGM.DebugInfo)
  IGM.DebugInfo->emitArtificialFunction(IGF, f);

// Claim the three MetadataInstantiator parameters in order.
Explosion params = IGF.collectParameters();
llvm::Value *descriptor = params.claimNext();
llvm::Value *args = params.claimNext();
llvm::Value *templatePointer = params.claimNext();

// Bind the generic arguments.
if (Target->isGenericContext()) {
  Address argsArray(args, IGM.Int8PtrTy, IGM.getPointerAlignment());
  emitPolymorphicParametersFromArray(IGF, Target, argsArray,
                                     MetadataState::Abstract);
}

// Allocate the metadata.
llvm::Value *metadata =
    asImpl().emitAllocateMetadata(IGF, descriptor, args, templatePointer);

IGF.Builder.CreateRet(metadata);
}

/// Emit the completion function for the template, which finishes
/// initializing an instantiated metadata record.
void emitCompletionFunction() {
  // using MetadataCompleter =
  //   MetadataDependency(Metadata *type,
  //                      MetadataCompletionContext *context,
  //                      const GenericMetadataPattern *pattern);
  emitMetadataCompletionFunction(
      IGM, Target,
      [&](IRGenFunction &IGF, llvm::Value *metadata,
          MetadataDependencyCollector *collector) {
    // Bind the generic arguments.
    // FIXME: this will be problematic if we ever try to bind superclass
    // types from type metadata!
    assert(Target->isGenericContext());
    auto type = Target->getDeclaredTypeInContext()->getCanonicalType();
    IGF.bindLocalTypeDataFromTypeMetadata(type, IsExact, metadata,
                                          MetadataState::Abstract);

    // A dependent VWT means that we have dependent metadata.
    if (HasDependentVWT)
      HasDependentMetadata = true;

    if (HasDependentMetadata)
      asImpl().emitInitializeMetadata(IGF, metadata, false, collector);
  });
}

/// The information necessary to fill in a GenericMetadataPartialPattern
/// structure.
struct PartialPattern {
  llvm::Constant *Data;
  Size DataOffset;
  Size DataSize;
};

/// Append a GenericMetadataPartialPattern record to the pattern.
void addPartialPattern(PartialPattern pattern) {
  // RelativeDirectPointer Pattern;
  B.addRelativeAddress(pattern.Data);

  // uint16_t OffsetInWords;
  B.addInt16(IGM.getOffsetInWords(pattern.DataOffset));

  // uint16_t SizeInWords;
  B.addInt16(IGM.getOffsetInWords(pattern.DataSize));
}

public:
void createMetadataAccessFunction() {
  (void) getGenericTypeMetadataAccessFunction(IGM, Target, ForDefinition);
}

/// Lay out the entire generic metadata pattern: header, optional partial
/// patterns, then the instantiation definitions.
void layout() {
  asImpl().layoutHeader();

  // See also: [pre-5.2-extra-data-zeroing]
  // See also: [pre-5.3-extra-data-zeroing]
  if (asImpl().hasExtraDataPattern()) {
    asImpl().addExtraDataPattern();
  }

  // Immediate-members pattern. This is only valid for classes.
  if (asImpl().hasImmediateMembersPattern()) {
    asImpl().addImmediateMembersPattern();
  }

  // We're done with the pattern now.
#ifndef NDEBUG
  auto finalOffset = B.getNextOffsetFromGlobal();
#endif

  asImpl().emitInstantiationDefinitions();

  assert(finalOffset == B.getNextOffsetFromGlobal() &&
         "emitInstantiationDefinitions added members to the pattern!");
}

// Emit the fields of GenericMetadataPattern.
void layoutHeader() {
  // RelativePointer InstantiationFunction;
  asImpl().addInstantiationFunction();

  // RelativePointer CompletionFunction;
  asImpl().addCompletionFunction();

  // ClassMetadataPatternFlags PatternFlags;
  asImpl().addPatternFlags();
}

void addInstantiationFunction() {
  auto function =
      IGM.getAddrOfTypeMetadataInstantiationFunction(Target, NotForDefinition);
  B.addCompactFunctionReference(function);
}

void addCompletionFunction() {
  if (!asImpl().hasCompletionFunction()) {
    // No completion function: emit a null relative pointer.
    B.addInt32(0);
    return;
  }

  auto function =
      IGM.getAddrOfTypeMetadataCompletionFunction(Target, NotForDefinition);
  B.addCompactFunctionReference(function);
}

void addPatternFlags() {
  GenericMetadataPatternFlags flags = asImpl().getPatternFlags();
  B.addInt32(flags.getOpaqueValue());
}

GenericMetadataPatternFlags getPatternFlags() {
  GenericMetadataPatternFlags flags;

  if (asImpl().hasExtraDataPattern())
    flags.setHasExtraDataPattern(true);

  return flags;
}

// Default (no-op) implementations; concrete builders override as needed.
bool hasExtraDataPattern() { return false; }
void addExtraDataPattern() {
  asImpl().addPartialPattern(asImpl().buildExtraDataPattern());
}
PartialPattern buildExtraDataPattern() {
  llvm_unreachable("no extra data pattern!");
}

bool hasImmediateMembersPattern() { return false; }
void addImmediateMembersPattern() {
  asImpl().addPartialPattern(asImpl().buildImmediateMembersPattern());
}
PartialPattern buildImmediateMembersPattern() {
  llvm_unreachable("no immediate members pattern!");
}

void emitInstantiationDefinitions() {
  // Force the emission of the nominal type descriptor, although we
  // don't use it yet.
  (void) asImpl().emitNominalTypeDescriptor();

  // Emit the instantiation function.
  asImpl().emitInstantiationFunction();

  // Emit the completion function.
  if (asImpl().hasCompletionFunction())
    asImpl().emitCompletionFunction();

  // Emit the instantiation cache.
asImpl().emitInstantiationCache();
}
};

/// Shared base for generic value-type (struct/enum) metadata builders,
/// adding the value witness table to the pattern header.
// NOTE(review): template parameter lists / arguments were lost in this
// snippet; verify against upstream.
template class GenericValueMetadataBuilderBase
    : public GenericMetadataBuilderBase {
  using super = GenericMetadataBuilderBase;

protected:
  using super::IGM;
  using super::asImpl;
  using super::Target;
  using super::B;

  template GenericValueMetadataBuilderBase(IRGenModule &IGM,
                                           DeclType *Target,
                                           ConstantStructBuilder &B)
      : super(IGM, Target, B) {}

  SILType getLoweredType() {
    return IGM.getLoweredType(Target->getDeclaredTypeInContext());
  }

public:
  /// Emit the fields of a GenericValueMetadataPattern.
  void layoutHeader() {
    super::layoutHeader();

    // RelativeIndirectablePointer ValueWitnesses;
    asImpl().addValueWitnessTable();
  }

  GenericMetadataPatternFlags getPatternFlags() {
    auto flags = super::getPatternFlags();

    flags.value_setMetadataKind(getMetadataKind(Target));

    assert(!asImpl().hasImmediateMembersPattern());

    return flags;
  }

  void addValueWitnessTable() {
    ConstantReference table =
        asImpl().emitValueWitnessTable(/*relative*/ true);
    B.addRelativeAddress(table);
  }

  void emitInitializeMetadata(IRGenFunction &IGF, llvm::Value *metadata,
                              bool isVWTMutable,
                              MetadataDependencyCollector *collector) {
    emitInitializeValueMetadata(IGF, Target, metadata, isVWTMutable,
                                collector);
  }
};
} // end anonymous namespace

/// Create an access function for the given type which triggers the
/// in-place initialization path.
static void
createSingletonInitializationMetadataAccessFunction(IRGenModule &IGM,
                                                    NominalTypeDecl *typeDecl,
                                                    CanType type) {
  assert(!typeDecl->isGenericContext());

  (void) createTypeMetadataAccessFunction(
      IGM, type, CacheStrategy::SingletonInitialization,
      [&](IRGenFunction &IGF, DynamicMetadataRequest request,
          llvm::Constant *cacheVariable) {
    // NOTE(review): template argument dropped from dyn_cast; verify
    // upstream.
    if (auto CD = dyn_cast(typeDecl)) {
      if (CD->getObjCImplementationDecl()) {
        // Use the Objective-C runtime symbol instead of the Swift one.
        llvm::Value *descriptor =
            IGF.IGM.getAddrOfObjCClass(CD, NotForDefinition);

        // Make the ObjC runtime initialize the class.
llvm::Value *initializedDescriptor =
    IGF.Builder.CreateCall(IGF.IGM.getFixedClassInitializationFn(),
                           {descriptor});

// Turn the ObjC class into a valid Swift metadata pointer.
auto response =
    IGF.Builder.CreateCall(IGF.IGM.getGetObjCClassMetadataFunctionPointer(),
                           {initializedDescriptor});
return MetadataResponse::forComplete(response);
}
}

// Generic singleton path: ask the runtime for the metadata via the type
// context descriptor.
llvm::Value *descriptor =
    IGF.IGM.getAddrOfTypeContextDescriptor(typeDecl, RequireMetadata);
auto responsePair =
    IGF.Builder.CreateCall(IGF.IGM.getGetSingletonMetadataFunctionPointer(),
                           {request.get(IGF), descriptor});
return MetadataResponse::handle(IGF, request, responsePair);
});
}

/// Create an access function for the given non-generic type.
static void createNonGenericMetadataAccessFunction(IRGenModule &IGM,
                                                   NominalTypeDecl *typeDecl) {
  assert(!typeDecl->isGenericContext());
  auto type = typeDecl->getDeclaredType()->getCanonicalType();

  // If the type requires the in-place initialization pattern, use it.
  if (needsSingletonMetadataInitialization(IGM, typeDecl)) {
    createSingletonInitializationMetadataAccessFunction(IGM, typeDecl, type);
    return;
  }

  // Otherwise, use the lazy pattern, which should be emitted using a
  // direct reference to the metadata.
  createDirectTypeMetadataAccessFunction(IGM, type, /*allow existing*/ false);
}

// Classes

/// Emit the base-offset variable for the class.
static void emitClassMetadataBaseOffset(IRGenModule &IGM,
                                        ClassDecl *classDecl) {
  assert(!classDecl->isForeignReferenceType());

  // Otherwise, we know the offset at compile time, even if our
  // clients do not, so just emit a constant.
  auto &layout = IGM.getClassMetadataLayout(classDecl);

  // Only classes defined in resilient modules, or those that have
  // a resilient superclass need this.
if (!layout.hasResilientSuperclass() &&
    !IGM.hasResilientMetadata(classDecl, ResilienceExpansion::Minimal)) {
  return;
}

auto *offsetAddr = IGM.getAddrOfClassMetadataBounds(classDecl, ForDefinition);
// NOTE(review): template arguments dropped from the cast<> calls below;
// verify against upstream.
auto *offsetVar = cast(offsetAddr);

if (layout.hasResilientSuperclass()) {
  // If the superclass is resilient to us, we have to compute and
  // initialize the global when we initialize the metadata.
  auto init = llvm::ConstantAggregateZero::get(offsetVar->getValueType());

  offsetVar->setInitializer(init);
  offsetVar->setConstant(false);
  return;
}

// Compile-time-known bounds: immediate-members offset plus the
// negative/positive extents of the metadata measured in words.
auto immediateMembersOffset = layout.getStartOfImmediateMembers();
auto size = layout.getSize();
auto negativeSizeInWords = size.AddressPoint / IGM.getPointerSize();
auto positiveSizeInWords = size.getOffsetToEnd() / IGM.getPointerSize();

auto initTy = cast(offsetVar->getValueType());
auto *init = llvm::ConstantStruct::get(initTy, {
  llvm::ConstantInt::get(IGM.SizeTy, immediateMembersOffset.getValue()),
  llvm::ConstantInt::get(IGM.Int32Ty, negativeSizeInWords),
  llvm::ConstantInt::get(IGM.Int32Ty, positiveSizeInWords)
});

offsetVar->setInitializer(init);
offsetVar->setConstant(true);
}

/// Look up the deallocating destructor for the class in SIL, if present.
static std::optional
getAddrOfDestructorFunction(IRGenModule &IGM, ClassDecl *classDecl) {
  auto dtorRef = SILDeclRef(classDecl->getDestructor(),
                            SILDeclRef::Kind::Deallocator);
  SILFunction *dtorFunc = IGM.getSILModule().lookUpFunction(dtorRef);
  if (!dtorFunc)
    return std::nullopt;
  return IGM.getAddrOfSILFunction(dtorFunc, NotForDefinition);
}

/// Emit the per-field offset global variables for a class, using the
/// fragile layout for values and the resilient layout to decide constness.
static void emitFieldOffsetGlobals(IRGenModule &IGM,
                                   ClassDecl *classDecl,
                                   const ClassLayout &fragileLayout,
                                   const ClassLayout &resilientLayout) {
  forEachField(IGM, classDecl, [&](Field field) {
    switch (field.getKind()) {
    // This is case we actually care about.
    case Field::Var:
      break;

    // We should never be in this case when emitting a type.
case Field::MissingMember:
  llvm_unreachable("unexpected missing member when emitting type");

// We don't need to emit an offset global for the default-actor
// storage, which is never accessed directly.
case Field::DefaultActorStorage:
  return;
case Field::NonDefaultDistributedActorStorage:
  return;
}

auto prop = field.getVarDecl();
auto fieldInfo = fragileLayout.getFieldAccessAndElement(prop);
auto access = fieldInfo.first;
auto element = fieldInfo.second;

llvm::Constant *fieldOffsetOrZero;

if (element.hasByteOffset()) {
  // Use a fixed offset if we have one.
  fieldOffsetOrZero = IGM.getSize(element.getByteOffset());
} else {
  // Otherwise, leave a placeholder for the runtime to populate at runtime.
  fieldOffsetOrZero = IGM.getSize(Size(0));
}

switch (access) {
case FieldAccess::ConstantDirect:
case FieldAccess::NonConstantDirect: {
  // Emit a global variable storing the constant field offset.
  // If the superclass was imported from Objective-C, the offset
  // does not include the superclass size; we rely on the
  // Objective-C runtime sliding it down.
  //
  // TODO: Don't emit the symbol if field has a fixed offset and size
  // in all resilience domains
  auto offsetAddr = IGM.getAddrOfFieldOffset(prop, ForDefinition);
  // NOTE(review): template argument dropped from cast; verify upstream.
  auto offsetVar = cast(offsetAddr.getAddress());
  offsetVar->setInitializer(fieldOffsetOrZero);

  // If the offset is constant in the resilient layout, it will not change
  // at runtime, and the global can be true const.
  //
  // If it is constant in the fragile layout only, newer Objective-C
  // runtimes will still update them in place, so make sure to check the
  // correct layout.
  //
  // The one exception to this rule is with empty fields with
  // ObjC-resilient heritage. The ObjC runtime will attempt to slide
  // these offsets if it slides the rest of the class, and in doing so
  // it will compute a different offset than we computed statically.
  // But this is ultimately unimportant because we do not care about the
  // offset of an empty field.
auto resilientInfo = resilientLayout.getFieldAccessAndElement(prop);
if (resilientInfo.first == FieldAccess::ConstantDirect &&
    (!resilientInfo.second.isEmpty() ||
     !resilientLayout.mayRuntimeAssignNonZeroOffsetsToEmptyFields())) {
  // If it is constant in the resilient layout, it should be constant in
  // the fragile layout also.
  assert(access == FieldAccess::ConstantDirect);
  assert(element.hasByteOffset());
  offsetVar->setConstant(true);
}

break;
}

case FieldAccess::ConstantIndirect:
  // No global variable is needed.
  break;
}
});
}

/// Compute the ClassFlags word for a class declaration (refcounting kind
/// and custom-ObjC-name bits).
static ClassFlags getClassFlags(ClassDecl *classDecl) {
  auto flags = ClassFlags();

  // Set a flag if the class uses Swift refcounting.
  auto type = classDecl->getDeclaredType()->getCanonicalType();
  if (type->getReferenceCounting() == ReferenceCounting::Native) {
    flags |= ClassFlags::UsesSwiftRefcounting;
  }

  // Set a flag if the class has a custom ObjC name.
  // NOTE(review): template arguments dropped from the attribute lookups
  // below (getAttribute/hasAttribute); verify against upstream.
  DeclAttributes attrs = classDecl->getAttrs();
  if (auto objc = attrs.getAttribute()) {
    if (objc->getName())
      flags |= ClassFlags::HasCustomObjCName;
  }
  if (attrs.hasAttribute())
    flags |= ClassFlags::HasCustomObjCName;

  return flags;
}

namespace {
  /// Base class for layout of non-generic class metadata.
  // NOTE(review): template parameter lists and template arguments were lost
  // in this snippet; verify against upstream before compiling.
  template class ClassMetadataBuilderBase : public ClassMetadataVisitor {
    using super = ClassMetadataVisitor;

  protected:
    using NominalDecl = ClassDecl;
    using super::asImpl;
    using super::IGM;
    using super::Target;
    using super::VTable;

    ConstantStructBuilder &B;

    const ClassLayout &FieldLayout;
    const ClassMetadataLayout &MetadataLayout;

    Size AddressPoint;

    // As we're constructing the vtable, VTableEntriesForVFE stores the offset
    // (from the beginning of the global) for each vtable slot. The offsets are
    // later turned into !type metadata attributes.
// NOTE(review): element type lost in this snippet (likely
// std::pair<Size, SILDeclRef>); verify against upstream.
SmallVector, 8> VTableEntriesForVFE;

public:
ClassMetadataBuilderBase(IRGenModule &IGM, ClassDecl *theClass,
                         ConstantStructBuilder &builder,
                         const ClassLayout &fieldLayout)
    : super(IGM, theClass), B(builder), FieldLayout(fieldLayout),
      MetadataLayout(IGM.getClassMetadataLayout(theClass)) {}

ClassMetadataBuilderBase(IRGenModule &IGM, ClassDecl *theClass,
                         ConstantStructBuilder &builder,
                         const ClassLayout &fieldLayout, SILVTable *vtable)
    : super(IGM, theClass, vtable), B(builder), FieldLayout(fieldLayout),
      MetadataLayout(IGM.getClassMetadataLayout(theClass)) {}

public:
const ClassLayout &getFieldLayout() const { return FieldLayout; }

using super::isPureObjC;

SILType getLoweredType() {
  return IGM.getLoweredType(Target->getDeclaredTypeInContext());
}

/// Record where the address point of the metadata will be.
void noteAddressPoint() {
  ClassMetadataVisitor::noteAddressPoint();
  AddressPoint = B.getNextOffsetFromGlobal();
}

ClassFlags getClassFlags() { return ::getClassFlags(Target); }

void addClassFlags() {
  assert(!isPureObjC());
  B.addInt32((uint32_t)asImpl().getClassFlags());
}

void noteResilientSuperclass() {}

void noteStartOfImmediateMembers(ClassDecl *theClass) {}

ConstantReference getValueWitnessTable(bool relativeReference) {
  assert(
      !relativeReference &&
      "Cannot get a relative reference to a class' value witness table.");
  switch (IGM.getClassMetadataStrategy(Target)) {
  case ClassMetadataStrategy::Resilient:
  case ClassMetadataStrategy::Singleton:
    // The runtime fills in the value witness table for us.
    return ConstantReference(
        llvm::ConstantPointerNull::get(IGM.WitnessTablePtrTy),
        swift::irgen::ConstantReference::Direct);

  case ClassMetadataStrategy::Update:
  case ClassMetadataStrategy::FixedOrUpdate:
  case ClassMetadataStrategy::Fixed: {
    // FIXME: Should this check HasImported instead?
    auto type = (Target->checkAncestry(AncestryFlags::ObjC) ?
IGM.Context.getAnyObjectType()
           : IGM.Context.TheNativeObjectType);
auto wtable = IGM.getAddrOfValueWitnessTable(type);
return ConstantReference(wtable,
                         swift::irgen::ConstantReference::Direct);
}
}
llvm_unreachable("covered switch");
}

void addValueWitnessTable() {
  assert(!isPureObjC());
  auto wtable = asImpl().getValueWitnessTable(false).getValue();
  // NOTE(review): template argument dropped from isa; verify upstream.
  if (!isa(wtable)) {
    // Sign non-null value witness table pointers when pointer
    // authentication is enabled.
    auto schema = IGM.getOptions().PointerAuth.ValueWitnessTable;
    B.addSignedPointer(wtable, schema, PointerAuthEntity());
  } else {
    B.add(wtable);
  }
}

llvm::Constant *getAddrOfMetaclassObject(ForDefinition_t forDefinition) {
  return IGM.getAddrOfMetaclassObject(Target, forDefinition);
}

/// The 'metadata flags' field in a class is actually a pointer to
/// the metaclass object for the class.
///
/// NONAPPLE: This is only really required for ObjC interop; maybe
/// suppress this for classes that don't need to be exposed to
/// ObjC, e.g. for non-Apple platforms?
void addMetadataFlags() {
  static_assert(unsigned(MetadataKind::Class) == 0,
                "class metadata kind is non-zero?");

  if (IGM.ObjCInterop) {
    // Get the metaclass pointer as an intptr_t.
    auto metaclass = asImpl().getAddrOfMetaclassObject(NotForDefinition);
    auto flags =
        llvm::ConstantExpr::getPtrToInt(metaclass, IGM.MetadataKindTy);
    B.add(flags);
  } else {
    // On non-objc platforms just fill it with a null, there
    // is no Objective-C metaclass.
    // FIXME: Remove this to save metadata space.
    // rdar://problem/18801263
    B.addInt(IGM.MetadataKindTy, unsigned(MetadataKind::Class));
  }
}

CanType getSuperclassTypeForMetadata() {
  if (auto superclass = getSuperclassForMetadata(IGM, Target))
    return Target->mapTypeIntoContext(superclass)->getCanonicalType();
  return CanType();
}

llvm::Constant *getSuperclassMetadata(CanType superclass) {
  return tryEmitConstantHeapMetadataRef(IGM, superclass,
                                        /*allowUninit*/ false);
}

bool shouldAddNullSuperclass() {
  // If we might have generic ancestry, leave a placeholder since
  // swift_initClassMetadata() will fill in the superclass.
switch (IGM.getClassMetadataStrategy(Target)) {
case ClassMetadataStrategy::Resilient:
case ClassMetadataStrategy::Singleton:
  return true;
case ClassMetadataStrategy::Update:
case ClassMetadataStrategy::FixedOrUpdate:
case ClassMetadataStrategy::Fixed:
  return false;
}
llvm_unreachable("covered switch");
}

/// Superclass field for embedded Swift: a direct metadata reference
/// (or null for a root class).
void addEmbeddedSuperclass(CanType classTy) {
  CanType superclass = asImpl().getSuperclassTypeForMetadata();
  if (!superclass) {
    B.addNullPointer(IGM.TypeMetadataPtrTy);
    return;
  }
  CanType superTy = classTy->getSuperclass()->getCanonicalType();
  B.add(IGM.getAddrOfTypeMetadata(superTy));
}

void addSuperclass() {
  if (asImpl().shouldAddNullSuperclass()) {
    B.addNullPointer(IGM.TypeMetadataPtrTy);
    return;
  }

  // If this is a root class, use SwiftObject as our formal parent.
  CanType superclass = asImpl().getSuperclassTypeForMetadata();
  if (!superclass) {
    // This is only required for ObjC interoperation.
    if (!IGM.ObjCInterop) {
      B.addNullPointer(IGM.TypeMetadataPtrTy);
      return;
    }

    // We have to do getAddrOfObjCClass ourselves here because
    // the ObjC runtime base needs to be ObjC-mangled but isn't
    // actually imported from a clang module.
    B.add(IGM.getAddrOfObjCClass(
        IGM.getObjCRuntimeBaseForSwiftRootClass(Target), NotForDefinition));
    return;
  }

  // This should succeed because the cases where it doesn't should
  // lead to shouldAddNullSuperclass returning true above.
auto metadata = asImpl().getSuperclassMetadata(superclass); assert(metadata); B.add(metadata); } llvm::Constant *emitLayoutString() { if (!layoutStringsEnabled(IGM)) return nullptr; auto lowered = getLoweredTypeInPrimaryContext(IGM, Target); auto &ti = IGM.getTypeInfo(lowered); auto *typeLayoutEntry = ti.buildTypeLayoutEntry(IGM, lowered, /*useStructLayouts*/ true); auto genericSig = lowered.getNominalOrBoundGenericNominal()->getGenericSignature(); return typeLayoutEntry->layoutString(IGM, genericSig); } llvm::Constant *getLayoutString() { return emitLayoutString(); } void addLayoutStringPointer() { assert(!isPureObjC()); if (auto *layoutString = getLayoutString()) { B.addSignedPointer(layoutString, IGM.getOptions().PointerAuth.TypeLayoutString, PointerAuthEntity::Special::TypeLayoutString); } else { B.addNullPointer(IGM.Int8PtrTy); } } void addDestructorFunction() { if (IGM.Context.LangOpts.hasFeature(Feature::Embedded)) { auto dtorRef = SILDeclRef(Target->getDestructor(), SILDeclRef::Kind::Deallocator); auto entry = VTable->getEntry(IGM.getSILModule(), dtorRef); if (llvm::Constant *ptr = IGM.getAddrOfSILFunction( entry->getImplementation(), NotForDefinition)) { B.addSignedPointer(ptr, IGM.getOptions().PointerAuth.HeapDestructors, PointerAuthEntity::Special::HeapDestructor); } else { B.addNullPointer(IGM.FunctionPtrTy); } return; } assert(!isPureObjC()); if (auto ptr = getAddrOfDestructorFunction(IGM, Target)) { B.addSignedPointer(*ptr, IGM.getOptions().PointerAuth.HeapDestructors, PointerAuthEntity::Special::HeapDestructor); } else { // In case the optimizer removed the function. See comment in // addReifiedVTableEntry(). 
B.addNullPointer(IGM.FunctionPtrTy); } } void addIVarDestroyer() { if (IGM.Context.LangOpts.hasFeature(Feature::Embedded)) { llvm::Constant *ptr = nullptr; for (const SILVTable::Entry &entry : VTable->getEntries()) { if (entry.getMethod().kind == SILDeclRef::Kind::IVarDestroyer) { ptr = IGM.getAddrOfSILFunction(entry.getImplementation(), NotForDefinition); break; } } if (ptr) { B.addSignedPointer(ptr, IGM.getOptions().PointerAuth.HeapDestructors, PointerAuthEntity::Special::HeapDestructor); } else { B.addNullPointer(IGM.FunctionPtrTy); } return; } assert(!isPureObjC()); auto dtorFunc = IGM.getAddrOfIVarInitDestroy(Target, /*isDestroyer=*/ true, /*isForeign=*/ false, NotForDefinition); if (dtorFunc) { B.addSignedPointer(*dtorFunc, IGM.getOptions().PointerAuth.HeapDestructors, PointerAuthEntity::Special::HeapDestructor); } else { B.addNullPointer(IGM.FunctionPtrTy); } } llvm::Constant *emitNominalTypeDescriptor() { return ClassContextDescriptorBuilder(IGM, Target, RequireMetadata).emit(); } llvm::Constant *getNominalTypeDescriptor() { return emitNominalTypeDescriptor(); } void addNominalTypeDescriptor() { assert(!isPureObjC()); B.addSignedPointer(asImpl().getNominalTypeDescriptor(), IGM.getOptions().PointerAuth.TypeDescriptors, PointerAuthEntity::Special::TypeDescriptor); } bool canBeConstant() { // TODO: the metadata global can actually be constant in a very // special case: it's not a pattern, ObjC interoperation isn't // required, there are no class fields, and there is nothing that // needs to be runtime-adjusted. return false; } void addInstanceAddressPoint() { assert(!isPureObjC()); // Right now, we never allocate fields before the address point. 
B.addInt32(0); } bool hasFixedLayout() { return FieldLayout.isFixedLayout(); } const ClassLayout &getFieldLayout() { return FieldLayout; } void addInstanceSize() { assert(!isPureObjC()); if (asImpl().hasFixedLayout()) { B.addInt32(asImpl().getFieldLayout().getSize().getValue()); } else { // Leave a zero placeholder to be filled at runtime B.addInt32(0); } } void addInstanceAlignMask() { assert(!isPureObjC()); if (asImpl().hasFixedLayout()) { B.addInt16(asImpl().getFieldLayout().getAlignMask().getValue()); } else { // Leave a zero placeholder to be filled at runtime B.addInt16(0); } } void addRuntimeReservedBits() { assert(!isPureObjC()); B.addInt16(0); } void addClassSize() { assert(!isPureObjC()); auto size = MetadataLayout.getSize(); B.addInt32(size.FullSize.getValue()); } void addClassAddressPoint() { assert(!isPureObjC()); // FIXME: Wrong auto size = MetadataLayout.getSize(); B.addInt32(size.AddressPoint.getValue()); } void addClassCacheData() { // We initially fill in these fields with addresses taken from // the ObjC runtime. // FIXME: Remove null data altogether rdar://problem/18801263 B.add(IGM.getObjCEmptyCachePtr()); B.add(IGM.getObjCEmptyVTablePtr()); } llvm::Constant *getROData() { return emitClassPrivateData(IGM, Target); } uint64_t getClassDataPointerHasSwiftMetadataBits() { // objcImpl classes should not have the Swift bit set. if (isPureObjC()) return 0; return IGM.UseDarwinPreStableABIBit ? 1 : 2; } void addClassDataPointer() { if (!IGM.ObjCInterop) { // with no Objective-C runtime, just give an empty pointer with the // swift bit set. // FIXME: Remove null data altogether rdar://problem/18801263 B.addInt(IGM.IntPtrTy, 1); return; } // Derive the RO-data. llvm::Constant *data = asImpl().getROData(); // Set a low bit to indicate this class has Swift metadata. auto bit = llvm::ConstantInt::get( IGM.IntPtrTy, asImpl().getClassDataPointerHasSwiftMetadataBits()); // Emit data + bit. 
data = llvm::ConstantExpr::getPtrToInt(data, IGM.IntPtrTy); data = llvm::ConstantExpr::getAdd(data, bit); B.add(data); } void addDefaultActorStorageFieldOffset() { B.addInt(IGM.SizeTy, getDefaultActorStorageFieldOffset(IGM).getValue()); } void addNonDefaultDistributedActorStorageFieldOffset() { B.addInt(IGM.SizeTy, getNonDefaultDistributedActorStorageFieldOffset(IGM).getValue()); } void addReifiedVTableEntry(SILDeclRef fn) { // Find the vtable entry. assert(VTable && "no vtable?!"); auto entry = VTable->getEntry(IGM.getSILModule(), fn); auto *afd = cast(fn.getDecl()); // The class is fragile. Emit a direct reference to the vtable entry. llvm::Constant *ptr; if (entry) { if (entry->getImplementation()->isAsync()) { ptr = IGM.getAddrOfAsyncFunctionPointer(entry->getImplementation()); } else if (entry->getImplementation() ->getLoweredFunctionType() ->isCalleeAllocatedCoroutine()) { ptr = IGM.getAddrOfCoroFunctionPointer(entry->getImplementation()); } else { ptr = IGM.getAddrOfSILFunction(entry->getImplementation(), NotForDefinition); } } else { auto *accessor = dyn_cast(afd); // The method is removed by dead method elimination. // It should be never called. We add a pointer to an error function. if (afd->hasAsync()) { ptr = llvm::ConstantExpr::getBitCast( IGM.getDeletedAsyncMethodErrorAsyncFunctionPointer(), IGM.FunctionPtrTy); } else if (accessor && requiresFeatureCoroutineAccessors( accessor->getAccessorKind())) { ptr = llvm::ConstantExpr::getBitCast( IGM.getDeletedCalleeAllocatedCoroutineMethodErrorCoroFunctionPointer(), IGM.FunctionPtrTy); } else { ptr = llvm::ConstantExpr::getBitCast(IGM.getDeletedMethodErrorFn(), IGM.FunctionPtrTy); } } if (IGM.getOptions().VirtualFunctionElimination) { auto offset = B.getNextOffsetFromGlobal(); VTableEntriesForVFE.push_back(std::pair(offset, fn)); } auto *accessor = dyn_cast(afd); PointerAuthSchema schema = afd->hasAsync() ? 
IGM.getOptions().PointerAuth.AsyncSwiftClassMethods : accessor && requiresFeatureCoroutineAccessors(accessor->getAccessorKind()) ? IGM.getOptions().PointerAuth.CoroSwiftClassMethods : IGM.getOptions().PointerAuth.SwiftClassMethods; B.addSignedPointer(ptr, schema, fn); } SmallVector, 8> getVTableEntriesForVFE() { return VTableEntriesForVFE; } void addPlaceholder(MissingMemberDecl *m) { assert(m->getNumberOfVTableEntries() == 0 && "cannot generate metadata with placeholders in it"); } void addMethodOverride(SILDeclRef baseRef, SILDeclRef declRef) {} void createMetadataAccessFunction() { assert(!Target->isGenericContext()); emitClassMetadataBaseOffset(IGM, Target); createNonGenericMetadataAccessFunction(IGM, Target); if (IGM.getClassMetadataStrategy(Target) == ClassMetadataStrategy::Fixed) return; if (isPureObjC()) return; emitMetadataCompletionFunction( IGM, Target, [&](IRGenFunction &IGF, llvm::Value *metadata, MetadataDependencyCollector *collector) { emitInitializeClassMetadata(IGF, Target, FieldLayout, metadata, collector); }); } }; static void addFixedFieldOffset(IRGenModule &IGM, ConstantStructBuilder &B, VarDecl *var, std::function typeFromContext) { SILType baseType = SILType::getPrimitiveObjectType( typeFromContext(var->getDeclContext())->getCanonicalType()); B.addInt(IGM.SizeTy, getClassFieldOffset(IGM, baseType, var).getValue()); } /// A builder for non-generic class metadata which does not require any /// runtime initialization, or that only requires runtime initialization /// on newer Objective-C runtimes. 
class FixedClassMetadataBuilder : public ClassMetadataBuilderBase { using super = ClassMetadataBuilderBase; using super::IGM; using super::B; public: FixedClassMetadataBuilder(IRGenModule &IGM, ClassDecl *theClass, ConstantStructBuilder &builder, const ClassLayout &fieldLayout) : super(IGM, theClass, builder, fieldLayout) {} FixedClassMetadataBuilder(IRGenModule &IGM, ClassDecl *theClass, ConstantStructBuilder &builder, const ClassLayout &fieldLayout, SILVTable *vtable) : super(IGM, theClass, builder, fieldLayout, vtable) {} void addFieldOffset(VarDecl *var) { assert(!isPureObjC()); addFixedFieldOffset(IGM, B, var, [](DeclContext *dc) { return dc->getDeclaredTypeInContext(); }); } void addFieldOffsetPlaceholders(MissingMemberDecl *placeholder) { assert(!isPureObjC()); llvm_unreachable("Fixed class metadata cannot have missing members"); } void addGenericRequirement(GenericRequirement requirement, ClassDecl *forClass) { llvm_unreachable("Fixed class metadata cannot have generic requirements"); } }; /// A builder for non-generic class metadata with resiliently-sized /// fields or generic ancestry. class SingletonClassMetadataBuilder : public ClassMetadataBuilderBase { using NominalDecl = StructDecl; using super = ClassMetadataBuilderBase; using super::IGM; using super::B; public: SingletonClassMetadataBuilder(IRGenModule &IGM, ClassDecl *theClass, ConstantStructBuilder &builder, const ClassLayout &fieldLayout) : super(IGM, theClass, builder, fieldLayout) {} void addFieldOffset(VarDecl *var) { assert(!isPureObjC()); // Field offsets are either copied from the superclass or calculated // at runtime. B.addInt(IGM.SizeTy, 0); } void addFieldOffsetPlaceholders(MissingMemberDecl *placeholder) { assert(!isPureObjC()); for (unsigned i = 0, e = placeholder->getNumberOfFieldOffsetVectorEntries(); i < e; ++i) { // Emit placeholder values for some number of stored properties we // know exist but aren't able to reference directly. 
B.addInt(IGM.SizeTy, 0); } } void addGenericRequirement(GenericRequirement requirement, ClassDecl *forClass) { switch (requirement.getKind()) { case GenericRequirement::Kind::Shape: case GenericRequirement::Kind::Value: B.addInt(cast(requirement.getType(IGM)), 0); break; case GenericRequirement::Kind::Metadata: case GenericRequirement::Kind::WitnessTable: case GenericRequirement::Kind::MetadataPack: case GenericRequirement::Kind::WitnessTablePack: B.addNullPointer(cast(requirement.getType(IGM))); break; } } }; /// A builder for metadata patterns for non-generic class with /// resilient ancestry. class ResilientClassMetadataBuilder { IRGenModule &IGM; ClassDecl *Target; ConstantStructBuilder &B; const ClassLayout &FieldLayout; public: ResilientClassMetadataBuilder(IRGenModule &IGM, ClassDecl *theClass, ConstantStructBuilder &builder, const ClassLayout &fieldLayout) : IGM(IGM), Target(theClass), B(builder), FieldLayout(fieldLayout) {} llvm::Constant *emitNominalTypeDescriptor() { return ClassContextDescriptorBuilder(IGM, Target, RequireMetadata).emit(); } void layout() { assert(!FieldLayout.hasObjCImplementation() && "Resilient class metadata not supported for @objcImpl"); emitNominalTypeDescriptor(); addRelocationFunction(); addDestructorFunction(); addIVarDestroyer(); addClassFlags(); addClassDataPointer(); addMetaclass(); } void addRelocationFunction() { // We don't use this yet, but it's available as a future customization // point. B.addRelativeAddressOrNull(nullptr); } void addLayoutStringPointer() { // TODO: really add the pointer B.addNullPointer(IGM.Int8PtrTy); } void addDestructorFunction() { auto function = getAddrOfDestructorFunction(IGM, Target); B.addCompactFunctionReferenceOrNull(function ? *function : nullptr); } void addIVarDestroyer() { auto function = IGM.getAddrOfIVarInitDestroy(Target, /*isDestroyer=*/ true, /*isForeign=*/ false, NotForDefinition); B.addCompactFunctionReferenceOrNull(function ? 
*function : nullptr); } void addClassFlags() { B.addInt32((uint32_t) getClassFlags(Target)); } void addClassDataPointer() { auto data = (IGM.ObjCInterop ? emitClassPrivateData(IGM, Target) : nullptr); B.addRelativeAddressOrNull(data); } void addMetaclass() { auto metaclass = (IGM.ObjCInterop ? IGM.getAddrOfMetaclassObject(Target, NotForDefinition) : nullptr); B.addRelativeAddressOrNull(metaclass); } void createMetadataAccessFunction() { assert(IGM.getClassMetadataStrategy(Target) == ClassMetadataStrategy::Resilient); assert(!Target->isGenericContext()); emitClassMetadataBaseOffset(IGM, Target); createNonGenericMetadataAccessFunction(IGM, Target); emitMetadataCompletionFunction( IGM, Target, [&](IRGenFunction &IGF, llvm::Value *metadata, MetadataDependencyCollector *collector) { emitInitializeClassMetadata(IGF, Target, FieldLayout, metadata, collector); }); } }; /// A builder for GenericClassMetadataPattern objects. class GenericClassMetadataBuilder : public GenericMetadataBuilderBase { using super = GenericMetadataBuilderBase; const ClassLayout &FieldLayout; std::optional ClassRODataOffset, MetaclassObjectOffset, MetaclassRODataOffset; public: GenericClassMetadataBuilder(IRGenModule &IGM, ClassDecl *theClass, ConstantStructBuilder &B, const ClassLayout &fieldLayout) : super(IGM, theClass, B), FieldLayout(fieldLayout) { // We need special initialization of metadata objects to trick the ObjC // runtime into initializing them. HasDependentMetadata = true; } void layoutHeader() { // @_objcImplementation on true (non-ObjC) generic classes doesn't make // much sense, and we haven't updated this builder to handle it. 
assert(!FieldLayout.hasObjCImplementation() && "Generic metadata not supported for @objcImpl"); super::layoutHeader(); // RelativePointer Destroy; addDestructorFunction(); // RelativePointer IVarDestroyer; addIVarDestroyer(); // ClassFlags Flags; B.addInt32((uint32_t) getClassFlags(Target)); // uint16_t ClassRODataOffset; if (IGM.ObjCInterop) ClassRODataOffset = B.addPlaceholderWithSize(IGM.Int16Ty); else B.addInt16(0); // uint16_t MetaclassObjectOffset; if (IGM.ObjCInterop) MetaclassObjectOffset = B.addPlaceholderWithSize(IGM.Int16Ty); else B.addInt16(0); // uint16_t MetadataRODataOffset; if (IGM.ObjCInterop) MetaclassRODataOffset = B.addPlaceholderWithSize(IGM.Int16Ty); else B.addInt16(0); // uint16_t Reserved; B.addInt16(0); } llvm::Constant *emitNominalTypeDescriptor() { return ClassContextDescriptorBuilder(IGM, Target, RequireMetadata).emit(); } GenericMetadataPatternFlags getPatternFlags() { auto flags = super::getPatternFlags(); flags.class_setHasImmediateMembersPattern(hasImmediateMembersPattern()); return flags; } void emitInstantiationDefinitions() { // Emit the base-offset variable. emitClassMetadataBaseOffset(IGM, Target); super::emitInstantiationDefinitions(); } void addLayoutStringPointer() { // TODO: really add the pointer B.addNullPointer(IGM.Int8PtrTy); } void addDestructorFunction() { auto function = getAddrOfDestructorFunction(IGM, Target); B.addCompactFunctionReferenceOrNull(function ? *function : nullptr); } void addIVarDestroyer() { auto function = IGM.getAddrOfIVarInitDestroy(Target, /*isDestroyer=*/ true, /*isForeign=*/ false, NotForDefinition); B.addCompactFunctionReferenceOrNull(function ? *function : nullptr); } bool hasExtraDataPattern() { return IGM.ObjCInterop; } PartialPattern buildExtraDataPattern() { ConstantInitBuilder subBuilder(IGM); auto subB = subBuilder.beginStruct(); subB.setPacked(true); // The offset of the pattern bytes in the overall extra-data section. // Any bytes before this will be zeroed. 
Currently we don't take // advantage of this. Size patternOffset = Size(0); Size classROData = Size(0); Size metaclassROData = Size(0); if (IGM.ObjCInterop) { // Add the metaclass object. B.fillPlaceholderWithInt(*MetaclassObjectOffset, IGM.Int16Ty, IGM.getOffsetInWords(patternOffset + subB.getNextOffsetFromGlobal())); addMetaclassObject(subB); // Add the RO-data objects. auto roDataPoints = emitClassPrivateDataFields(IGM, subB, Target); B.fillPlaceholderWithInt(*ClassRODataOffset, IGM.Int16Ty, IGM.getOffsetInWords(patternOffset + roDataPoints.first)); B.fillPlaceholderWithInt(*MetaclassRODataOffset, IGM.Int16Ty, IGM.getOffsetInWords(patternOffset + roDataPoints.second)); classROData = patternOffset + roDataPoints.first; metaclassROData = patternOffset + roDataPoints.second; } auto patternSize = subB.getNextOffsetFromGlobal(); auto global = subB.finishAndCreateGlobal("", IGM.getPointerAlignment(), /*constant*/ true); if (IGM.ObjCInterop) { auto getRODataAtOffset = [&] (Size offset) -> llvm::Constant * { auto t0 = llvm::ConstantExpr::getBitCast(global, IGM.Int8PtrTy); llvm::Constant *indices[] = {llvm::ConstantInt::get(IGM.Int32Ty, offset.getValue())}; return llvm::ConstantExpr::getBitCast( llvm::ConstantExpr::getInBoundsGetElementPtr(IGM.Int8Ty, t0, indices), IGM.Int8PtrTy); }; IGM.addGenericROData(getRODataAtOffset(classROData)); IGM.addGenericROData(getRODataAtOffset(metaclassROData)); } return { global, patternOffset, patternSize }; } void addMetaclassObject(ConstantStructBuilder &B) { // isa ClassDecl *rootClass = getRootClassForMetaclass(IGM, Target); auto isa = IGM.getAddrOfMetaclassObject(rootClass, NotForDefinition); B.add(isa); // super, which is dependent if the superclass is generic B.addNullPointer(IGM.ObjCClassPtrTy); // cache B.add(IGM.getObjCEmptyCachePtr()); // vtable B.add(IGM.getObjCEmptyVTablePtr()); // rodata, which is always dependent B.addInt(IGM.IntPtrTy, 0); } bool hasImmediateMembersPattern() { // TODO: use the real field offsets if they're 
known statically. return false; } llvm::Value *emitAllocateMetadata(IRGenFunction &IGF, llvm::Value *descriptor, llvm::Value *arguments, llvm::Value *templatePointer) { // Sign the descriptor. auto schema = IGF.IGM.getOptions().PointerAuth.TypeDescriptorsAsArguments; if (schema) { auto authInfo = PointerAuthInfo::emit( IGF, schema, nullptr, PointerAuthEntity::Special::TypeDescriptorAsArgument); descriptor = emitPointerAuthSign(IGF, descriptor, authInfo); } auto metadata = IGF.Builder.CreateCall( getLayoutString() ? IGM.getAllocateGenericClassMetadataWithLayoutStringFunctionPointer() : IGM.getAllocateGenericClassMetadataFunctionPointer(), {descriptor, arguments, templatePointer}); return metadata; } bool hasCompletionFunction() { // TODO: recognize cases where this is not required. // For example, under ObjCInterop mode we can move class realization // into the allocation phase if the superclass is trivial and there's // no layout to do. return true; } void emitInitializeMetadata(IRGenFunction &IGF, llvm::Value *metadata, bool isVWTMutable, MetadataDependencyCollector *collector) { assert(!HasDependentVWT && "class should never have dependent VWT"); emitInitializeClassMetadata(IGF, Target, FieldLayout, metadata, collector); } }; template