diff --git a/include/swift/AST/IRGenOptions.h b/include/swift/AST/IRGenOptions.h
index 5e6ef2ce7de..f9a9c47ee79 100644
--- a/include/swift/AST/IRGenOptions.h
+++ b/include/swift/AST/IRGenOptions.h
@@ -171,6 +171,9 @@ public:
   /// Bypass resilience when accessing resilient frameworks.
   unsigned EnableResilienceBypass : 1;
 
+  /// The path to load legacy type layouts from.
+  StringRef ReadTypeInfoPath;
+
   /// Should we try to build incrementally by not emitting an object file if it
   /// has the same IR hash as the module that we are preparing to emit?
   ///
diff --git a/include/swift/Option/FrontendOptions.td b/include/swift/Option/FrontendOptions.td
index 5b428d2f1e0..aeac53867d3 100644
--- a/include/swift/Option/FrontendOptions.td
+++ b/include/swift/Option/FrontendOptions.td
@@ -497,6 +497,9 @@ def enable_class_resilience : Flag<["-"], "enable-class-resilience">,
 def enable_resilience_bypass : Flag<["-"], "enable-resilience-bypass">,
   HelpText<"Completely bypass resilience when accessing types in resilient frameworks">;
 
+def read_type_info_path_EQ : Joined<["-"], "read-type-info-path=">,
+  HelpText<"Read legacy type layout from the given path">;
+
 def group_info_path : Separate<["-"], "group-info-path">,
   HelpText<"The path to collect the group information of the compiled module">;
 
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 0066982447e..d6a7d152cf7 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -1023,6 +1023,10 @@ static bool ParseIRGenArgs(IRGenOptions &Opts, ArgList &Args,
     Opts.EnableResilienceBypass = true;
   }
 
+  if (const Arg *A = Args.getLastArg(OPT_read_type_info_path_EQ)) {
+    Opts.ReadTypeInfoPath = A->getValue();
+  }
+
   for (const auto &Lib : Args.getAllArgValues(options::OPT_autolink_library))
     Opts.LinkLibraries.push_back(LinkLibrary(Lib, LibraryKind::Library));
 
diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp
index 123c4f81d4e..1cb31ad9e74 100644
--- a/lib/IRGen/GenClass.cpp
+++ b/lib/IRGen/GenClass.cpp
@@ -373,7 +373,12 @@ namespace {
       // Lower the field type.
       auto *eltType = &IGM.getTypeInfo(type);
       if (CompletelyFragileLayout && !eltType->isFixedSize()) {
-        CompletelyFragileScope scope(IGM);
+        // For staging purposes, only use the legacy layout mode if a layout
+        // path was provided on the command line.
+        auto mode = (IGM.IRGen.Opts.ReadTypeInfoPath.empty()
+                     ? TypeConverter::Mode::CompletelyFragile
+                     : TypeConverter::Mode::Legacy);
+        LoweringModeScope scope(IGM, mode);
         eltType = &IGM.getTypeInfo(type);
       }
 
diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp
index 78f8d72f668..20926d6a87c 100644
--- a/lib/IRGen/GenDecl.cpp
+++ b/lib/IRGen/GenDecl.cpp
@@ -1788,7 +1788,7 @@ Address IRGenModule::getAddrOfSILGlobalVariable(SILGlobalVariable *var,
     // FIXME: Once lldb can make use of remote mirrors to calculate layouts
     // at runtime, this should be removed.
     {
-      CompletelyFragileScope Scope(*this);
+      LoweringModeScope Scope(*this, TypeConverter::Mode::CompletelyFragile);
       SILType loweredTy = var->getLoweredType();
       auto &nonResilientTI = cast<FixedTypeInfo>(getTypeInfo(loweredTy));
@@ -3602,7 +3602,7 @@ llvm::Constant *IRGenModule::getAddrOfGlobalUTF16String(StringRef utf8) {
 ///   of stored properties
 bool IRGenModule::isResilient(NominalTypeDecl *D, ResilienceExpansion expansion) {
   if (expansion == ResilienceExpansion::Maximal &&
-      Types.isCompletelyFragile()) {
+      Types.getLoweringMode() == TypeConverter::Mode::CompletelyFragile) {
     return false;
   }
   return D->isResilient(getSwiftModule(), expansion);
@@ -3621,7 +3621,7 @@ IRGenModule::getResilienceExpansionForAccess(NominalTypeDecl *decl) {
 // layout. Calling isResilient() with this scope will always return false.
 ResilienceExpansion
 IRGenModule::getResilienceExpansionForLayout(NominalTypeDecl *decl) {
-  if (Types.isCompletelyFragile())
+  if (Types.getLoweringMode() == TypeConverter::Mode::CompletelyFragile)
     return ResilienceExpansion::Minimal;
 
   if (isResilient(decl, ResilienceExpansion::Minimal))
diff --git a/lib/IRGen/GenEnum.cpp b/lib/IRGen/GenEnum.cpp
index 846d0d52e2b..a0da8366034 100644
--- a/lib/IRGen/GenEnum.cpp
+++ b/lib/IRGen/GenEnum.cpp
@@ -6114,6 +6114,13 @@ const TypeInfo *TypeConverter::convertEnumType(TypeBase *key, CanType type,
   // Assert that the layout query functions for fixed-layout enums work, for
   // LLDB's sake.
 #ifndef NDEBUG
+
+  // ... but not if we're building a legacy layout, in which case we only know
+  // the extra inhabitant *count* and not the actual extra inhabitant values,
+  // so running these checks would crash.
+  if (LoweringMode == Mode::Legacy)
+    return ti;
+
   auto displayBitMask = [&](const SpareBitVector &v) {
     for (unsigned i = v.size(); i-- > 0;) {
       llvm::dbgs() << (v[i] ? '1' : '0');
diff --git a/lib/IRGen/GenType.cpp b/lib/IRGen/GenType.cpp
index 0ebeda8bba6..fa7e89b1eae 100644
--- a/lib/IRGen/GenType.cpp
+++ b/lib/IRGen/GenType.cpp
@@ -29,6 +29,7 @@
 #include "clang/CodeGen/SwiftCallingConv.h"
 
 #include "EnumPayload.h"
+#include "LegacyLayoutFormat.h"
 #include "LoadableTypeInfo.h"
 #include "GenMeta.h"
 #include "GenProto.h"
@@ -56,16 +57,10 @@ Alignment IRGenModule::getCappedAlignment(Alignment align) {
 }
 
 llvm::DenseMap<TypeBase *, const TypeInfo *> &
-TypeConverter::Types_t::getCacheFor(bool isDependent, bool completelyFragile) {
-  if (completelyFragile) {
-    return (isDependent
-            ? FragileDependentCache
-            : FragileIndependentCache);
-  }
-
+TypeConverter::Types_t::getCacheFor(bool isDependent, TypeConverter::Mode mode) {
   return (isDependent
-          ? DependentCache
-          : IndependentCache);
+          ? DependentCache[unsigned(mode)]
+          : IndependentCache[unsigned(mode)]);
 }
 
 void TypeInfo::assign(IRGenFunction &IGF, Address dest, Address src,
@@ -1071,6 +1066,54 @@ TypeConverter::createImmovable(llvm::Type *type, Size size, Alignment align) {
 
 static TypeInfo *invalidTypeInfo() { return (TypeInfo*) 1; }
 
+bool TypeConverter::readLegacyTypeInfo(StringRef path) {
+  auto fileOrErr = llvm::MemoryBuffer::getFile(path);
+  if (!fileOrErr)
+    return true;
+
+  auto file = std::move(fileOrErr.get());
+
+  llvm::yaml::Input yin(file->getBuffer());
+
+  // Read the document list.
+  std::vector<YAMLModuleNode> modules;
+  yin >> modules;
+
+  if (yin.error())
+    return true;
+
+  for (auto &module : modules) {
+    for (auto &decl : module.Decls) {
+      auto result = LegacyTypeInfos.insert(std::make_pair(
+          decl.Name,
+          decl));
+      assert(result.second);
+      (void) result;
+    }
+  }
+
+  return false;
+}
+
+static std::string mangleTypeAsContext(const NominalTypeDecl *decl) {
+  Mangle::ASTMangler Mangler;
+  return Mangler.mangleTypeAsContextUSR(decl);
+}
+
+Optional<YAMLTypeInfoNode>
+TypeConverter::getLegacyTypeInfo(NominalTypeDecl *decl) const {
+  auto &mangledName = const_cast<TypeConverter *>(this)->DeclMangledNames[decl];
+  if (mangledName.empty())
+    mangledName = mangleTypeAsContext(decl);
+  assert(!mangledName.empty());
+
+  auto found = LegacyTypeInfos.find(mangledName);
+  if (found == LegacyTypeInfos.end())
+    return None;
+
+  return found->second;
+}
+
 TypeConverter::TypeConverter(IRGenModule &IGM)
   : IGM(IGM), FirstType(invalidTypeInfo()) {
@@ -1080,7 +1123,15 @@ TypeConverter::TypeConverter(IRGenModule &IGM)
   // sync with the binary module. Once LLDB can calculate type layouts at
   // runtime (using remote mirrors or some other mechanism), we can remove this.
   if (IGM.IRGen.Opts.EnableResilienceBypass)
-    CompletelyFragile = true;
+    LoweringMode = Mode::CompletelyFragile;
+
+  StringRef path = IGM.IRGen.Opts.ReadTypeInfoPath;
+  if (!path.empty()) {
+    bool error = readLegacyTypeInfo(path);
+    if (error) {
+      llvm::report_fatal_error("Cannot read '" + path + "'");
+    }
+  }
 }
 
 TypeConverter::~TypeConverter() {
@@ -1102,8 +1153,9 @@ void TypeConverter::pushGenericContext(CanGenericSignature signature) {
 
   // Clear the dependent type info cache since we have a new active signature
   // now.
-  Types.getCacheFor(/*isDependent*/ true, /*isFragile*/ false).clear();
-  Types.getCacheFor(/*isDependent*/ true, /*isFragile*/ true).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::Normal).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::Legacy).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::CompletelyFragile).clear();
 }
 
 void TypeConverter::popGenericContext(CanGenericSignature signature) {
@@ -1113,8 +1165,9 @@ void TypeConverter::popGenericContext(CanGenericSignature signature) {
   // Pop the SIL TypeConverter's generic context too.
   IGM.getSILTypes().popGenericContext(signature);
 
-  Types.getCacheFor(/*isDependent*/ true, /*isFragile*/ false).clear();
-  Types.getCacheFor(/*isDependent*/ true, /*isFragile*/ true).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::Normal).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::Legacy).clear();
+  Types.getCacheFor(/*isDependent*/ true, Mode::CompletelyFragile).clear();
 }
 
 GenericEnvironment *TypeConverter::getGenericEnvironment() {
@@ -1131,7 +1184,7 @@ GenericEnvironment *IRGenModule::getGenericEnvironment() {
 void TypeConverter::addForwardDecl(TypeBase *key) {
   assert(key->isCanonical());
   assert(!key->hasTypeParameter());
-  auto &Cache = Types.getCacheFor(/*isDependent*/ false, CompletelyFragile);
+  auto &Cache = Types.getCacheFor(/*isDependent*/ false, LoweringMode);
   auto result = Cache.insert(std::make_pair(key, nullptr));
   assert(result.second && "entry already exists for type!");
   (void) result;
@@ -1409,20 +1462,10 @@ CanType TypeConverter::getExemplarType(CanType contextTy) {
   }
 }
 
-void TypeConverter::pushCompletelyFragile() {
-  assert(!CompletelyFragile);
-  CompletelyFragile = true;
-}
-
-void TypeConverter::popCompletelyFragile() {
-  assert(CompletelyFragile);
-  CompletelyFragile = false;
-}
-
 const TypeInfo *TypeConverter::getTypeEntry(CanType canonicalTy) {
   // Cache this entry in the dependent or independent cache appropriate to it.
   auto &Cache = Types.getCacheFor(canonicalTy->hasTypeParameter(),
-                                  CompletelyFragile);
+                                  LoweringMode);
 
   {
     auto it = Cache.find(canonicalTy.getPointer());
@@ -1447,7 +1490,7 @@ const TypeInfo *TypeConverter::getTypeEntry(CanType canonicalTy) {
 
   // See whether we lowered a type equivalent to this one.
   if (exemplarTy != canonicalTy) {
-    auto &Cache = Types.getCacheFor(/*isDependent*/ false, CompletelyFragile);
+    auto &Cache = Types.getCacheFor(/*isDependent*/ false, LoweringMode);
     auto it = Cache.find(exemplarTy.getPointer());
     if (it != Cache.end()) {
       // Record the object under the original type.
@@ -1469,7 +1512,7 @@ const TypeInfo *TypeConverter::getTypeEntry(CanType canonicalTy) {
   insertEntry(Cache[canonicalTy.getPointer()]);
   if (canonicalTy != exemplarTy) {
     auto &IndependentCache = Types.getCacheFor(/*isDependent*/ false,
-                                               CompletelyFragile);
+                                               LoweringMode);
     insertEntry(IndependentCache[exemplarTy.getPointer()]);
   }
 
@@ -1809,11 +1852,94 @@ static bool isIRTypeDependent(IRGenModule &IGM, NominalTypeDecl *decl) {
   }
 }
 
+namespace {
+
+class LegacyTypeInfo : public FixedTypeInfo {
+  unsigned NumExtraInhabitants;
+
+public:
+  LegacyTypeInfo(llvm::Type *type, const SpareBitVector &spareBits,
+                 const YAMLTypeInfoNode &node)
+    : FixedTypeInfo(type,
+                    Size(node.Size),
+                    spareBits,
+                    Alignment(node.Alignment),
+                    IsNotPOD, /* irrelevant */
+                    IsNotBitwiseTakable, /* irrelevant */
+                    IsFixedSize /* irrelevant */),
+      NumExtraInhabitants(node.NumExtraInhabitants) {}
+
+  virtual unsigned getFixedExtraInhabitantCount(IRGenModule &IGM) const override {
+    return NumExtraInhabitants;
+  }
+
+  virtual APInt getFixedExtraInhabitantMask(IRGenModule &IGM) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual APInt getFixedExtraInhabitantValue(IRGenModule &IGM,
+                                             unsigned bits,
+                                             unsigned index) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void getSchema(ExplosionSchema &schema) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void assignWithCopy(IRGenFunction &IGF, Address dest, Address src,
+                              SILType T, bool isOutlined) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void assignWithTake(IRGenFunction &IGF, Address dest, Address src,
+                              SILType T, bool isOutlined) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void initializeWithCopy(IRGenFunction &IGF, Address destAddr,
+                                  Address srcAddr, SILType T,
+                                  bool isOutlined) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void initializeFromParams(IRGenFunction &IGF, Explosion &params,
+                                    Address src, SILType T,
+                                    bool isOutlined) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+
+  virtual void destroy(IRGenFunction &IGF, Address address, SILType T,
+                       bool isOutlined) const override {
+    llvm_unreachable("TypeConverter::Mode::Legacy is not for real values");
+  }
+};
+
+} // namespace
+
 const TypeInfo *TypeConverter::convertAnyNominalType(CanType type,
                                                      NominalTypeDecl *decl) {
   // By "any", we don't mean existentials.
   assert(!isa<ProtocolDecl>(decl));
 
+  // If we're producing a legacy type layout, and we have a serialized
+  // record for this type, produce it now.
+  if (LoweringMode == Mode::Legacy) {
+    auto node = getLegacyTypeInfo(decl);
+
+    if (node) {
+      Size size(node->Size);
+
+      auto ty = IGM.createNominalType(type);
+      ty->setBody(llvm::ArrayType::get(IGM.Int8Ty, size.getValue()));
+
+      SpareBitVector spareBits;
+      spareBits.appendClearBits(size.getValueInBits());
+
+      return new LegacyTypeInfo(ty, spareBits, *node);
+    }
+  }
+
   // We need to give generic specializations distinct TypeInfo objects
   // if their IR-gen might be different, e.g. if they use different IR
   // types or if type-specific operations like projections might need
@@ -1849,7 +1975,7 @@ const TypeInfo *TypeConverter::convertAnyNominalType(CanType type,
   assert(decl->getDeclaredType()->isCanonical());
   assert(decl->getDeclaredType()->hasUnboundGenericType());
   TypeBase *key = decl->getDeclaredType().getPointer();
-  auto &Cache = Types.getCacheFor(/*isDependent*/ false, CompletelyFragile);
+  auto &Cache = Types.getCacheFor(/*isDependent*/ false, LoweringMode);
   auto entry = Cache.find(key);
   if (entry != Cache.end())
     return entry->second;
diff --git a/lib/IRGen/GenType.h b/lib/IRGen/GenType.h
index f708d98429f..290ac37615b 100644
--- a/lib/IRGen/GenType.h
+++ b/lib/IRGen/GenType.h
@@ -22,8 +22,10 @@
 #include "llvm/ADT/FoldingSet.h"
 #include "llvm/ADT/ilist.h"
 #include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/StringMap.h"
 #include "IRGenModule.h"
 #include "IRGenFunction.h"
+#include "LegacyLayoutFormat.h"
 
 namespace swift {
   class GenericSignatureBuilder;
@@ -60,9 +62,31 @@ namespace irgen {
 /// The helper class for generating types.
 class TypeConverter {
 public:
+  enum class Mode : unsigned {
+    /// Normal type lowering mode where resilient types are opaque.
+    Normal,
+
+    /// Used for computing backward deployment class layouts, where we emit a
+    /// static class metadata layout using known sizes and alignments of any
+    /// resiliently-typed fields from a previous Swift version. On newer Swift
+    /// versions we use a runtime mechanism to re-initialize the class metadata
+    /// in-place with the current known layout.
+    Legacy,
+
+    /// A temporary hack for lldb where all resilient types are transparent and
+    /// treated as fixed-size (but still lowered in a way that matches the
+    /// runtime layout produced for resilient types, which is important for some
+    /// types like enums where enabling resilience changes the layout).
+    CompletelyFragile
+
+    /// When adding or removing modes, remember to update NumLoweringModes below.
+  };
+
+  static unsigned const NumLoweringModes = 3;
+
   IRGenModule &IGM;
 private:
-  bool CompletelyFragile = false;
+  Mode LoweringMode = Mode::Normal;
 
   llvm::DenseMap<ProtocolDecl*, std::unique_ptr<const ProtocolInfo>> Protocols;
   const TypeInfo *FirstType;
@@ -89,6 +113,9 @@
   const LoadableTypeInfo *SwiftRetainablePointerBoxTI = nullptr,
                          *UnknownObjectRetainablePointerBoxTI = nullptr;
 
+  llvm::StringMap<YAMLTypeInfoNode> LegacyTypeInfos;
+  llvm::DenseMap<NominalTypeDecl *, std::string> DeclMangledNames;
+
   const LoadableTypeInfo *createPrimitive(llvm::Type *T,
                                           Size size, Alignment align);
   const LoadableTypeInfo *createPrimitiveForAlignedPointer(llvm::PointerType *T,
@@ -127,8 +154,8 @@ public:
   TypeConverter(IRGenModule &IGM);
   ~TypeConverter();
 
-  bool isCompletelyFragile() const {
-    return CompletelyFragile;
+  Mode getLoweringMode() const {
+    return LoweringMode;
   }
 
   const TypeInfo *getTypeEntry(CanType type);
@@ -160,18 +187,24 @@ public:
   /// Exit a generic context.
   void popGenericContext(CanGenericSignature signature);
 
-  /// Enter a scope where all types are lowered bypassing resilience.
-  void pushCompletelyFragile();
-
-  /// Exit a completely fragile scope.
-  void popCompletelyFragile();
-
   /// Retrieve the generic environment for the current generic context.
   ///
   /// Fails if there is no generic context.
   GenericEnvironment *getGenericEnvironment();
 
 private:
+  friend class LoweringModeScope;
+
+  void setLoweringMode(Mode mode) {
+    LoweringMode = mode;
+  }
+
+  /// Read a YAML legacy type layout dump. Returns false on success, true on
+  /// error.
+  bool readLegacyTypeInfo(StringRef path);
+
+  Optional<YAMLTypeInfoNode> getLegacyTypeInfo(NominalTypeDecl *decl) const;
+
   // Debugging aids.
 #ifndef NDEBUG
   bool isExemplarArchetype(ArchetypeType *arch) const;
@@ -181,14 +214,12 @@
   CanType getExemplarType(CanType t);
 
   class Types_t {
-    llvm::DenseMap<TypeBase *, const TypeInfo *> IndependentCache;
-    llvm::DenseMap<TypeBase *, const TypeInfo *> DependentCache;
-    llvm::DenseMap<TypeBase *, const TypeInfo *> FragileIndependentCache;
-    llvm::DenseMap<TypeBase *, const TypeInfo *> FragileDependentCache;
+    llvm::DenseMap<TypeBase *, const TypeInfo *> IndependentCache[NumLoweringModes];
+    llvm::DenseMap<TypeBase *, const TypeInfo *> DependentCache[NumLoweringModes];
 
   public:
     llvm::DenseMap<TypeBase *, const TypeInfo *> &getCacheFor(bool isDependent,
-                                                              bool completelyFragile);
+                                                              Mode mode);
   };
   Types_t Types;
 };
@@ -215,22 +246,21 @@ public:
 };
 
 /// An RAII interface for forcing types to be lowered bypassing resilience.
-class CompletelyFragileScope {
-  bool State;
+class LoweringModeScope {
+  TypeConverter::Mode OldLoweringMode;
   TypeConverter &TC;
 public:
-  explicit CompletelyFragileScope(TypeConverter &TC) : TC(TC) {
-    State = TC.isCompletelyFragile();
-    if (!State)
-      TC.pushCompletelyFragile();
+  LoweringModeScope(TypeConverter &TC, TypeConverter::Mode LoweringMode)
+      : TC(TC) {
+    OldLoweringMode = TC.getLoweringMode();
+    TC.setLoweringMode(LoweringMode);
   }
 
-  CompletelyFragileScope(IRGenModule &IGM)
-    : CompletelyFragileScope(IGM.Types) {}
+  LoweringModeScope(IRGenModule &IGM, TypeConverter::Mode LoweringMode)
+    : LoweringModeScope(IGM.Types, LoweringMode) {}
 
-  ~CompletelyFragileScope() {
-    if (!State)
-      TC.popCompletelyFragile();
+  ~LoweringModeScope() {
+    TC.setLoweringMode(OldLoweringMode);
   }
 };
diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h
index 00656b84a20..9566dc9ff66 100644
--- a/lib/IRGen/IRGenModule.h
+++ b/lib/IRGen/IRGenModule.h
@@ -792,7 +792,7 @@ private:
   std::unique_ptr> Conformances;
 
   friend class GenericContextScope;
-  friend class CompletelyFragileScope;
+  friend class LoweringModeScope;
 
 //--- Globals ---------------------------------------------------------------
 public:
diff --git a/lib/IRGen/LegacyLayoutFormat.h b/lib/IRGen/LegacyLayoutFormat.h
index 0867cc2448d..427f6466d7b 100644
--- a/lib/IRGen/LegacyLayoutFormat.h
+++ b/lib/IRGen/LegacyLayoutFormat.h
@@ -100,5 +100,6 @@ template <> struct MappingTraits {
 } // namespace llvm
 
 LLVM_YAML_IS_SEQUENCE_VECTOR(swift::irgen::YAMLTypeInfoNode);
+LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(swift::irgen::YAMLModuleNode);
 
 #endif // SWIFT_IRGEN_LEGACY_LAYOUT_FORMAT_H
diff --git a/lib/IRGen/TypeLayoutDumper.cpp b/lib/IRGen/TypeLayoutDumper.cpp
index eac9137aadb..49b8e52f4aa 100644
--- a/lib/IRGen/TypeLayoutDumper.cpp
+++ b/lib/IRGen/TypeLayoutDumper.cpp
@@ -163,7 +163,7 @@ bool swift::performDumpTypeInfo(IRGenOptions &Opts,
   IRGenModule IGM(IRGen, IRGen.createTargetMachine(), LLVMContext);
 
   // We want to bypass resilience.
-  CompletelyFragileScope scope(IGM);
+  LoweringModeScope scope(IGM, TypeConverter::Mode::CompletelyFragile);
 
   auto *Mod = SILMod.getSwiftModule();
   SmallVector<Decl *, 16> AllDecls;
diff --git a/test/IRGen/Inputs/legacy_type_info/a.yaml b/test/IRGen/Inputs/legacy_type_info/a.yaml
new file mode 100644
index 00000000000..bb131332a61
--- /dev/null
+++ b/test/IRGen/Inputs/legacy_type_info/a.yaml
@@ -0,0 +1,10 @@
+Name: resilient_struct
+Decls:
+  - Name: 16resilient_struct4SizeV
+    Size: 16
+    Alignment: 8
+    ExtraInhabitants: 0
+  - Name: 16resilient_struct12ResilientRefV
+    Size: 8
+    Alignment: 8
+    ExtraInhabitants: 4096
diff --git a/test/IRGen/completely_fragile_class_layout.sil b/test/IRGen/completely_fragile_class_layout.sil
index d2aaeb42b53..f9a9b458495 100644
--- a/test/IRGen/completely_fragile_class_layout.sil
+++ b/test/IRGen/completely_fragile_class_layout.sil
@@ -1,6 +1,10 @@
 // RUN: %empty-directory(%t)
 // RUN: %target-swift-frontend -emit-module -enable-resilience -emit-module-path=%t/resilient_struct.swiftmodule -module-name=resilient_struct %S/../Inputs/resilient_struct.swift
+
 // RUN: %target-swift-frontend -I %t -emit-ir -enable-resilience %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize -DINT=i%target-ptrsize
+
+// RUN: %target-swift-frontend -I %t -emit-ir -enable-resilience %s -read-type-info-path=%S/Inputs/legacy_type_info/a.yaml | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize -DINT=i%target-ptrsize
+
 // RUN: %target-swift-frontend -I %t -emit-ir -enable-resilience -O %s
 
 // We only use fragile class layouts when Objective-C interop is enabled.
@@ -80,6 +84,20 @@ sil_vtable ClassWithResilientEnum {}
 
 // CHECK-LABEL: @"$s31completely_fragile_class_layout22ClassWithResilientEnumC6seconds4Int8VvpWvd" = hidden constant i64 25, align 8
 
+// Make sure extra inhabitants work when reading a legacy layout -- the
+// Optional should be 8 bytes in size, not 9
+public class ClassWithResilientRef {
+  var first: ResilientRef?
+  var second: Int
+}
+
+sil_vtable ClassWithResilientRef {}
+
+// Field offsets are statically known:
+// CHECK-LABEL: @"$s31completely_fragile_class_layout21ClassWithResilientRefC5first16resilient_struct0gH0VSgvpWvd" = hidden constant i64 16, align 8
+// CHECK-LABEL: @"$s31completely_fragile_class_layout21ClassWithResilientRefC6secondSivpWvd" = hidden constant i64 24, align 8
+
+
 // When allocating a class with resiliently-sized fields, we must still load
 // the size and alignment from metadata, because its metadata may have been
 // re-initialized.