IRGen: Forward spare bits through aggregates.

When doing struct layout for fixed-layout structs or tuples, combine the spare bit masks of their elements to form the spare bit mask of the aggregate, treating padding between elements as spare bits as well.
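
Conceptually, the combining step works as sketched below (a simplified Swift-level model only; the real logic is the llvm::BitVector handling in StructLayoutBuilder further down, and these function names are made up for illustration):

// A spare-bit mask modeled as one Bool per bit of the aggregate so far;
// 'true' means the bit is spare (it never holds meaningful data).
func markPaddingAsSpare(_ mask: inout [Bool], upToBitCount newSize: Int) {
  // Inter-element padding never carries data, so all of its bits are spare.
  while mask.count < newSize { mask.append(true) }
}

func appendElementSpareBits(_ mask: inout [Bool], element: [Bool],
                            elementSizeInBits: Int) {
  if element.isEmpty {
    // The element exposes no spare bits; all of its bits are "in use".
    mask.append(contentsOf: Array(repeating: false, count: elementSizeInBits))
  } else {
    // Otherwise, the element's own mask becomes part of the aggregate's mask.
    mask.append(contentsOf: element)
  }
}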

For now, disable using these spare bits to form extra inhabitants for structs and tuples; additional runtime work would be needed to expose those extra inhabitants for correct generic behavior. This leaves us in the odd situation where 'enum { case A(Struct), B, C }' spills an extra tag bit while 'enum { case A(Struct), B(Struct), C }' doesn't, but the work to fix the former isn't immediately critical for the String optimization.
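
For reference, the two shapes above written out as a sketch ('Padded' is a hypothetical stand-in for 'Struct', chosen so that inter-field padding supplies spare bits; exact sizes depend on the target):

// Interior padding between the fields provides spare bits.
struct Padded {
  var small: Int8
  var big: Int64
}

// Single payload: the struct's spare bits are not exported as extra
// inhabitants yet, so the tag for B and C spills outside the payload.
enum SinglePayload { case A(Padded), B, C }

// Multiple payloads: the shared payload's spare bits hold the tag
// directly, so no extra storage is needed.
enum MultiPayload { case A(Padded), B(Padded), C }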

Swift SVN r12165
Author: Joe Groff
Date:   2014-01-10 23:15:34 +00:00
Commit: 9b38c4b5db (parent ee85fb341a)

7 changed files with 198 additions and 18 deletions


@@ -3738,7 +3738,7 @@ llvm::Value *irgen::emitGatherSpareBits(IRGenFunction &IGF,
   unsigned usedBits = resultLowBit;
   llvm::Value *result = nullptr;
-  for (int i = spareBitMask.find_first(); i != -1;
+  for (int i = spareBitMask.find_first(); i != -1 && usedBits < resultBitWidth;
        i = spareBitMask.find_next(i)) {
     assert(i >= 0);
     unsigned u = i;
@@ -3758,7 +3758,11 @@ llvm::Value *irgen::emitGatherSpareBits(IRGenFunction &IGF,
     // See how many consecutive bits we have.
     unsigned numBits = 1;
     ++u;
-    for (unsigned e = spareBitMask.size(); u < e && spareBitMask[u]; ++u)
+    // We don't need more bits than the size of the result.
+    unsigned maxBits = resultBitWidth - usedBits;
+    for (unsigned e = spareBitMask.size();
+         u < e && numBits < maxBits && spareBitMask[u];
+         ++u)
       ++numBits;
 
     // Mask out the selected bits.


@@ -132,8 +132,10 @@ namespace {
   public:
     // FIXME: Spare bits between struct members.
     LoadableStructTypeInfo(unsigned numFields, llvm::Type *T, Size size,
+                           llvm::BitVector spareBits,
                            Alignment align, IsPOD_t isPOD)
-      : StructTypeInfoBase(numFields, T, size, llvm::BitVector{}, align, isPOD)
+      : StructTypeInfoBase(numFields, T, size, std::move(spareBits),
+                           align, isPOD)
     {}
 
     bool isIndirectArgument(ExplosionKind kind) const override { return false; }
@@ -147,6 +149,14 @@ namespace {
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF) const {
       return Nothing;
     }
+
+    // FIXME: Suppress use of extra inhabitants for single-payload enum layout
+    // until we're ready to handle the runtime logic for exporting extra
+    // inhabitants through generic structs.
+    bool mayHaveExtraInhabitants(IRGenModule&) const override { return false; }
+    unsigned getFixedExtraInhabitantCount(IRGenModule&) const override {
+      return 0;
+    }
   };
 
   /// A type implementation for non-loadable but fixed-size struct types.
@@ -157,13 +167,23 @@ namespace {
   public:
     // FIXME: Spare bits between struct members.
     FixedStructTypeInfo(unsigned numFields, llvm::Type *T, Size size,
+                        llvm::BitVector spareBits,
                         Alignment align, IsPOD_t isPOD)
-      : StructTypeInfoBase(numFields, T, size, llvm::BitVector{}, align, isPOD)
+      : StructTypeInfoBase(numFields, T, size, std::move(spareBits), align,
+                           isPOD)
     {}
 
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF, CanType T) const {
       return Nothing;
     }
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF) const { return Nothing; }
+
+    // FIXME: Suppress use of extra inhabitants for single-payload enum layout
+    // until we're ready to handle the runtime logic for exporting extra
+    // inhabitants through generic structs.
+    bool mayHaveExtraInhabitants(IRGenModule&) const override { return false; }
+    unsigned getFixedExtraInhabitantCount(IRGenModule&) const override {
+      return 0;
+    }
   };
 
   /// Find the beginning of the field offset vector in a struct's metadata.
@@ -304,6 +324,7 @@ namespace {
                                  const StructLayout &layout) {
       return create<LoadableStructTypeInfo>(fields, layout.getType(),
                                             layout.getSize(),
+                                            layout.getSpareBits(),
                                             layout.getAlignment(),
                                             layout.isKnownPOD());
     }
@@ -312,6 +333,7 @@ namespace {
                                  const StructLayout &layout) {
      return create<FixedStructTypeInfo>(fields, layout.getType(),
                                         layout.getSize(),
+                                        layout.getSpareBits(),
                                         layout.getAlignment(),
                                         layout.isKnownPOD());
     }


@@ -131,13 +131,23 @@ namespace {
   public:
     // FIXME: Spare bits between tuple elements.
     LoadableTupleTypeInfo(unsigned numFields, llvm::Type *ty,
-                          Size size, Alignment align, IsPOD_t isPOD)
-      : TupleTypeInfoBase(numFields, ty, size, llvm::BitVector{}, align, isPOD)
+                          Size size, llvm::BitVector spareBits,
+                          Alignment align, IsPOD_t isPOD)
+      : TupleTypeInfoBase(numFields, ty, size, std::move(spareBits), align,
+                          isPOD)
     {}
 
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF) const { return Nothing; }
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF,
                                  CanType T) const { return Nothing; }
+
+    // FIXME: Suppress use of extra inhabitants for single-payload enum layout
+    // until we're ready to handle the runtime logic for exporting extra
+    // inhabitants through tuple metadata.
+    bool mayHaveExtraInhabitants(IRGenModule&) const override { return false; }
+    unsigned getFixedExtraInhabitantCount(IRGenModule&) const override {
+      return 0;
+    }
   };
 
   /// Type implementation for fixed-size but non-loadable tuples.
@@ -149,13 +159,23 @@ namespace {
   public:
     // FIXME: Spare bits between tuple elements.
     FixedTupleTypeInfo(unsigned numFields, llvm::Type *ty,
-                       Size size, Alignment align, IsPOD_t isPOD)
-      : TupleTypeInfoBase(numFields, ty, size, llvm::BitVector{}, align, isPOD)
+                       Size size, llvm::BitVector spareBits, Alignment align,
+                       IsPOD_t isPOD)
+      : TupleTypeInfoBase(numFields, ty, size, std::move(spareBits), align,
+                          isPOD)
     {}
 
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF) const { return Nothing; }
     Nothing_t getNonFixedOffsets(IRGenFunction &IGF,
                                  CanType T) const { return Nothing; }
+
+    // FIXME: Suppress use of extra inhabitants for single-payload enum layout
+    // until we're ready to handle the runtime logic for exporting extra
+    // inhabitants through tuple metadata.
+    bool mayHaveExtraInhabitants(IRGenModule&) const override { return false; }
+    unsigned getFixedExtraInhabitantCount(IRGenModule&) const override {
+      return 0;
+    }
   };
 
   /// An accessor for the non-fixed offsets for a tuple type.
@@ -225,6 +245,7 @@ namespace {
                            const StructLayout &layout) {
     return create<FixedTupleTypeInfo>(fields, layout.getType(),
                                       layout.getSize(),
+                                      layout.getSpareBits(),
                                       layout.getAlignment(),
                                       layout.isKnownPOD());
   }
@@ -233,6 +254,7 @@ namespace {
                            const StructLayout &layout) {
     return create<LoadableTupleTypeInfo>(fields, layout.getType(),
                                          layout.getSize(),
+                                         layout.getSpareBits(),
                                          layout.getAlignment(),
                                          layout.isKnownPOD());
   }


@@ -80,12 +80,14 @@ StructLayout::StructLayout(IRGenModule &IGM, LayoutKind layoutKind,
     assert(!builder.empty() == requiresHeapHeader(layoutKind));
     MinimumAlign = Alignment(1);
     MinimumSize = Size(0);
+    SpareBits.clear();
     IsFixedLayout = true;
     IsKnownPOD = IsPOD;
     Ty = (typeToFill ? typeToFill : IGM.OpaquePtrTy->getElementType());
   } else {
     MinimumAlign = builder.getAlignment();
     MinimumSize = builder.getSize();
+    SpareBits = getSpareBitsFromBuilder(builder);
     IsFixedLayout = builder.isFixedLayout();
     IsKnownPOD = builder.isKnownPOD();
     if (typeToFill) {
@@ -211,20 +213,23 @@ void StructLayoutBuilder::addFixedSizeElement(ElementLayout &elt) {
       = eltAlignment.getValue() - offsetFromAlignment.getValue();
     assert(paddingRequired != 0);
 
+    // Regardless, the storage size goes up.
+    CurSize += Size(paddingRequired);
+
+    // Add the padding to the fixed layout.
     if (isFixedLayout()) {
       auto paddingTy = llvm::ArrayType::get(IGM.Int8Ty, paddingRequired);
       StructFields.push_back(paddingTy);
+
+      // The padding can be used as spare bits by enum layout.
+      CurSpareBits.resize(CurSize.getValueInBits(), true);
     }
-
-    // Regardless, the storage size goes up.
-    CurSize += Size(paddingRequired);
   }
 
   // If the overall structure so far has a fixed layout, then add
   // this as a field to the layout.
   if (isFixedLayout()) {
     addElementAtFixedOffset(elt);
   // Otherwise, just remember the next non-fixed offset index.
   } else {
     addElementAtNonFixedOffset(elt);
@@ -261,11 +266,21 @@ void StructLayoutBuilder::addEmptyElement(ElementLayout &elt) {
 /// aggregate.
 void StructLayoutBuilder::addElementAtFixedOffset(ElementLayout &elt) {
   assert(isFixedLayout());
-  assert(isa<FixedTypeInfo>(elt.getType()));
+  auto &eltTI = cast<FixedTypeInfo>(elt.getType());
 
   elt.completeFixed(elt.getType().isPOD(ResilienceScope::Local),
                     CurSize, StructFields.size());
   StructFields.push_back(elt.getType().getStorageType());
+
+  // Carry over the spare bits from the element.
+  unsigned startBit = CurSize.getValueInBits();
+  unsigned eltBits = eltTI.getFixedSize().getValueInBits();
+  CurSpareBits.resize(startBit + eltBits);
+  if (!eltTI.getSpareBits().empty()) {
+    for (unsigned i = 0; i < eltBits; ++i) {
+      CurSpareBits[startBit + i] = eltTI.getSpareBits()[i];
+    }
+  }
 }
 
 /// Add an element at a non-fixed offset to the aggregate.
@@ -273,6 +288,7 @@ void StructLayoutBuilder::addElementAtNonFixedOffset(ElementLayout &elt) {
   assert(!isFixedLayout());
   elt.completeNonFixed(elt.getType().isPOD(ResilienceScope::Local),
                        NextNonFixedOffsetIndex);
+  CurSpareBits.clear();
 }
 
 /// Add a non-fixed-size element to the aggregate at offset zero.
@@ -281,6 +297,7 @@ void StructLayoutBuilder::addNonFixedSizeElementAtOffsetZero(ElementLayout &elt)
   assert(!isa<FixedTypeInfo>(elt.getType()));
   assert(CurSize.isZero());
   elt.completeInitialNonFixedSize(elt.getType().isPOD(ResilienceScope::Local));
+  CurSpareBits.clear();
 }
 
 /// Produce the current fields as an anonymous structure.


@@ -19,6 +19,7 @@
 #define SWIFT_IRGEN_STRUCTLAYOUT_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Twine.h"
 #include "swift/Basic/Optional.h"
@@ -218,6 +219,7 @@ private:
   SmallVector<llvm::Type*, 8> StructFields;
   Size CurSize = Size(0);
   Alignment CurAlignment = Alignment(1);
+  llvm::BitVector CurSpareBits;
   unsigned NextNonFixedOffsetIndex = 0;
   bool IsFixedLayout = true;
   IsPOD_t IsKnownPOD = IsPOD;
@@ -254,6 +256,9 @@ public:
   /// Return the alignment of the structure built so far.
   Alignment getAlignment() const { return CurAlignment; }
 
+  /// Return the spare bit mask of the structure built so far.
+  const llvm::BitVector &getSpareBits() const { return CurSpareBits; }
+
   /// Build the current elements as a new anonymous struct type.
   llvm::StructType *getAsAnonStruct() const;
@@ -278,6 +283,9 @@ class StructLayout {
   /// The statically-known minimum bound on the size.
   Size MinimumSize;
 
+  /// The statically-known spare bit mask.
+  llvm::BitVector SpareBits;
+
   /// Whether this layout is fixed in size. If so, the size and
   /// alignment are exact.
@@ -289,6 +297,14 @@ class StructLayout {
   llvm::Type *Ty;
   SmallVector<ElementLayout, 8> Elements;
 
+  /// Use the spare bit mask from the builder if there are any fixed spare bits
+  /// to speak of.
+  static llvm::BitVector getSpareBitsFromBuilder(const StructLayoutBuilder &b) {
+    return b.isFixedLayout() && b.getSpareBits().any()
+      ? b.getSpareBits()
+      : llvm::BitVector{};
+  }
+
 public:
   /// Create a structure layout.
   ///
@@ -309,6 +325,7 @@ public:
     : MinimumAlign(builder.getAlignment()),
       MinimumSize(builder.getSize()),
       IsFixedLayout(builder.isFixedLayout()),
+      SpareBits(getSpareBitsFromBuilder(builder)),
      IsKnownPOD(builder.isKnownPOD()),
      Ty(type),
      Elements(elements.begin(), elements.end()) {}
@@ -319,6 +336,7 @@ public:
   llvm::Type *getType() const { return Ty; }
   Size getSize() const { return MinimumSize; }
   Alignment getAlignment() const { return MinimumAlign; }
+  const llvm::BitVector &getSpareBits() const { return SpareBits; }
   bool isKnownEmpty() const { return isFixedLayout() && MinimumSize.isZero(); }
   IsPOD_t isKnownPOD() const { return IsKnownPOD; }


@@ -73,6 +73,8 @@ import Builtin
 // CHECK: %O4enum15StringOwnerLike = type <{ [8 x i8] }>
 
+// CHECK: %O4enum30MultiPayloadSpareBitAggregates = type <{ [16 x i8] }>
+
 // CHECK: %O4enum18MultiPayloadNested = type <{ [9 x i8], [1 x i8] }>
 
 // -- Dynamic enums. The type layout is opaque; we dynamically bitcast to
@@ -2029,6 +2031,64 @@ end:
   return undef : $()
 }
 
+struct S {
+  var a: CharLike
+  var b: IntLike
+}
+
+enum MultiPayloadSpareBitAggregates {
+  // Has spare bits in the padding between Int32 and Int64
+  case x(Builtin.Int32, Builtin.Int64)
+  // Has spare bits in the object pointers
+  case y(C, C)
+  // Has spare bits in the padding between struct fields
+  case z(S)
+}
+
+// CHECK: define void @multi_payload_spare_bit_aggregate_switch(i128) {
+// CHECK: [[T0:%.*]] = lshr i128 %0, 47
+// CHECK: [[TAG:%.*]] = trunc i128 [[T0]] to i16
+// CHECK: switch i16 [[TAG]], label {{%.*}} [
+// CHECK:   i16 0, label %[[X_DEST:[0-9]+]]
+// CHECK:   i16 1, label %[[Y_DEST:[0-9]+]]
+// CHECK:   i16 2, label %[[Z_DEST:[0-9]+]]
+// CHECK: ]
+// CHECK: ; <label>:[[X_DEST]]
+// CHECK: [[X_0:%.*]] = trunc i128 %0 to i32
+// CHECK: [[T1:%.*]] = lshr i128 %0, 64
+// CHECK: [[X_1:%.*]] = trunc i128 [[T1]] to i64
+// CHECK: ; <label>:[[Y_DEST]]
+// -- 0x80007fffffffffff
+// CHECK: [[Y_MASKED:%.*]] = and i128 %0, -9223231299366420481
+// CHECK: [[T2:%.*]] = trunc i128 [[Y_MASKED]] to i64
+// CHECK: [[Y_0:%.*]] = inttoptr i64 [[T2]] to %C4enum1C*
+// CHECK: [[T3:%.*]] = lshr i128 [[Y_MASKED]], 64
+// CHECK: [[T4:%.*]] = trunc i128 [[T3]] to i64
+// CHECK: [[Y_1:%.*]] = inttoptr i64 [[T4]] to %C4enum1C*
+// CHECK: ; <label>:[[Z_DEST]]
+// -- 0x80007fffffffffff
+// CHECK: [[Z_MASKED:%.*]] = and i128 %0, -9223231299366420481
+// CHECK: [[Z_A:%.*]] = trunc i128 [[Z_MASKED]] to i21
+// CHECK: [[T5:%.*]] = lshr i128 [[Z_MASKED]], 64
+// CHECK: [[Z_B:%.*]] = trunc i128 [[T5]] to i64
+sil @multi_payload_spare_bit_aggregate_switch : $(MultiPayloadSpareBitAggregates) -> () {
+entry(%c : $MultiPayloadSpareBitAggregates):
+  switch_enum %c : $MultiPayloadSpareBitAggregates, case #MultiPayloadSpareBitAggregates.x!enumelt.1: x_dest, case #MultiPayloadSpareBitAggregates.y!enumelt.1: y_dest, case #MultiPayloadSpareBitAggregates.z!enumelt.1: z_dest
+x_dest(%x : $(Builtin.Int32, Builtin.Int64)):
+  br end
+y_dest(%y : $(C, C)):
+  br end
+z_dest(%z : $S):
+  br end
+end:
+  return undef : $()
+}
+
 // <rdar://problem/15759464>
 enum MultiPayloadInner {
   case A(Builtin.Int64)
@@ -2100,6 +2160,7 @@ typealias AllConcreteTestEnums = (
   MultiPayloadOneSpareBit,
   MultiPayloadTwoSpareBits,
   StringOwnerLike,
+  MultiPayloadSpareBitAggregates,
   MultiPayloadNested)
 
 var x : AllConcreteTestEnums


@@ -141,6 +141,7 @@ enum MultiPayloadNontrivial {
   case payload1(Builtin.ObjectPointer)
   case payload2(Builtin.Int64)
   case payload3(Builtin.Int64, Builtin.ObjCPointer)
+  case payload4(Builtin.Int64, Builtin.Int64)
   case a
   case b
   case c
@@ -153,11 +154,11 @@ enum MultiPayloadNontrivial {
 // CHECK: [[PAYLOAD_ADDR:%.*]] = bitcast %O20enum_value_semantics22MultiPayloadNontrivial* [[ADDR]] to i128*
 // CHECK: [[PAYLOAD:%.*]] = load i128* [[PAYLOAD_ADDR]], align 8
 // CHECK: [[T0:%.*]] = getelementptr inbounds %O20enum_value_semantics22MultiPayloadNontrivial* %0, i32 0, i32 1
-// CHECK: [[TAG_ADDR:%.*]] = bitcast [1 x i8]* [[T0]] to i2*
-// CHECK: [[TAG:%.*]] = load i2* [[TAG_ADDR]], align 8
-// CHECK: switch i2 [[TAG]], label %[[END:[0-9]+]] [
-// CHECK:   i2 0, label %[[PAYLOAD1_DESTROY:[0-9]+]]
-// CHECK:   i2 -2, label %[[PAYLOAD3_DESTROY:[0-9]+]]
+// CHECK: [[TAG_ADDR:%.*]] = bitcast [1 x i8]* [[T0]] to i3*
+// CHECK: [[TAG:%.*]] = load i3* [[TAG_ADDR]], align 8
+// CHECK: switch i3 [[TAG]], label %[[END:[0-9]+]] [
+// CHECK:   i3 0, label %[[PAYLOAD1_DESTROY:[0-9]+]]
+// CHECK:   i3 2, label %[[PAYLOAD3_DESTROY:[0-9]+]]
 // CHECK: ]
 // CHECK: ; <label>:[[PAYLOAD1_DESTROY]]
 // CHECK: [[PAYLOAD1_TRUNC:%.*]] = trunc i128 [[PAYLOAD]] to i64
@@ -174,6 +175,41 @@ enum MultiPayloadNontrivial {
 // CHECK: ret void
 // CHECK: }
 
+enum MultiPayloadNontrivialSpareBits {
+  case payload1(Builtin.ObjectPointer)
+  case payload2(Builtin.Int64)
+  case payload3(Builtin.Int64, Builtin.ObjCPointer)
+  case a
+  case b
+  case c
+}
+
+// -- MultiPayloadNontrivialSpareBits destroyBuffer
+// CHECK: define linkonce_odr hidden void @_TwxxO20enum_value_semantics31MultiPayloadNontrivialSpareBits(%swift.opaque* [[OBJ:%.*]], %swift.type* %Self) {
+// CHECK: entry:
+// CHECK: [[ADDR:%.*]] = bitcast %swift.opaque* [[OBJ]] to %O20enum_value_semantics31MultiPayloadNontrivialSpareBits*
+// CHECK: [[PAYLOAD_ADDR:%.*]] = bitcast %O20enum_value_semantics31MultiPayloadNontrivialSpareBits* [[ADDR]] to i128*
+// CHECK: [[PAYLOAD:%.*]] = load i128* [[PAYLOAD_ADDR]], align 8
+// CHECK: switch i18 [[SPARE_BITS:%.*]], label %[[END:[0-9]+]] [
+// CHECK:   i18 0, label %[[PAYLOAD1_DESTROY:[0-9]+]]
+// CHECK:   i18 2, label %[[PAYLOAD3_DESTROY:[0-9]+]]
+// CHECK: ]
+// CHECK: ; <label>:[[PAYLOAD1_DESTROY]]
+// CHECK: [[PAYLOAD1_TRUNC:%.*]] = trunc i128 [[PAYLOAD]] to i64
+// CHECK: [[PAYLOAD1_VAL:%.*]] = inttoptr i64 [[PAYLOAD1_TRUNC]] to %swift.refcounted*
+// CHECK: call void @swift_release(%swift.refcounted* [[PAYLOAD1_VAL]])
+// CHECK: br label %[[END]]
+// CHECK: ; <label>:[[PAYLOAD3_DESTROY]]
+// CHECK: [[PAYLOAD3_MASKED:%.*]] = and i128 [[PAYLOAD]]
+// CHECK: [[PAYLOAD3_1_LSHR:%.*]] = lshr i128 [[PAYLOAD3_MASKED]], 64
+// CHECK: [[PAYLOAD3_1_TRUNC:%.*]] = trunc i128 [[PAYLOAD3_1_LSHR]] to i64
+// CHECK: [[PAYLOAD3_1_VAL:%.*]] = inttoptr i64 [[PAYLOAD3_1_TRUNC]] to %objc_object*
+// CHECK: call void @objc_release(%objc_object* [[PAYLOAD3_1_VAL]])
+// CHECK: br label %[[END]]
+// CHECK: ; <label>:[[END]]
+// CHECK: ret void
+// CHECK: }
+
 enum GenericFixedLayout<T> {
   case foo(Builtin.Int64)
   case bar(Builtin.ObjectPointer)