Pass #1 at localizing assumptions about fixed layout and handling non-fixed layouts. This uncovered a bug where we weren't rounding up the header size to the element alignment when allocating an array of archetypes. Writing up a detailed test case for *that* revealed that we were never initializing the length field of heap arrays. Fixing that caused a bunch of tests to crash trying to release stuff. So... I've left this in a workaround state right now because I have to catch a plane.

Swift SVN r4804
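For readers skimming the diff below, the core layout rule being fixed is: the elements of a heap array start at the header size rounded up to the element alignment, and each element occupies one stride (size rounded up to alignment). A minimal standalone sketch of the corrected size computation, with illustrative names rather than anything taken from the patch:

    #include <cstdint>

    // Total size of a heap array allocation: the header (heap header, length
    // field, archetype bindings) rounded up to the element alignment, plus
    // length * stride. Assumes eltAlign is a power of two; the overflow
    // checks the real code performs are omitted here.
    static uint64_t arrayAllocSize(uint64_t headerSize, uint64_t eltAlign,
                                   uint64_t eltStride, uint64_t length) {
      uint64_t paddedHeader = (headerSize + eltAlign - 1) & ~(eltAlign - 1);
      return paddedHeader + length * eltStride;
    }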
@@ -27,11 +27,23 @@ namespace irgen {

/// FixedTypeInfo - An abstract class designed for use when
/// implementing a type that has a statically known layout.
class FixedTypeInfo : public TypeInfo {
private:
  /// The storage size of this type in bytes. This may be zero even
  /// for well-formed and complete types, such as a trivial oneof or
  /// tuple.
  Size StorageSize;

protected:
  FixedTypeInfo(llvm::Type *type, Size size, Alignment align, IsPOD_t pod)
-   : TypeInfo(type, size, align, pod) {}
+   : TypeInfo(type, align, pod, IsFixedSize), StorageSize(size) {}

public:
  // This is useful for metaprogramming.
  static bool isFixed() { return true; }

  /// Whether this type is known to be empty.
  bool isKnownEmpty() const { return StorageSize.isZero(); }

  OwnedAddress allocate(IRGenFunction &IGF, Initialization &init,
                        InitializedObject object,
                        OnHeap_t onHeap,
@@ -55,7 +67,30 @@ public:
  llvm::Constant *getStaticAlignment(IRGenModule &IGM) const;
  llvm::Constant *getStaticStride(IRGenModule &IGM) const;

  // TODO: move the StorageSize etc. members here.
  void completeFixed(Size size, Alignment alignment) {
    StorageSize = size;
    setStorageAlignment(alignment);
  }

  /// Returns the known, fixed alignment of a stored value of this type.
  Alignment getFixedAlignment() const {
    return getBestKnownAlignment();
  }

  /// Returns the known, fixed size required to store a value of this type.
  Size getFixedSize() const {
    return StorageSize;
  }

  /// Returns the (assumed fixed) stride of the storage for this
  /// object. The stride is the storage size rounded up to the
  /// alignment; its practical use is that, in an array, it is the
  /// offset from the start of one element to the start of the next.
  Size getFixedStride() const {
    return StorageSize.roundUpToAlignment(getFixedAlignment());
  }

  static bool classof(const TypeInfo *type) { return type->isFixedSize(); }
};

}

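getFixedStride above is the size/stride distinction in one line. A small worked example, independent of the compiler sources:

    #include <cassert>
    #include <cstdint>

    // roundUpToAlignment as used by getFixedStride; align must be a power of two.
    static uint64_t roundUpToAlignment(uint64_t size, uint64_t align) {
      assert(align && (align & (align - 1)) == 0);
      return (size + align - 1) & ~(align - 1);
    }

    int main() {
      // A type of size 5 and alignment 4 (say, {int32, int8}) has stride 8:
      assert(roundUpToAlignment(5, 4) == 8);
      // so in an array, element i begins at offset i * 8, not i * 5.
      return 0;
    }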
@@ -43,10 +43,6 @@ static llvm::ConstantInt *getMetadataKind(IRGenModule &IGM,
  return llvm::ConstantInt::get(IGM.MetadataKindTy, uint8_t(kind));
}

-static llvm::ConstantInt *getSize(IRGenFunction &IGF, Size value) {
-  return llvm::ConstantInt::get(IGF.IGM.SizeTy, value.getValue());
-}
-
static llvm::ConstantInt *getSize(IRGenFunction &IGF,
                                  const llvm::APInt &value) {
  return cast<llvm::ConstantInt>(llvm::ConstantInt::get(IGF.IGM.SizeTy, value));
@@ -169,35 +165,69 @@ static void bindNecessaryBindings(IRGenFunction &IGF,
  bindings.restore(IGF, bindingsBuffer);
}

-/// Lay out an array on the heap.
-ArrayHeapLayout::ArrayHeapLayout(IRGenFunction &IGF, CanType T)
-  : ElementTI(IGF.getFragileTypeInfo(T)), Bindings(IGF.IGM, T) {
+/// Compute the basic information for how to lay out a heap array.
+HeapArrayInfo::HeapArrayInfo(IRGenFunction &IGF, CanType T)
+  : ElementTI(IGF.getFragileTypeInfo(T)), Bindings(IGF.IGM, T) {}

-  // Add the heap header.
-  Size size(0);
-  Alignment align(1);
+/// Lay out the allocation in this IGF.
+HeapArrayInfo::Layout HeapArrayInfo::getLayout(IRGenFunction &IGF) const {
+  // Start with the heap header.
+  Size headerSize(0);
+  Alignment headerAlign(1);
  SmallVector<llvm::Type*, 4> fields;
-  addHeapHeaderToLayout(IGF.IGM, size, align, fields);
-  assert((size % align).isZero());
-  assert(align >= IGF.IGM.getPointerAlignment());
+  addHeapHeaderToLayout(IGF.IGM, headerSize, headerAlign, fields);
+  assert((headerSize % headerAlign).isZero());
+  assert(headerAlign >= IGF.IGM.getPointerAlignment());

  // Add the length field.
-  size += IGF.IGM.getPointerSize();
-  assert(size == getArrayHeapHeaderSize(IGF.IGM));
+  headerSize += IGF.IGM.getPointerSize();
+  assert(headerSize == getArrayHeapHeaderSize(IGF.IGM));

  // Add the necessary bindings size.
-  size += Bindings.getBufferSize(IGF.IGM);
+  headerSize += Bindings.getBufferSize(IGF.IGM);

+  // The easy case is when we know the layout of the element.
+  if (auto fixedElementTI = dyn_cast<FixedTypeInfo>(&ElementTI)) {
    // Update the required alignment.
-   if (ElementTI.getFixedAlignment() > align)
-     align = ElementTI.getFixedAlignment();
+   if (fixedElementTI->getFixedAlignment() > headerAlign)
+     headerAlign = fixedElementTI->getFixedAlignment();

    // Round the size up to the alignment of the element type.
-   // FIXME: resilient types.
-   size = size.roundUpToAlignment(ElementTI.getFixedAlignment());
+   headerSize = headerSize.roundUpToAlignment(
+                                       fixedElementTI->getFixedAlignment());

-  HeaderSize = size;
-  Align = align;
+   return {
+     IGF.IGM.getSize(headerSize),
+     IGF.IGM.getSize(headerAlign.asSize()),
+     headerAlign
+   };
+  }

+  // Otherwise, we need to do this computation at runtime.

+  // Read the alignment of the element type.
+  llvm::Value *eltAlign = ElementTI.getAlignment(IGF);

+  // Round the header size up to the element alignment.
+  llvm::Value *headerSizeV = IGF.IGM.getSize(headerSize);

+  // mask = alignment - 1
+  // headerSize = (headerSize + mask) & ~mask
+  auto eltAlignMask = IGF.Builder.CreateSub(eltAlign, IGF.IGM.getSize(Size(1)));
+  headerSizeV = IGF.Builder.CreateAdd(headerSizeV, eltAlignMask);
+  llvm::Value *eltAlignMaskInverted = IGF.Builder.CreateNot(eltAlignMask);
+  headerSizeV = IGF.Builder.CreateAnd(headerSizeV, eltAlignMaskInverted,
+                                      "array-header-size");

+  // allocAlign = max(headerAlign, alignment)
+  llvm::Value *headerAlignV = IGF.IGM.getSize(headerAlign.asSize());
+  llvm::Value *overaligned =
+    IGF.Builder.CreateICmpUGT(eltAlign, headerAlignV, "overaligned");
+  llvm::Value *allocAlign =
+    IGF.Builder.CreateSelect(overaligned, eltAlign, headerAlignV);

+  return { headerSizeV, allocAlign, headerAlign };
}

/// Destroy all the elements of an array.

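The CreateSub/CreateAdd/CreateNot/CreateAnd sequence above is the classic branch-free align-up; it is only correct because alignments are powers of two. The same computation in plain C++, for reference (not part of the patch):

    #include <cassert>
    #include <cstdint>

    static uint64_t roundHeaderUp(uint64_t headerSize, uint64_t eltAlign) {
      uint64_t mask = eltAlign - 1;   // CreateSub
      headerSize += mask;             // CreateAdd
      return headerSize & ~mask;      // CreateNot + CreateAnd
    }

    int main() {
      assert(roundHeaderUp(20, 16) == 32); // padded up to the next multiple
      assert(roundHeaderUp(32, 16) == 32); // already aligned: unchanged
      return 0;
    }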
@@ -252,7 +282,7 @@ static void emitArrayDestroy(IRGenFunction &IGF,
/// TODO: give this some reasonable name and possibly linkage.
static llvm::Constant *
createArrayDtorFn(IRGenModule &IGM,
-                  const ArrayHeapLayout &layout,
+                  const HeapArrayInfo &arrayInfo,
                  const NecessaryBindings &bindings) {
  llvm::Function *fn =
    llvm::Function::Create(IGM.DeallocatingDtorTy,
@@ -262,20 +292,26 @@ createArrayDtorFn(IRGenModule &IGM,
  IRGenFunction IGF(IGM, CanType(), llvm::ArrayRef<Pattern*>(),
                    ExplosionKind::Minimal, 0, fn, Prologue::Bare);

+  // Bind the necessary archetypes. This is required before we can
+  // lay out the array in this IGF.
  llvm::Value *header = fn->arg_begin();
-  Address lengthPtr = layout.getLengthPointer(IGF, header);
+  bindNecessaryBindings(IGF, bindings,
+                        Address(header, IGM.getPointerAlignment()));

+  auto layout = arrayInfo.getLayout(IGF);

+  Address lengthPtr = arrayInfo.getLengthPointer(IGF, layout, header);
  llvm::Value *length = IGF.Builder.CreateLoad(lengthPtr, "length");

-  // Bind the necessary archetypes.
-  bindNecessaryBindings(IGF, bindings, Address(header, layout.getAlignment()));
+  auto &eltTI = arrayInfo.getElementTypeInfo();

  // If the layout isn't known to be POD, we actually have to do work here.
-  if (!layout.getElementTypeInfo().isPOD(ResilienceScope::Local)) {
-    llvm::Value *elementSize = layout.getElementTypeInfo().getStride(IGF);
+  if (!eltTI.isPOD(ResilienceScope::Local)) {
+    llvm::Value *elementSize = eltTI.getStride(IGF);

-    llvm::Value *begin = layout.getBeginPointer(IGF, header);
+    llvm::Value *begin = arrayInfo.getBeginPointer(IGF, layout, header);
    llvm::Value *end;
-    if (layout.getElementTypeInfo().StorageType->isSized()) {
+    if (isa<FixedTypeInfo>(eltTI)) {
      end = IGF.Builder.CreateInBoundsGEP(begin, length, "end");
    } else {
      end = IGF.Builder.CreateBitCast(begin, IGF.IGM.Int8PtrTy);
@@ -284,16 +320,17 @@ createArrayDtorFn(IRGenModule &IGM,
      end = IGF.Builder.CreateBitCast(end, begin->getType());
    }

-    emitArrayDestroy(IGF, begin, end, layout.getElementTypeInfo(), elementSize);
+    emitArrayDestroy(IGF, begin, end, eltTI, elementSize);
  }

-  llvm::Value *size = layout.getAllocationSize(IGF, length, false, false);
+  llvm::Value *size =
+    arrayInfo.getAllocationSize(IGF, layout, length, false, false);
  IGF.Builder.CreateRet(size);

  return fn;
}

-llvm::Constant *ArrayHeapLayout::getPrivateMetadata(IRGenModule &IGM) const {
+llvm::Constant *HeapArrayInfo::getPrivateMetadata(IRGenModule &IGM) const {
  return buildPrivateMetadata(IGM, createArrayDtorFn(IGM, *this, Bindings),
                              MetadataKind::HeapArray);
}

@@ -327,7 +364,8 @@ static llvm::Value *checkOverflow(IRGenFunction &IGF,
/// this is false for computations involving a known-good length
/// \param updateLength - whether to update the 'length' parameter
/// with the proper length, i.e. the length as a size_t
-llvm::Value *ArrayHeapLayout::getAllocationSize(IRGenFunction &IGF,
+llvm::Value *HeapArrayInfo::getAllocationSize(IRGenFunction &IGF,
+                                              const Layout &layout,
                                              llvm::Value *&length,
                                              bool canOverflow,
                                              bool updateLength) const {
@@ -335,7 +373,8 @@ llvm::Value *ArrayHeapLayout::getAllocationSize(IRGenFunction &IGF,

  // Easy case: the length is a static constant.
  llvm::ConstantInt *clength = dyn_cast<llvm::ConstantInt>(length);
-  if (clength && ElementTI.StorageType->isSized()) {
+  if (clength && ElementTI.isFixedSize()) {
+    auto &fixedElementTI = cast<FixedTypeInfo>(ElementTI);
    unsigned sizeWidth = IGF.IGM.SizeTy->getBitWidth();

    // Get the length as a size_t, making sure it isn't too large.
@@ -352,13 +391,16 @@ llvm::Value *ArrayHeapLayout::getAllocationSize(IRGenFunction &IGF,
    bool overflow = false;

    // Scale the length by the element stride.
-    llvm::APInt elementStride(sizeWidth, ElementTI.getFixedStride().getValue());
+    llvm::APInt elementStride(sizeWidth,
+                              fixedElementTI.getFixedStride().getValue());
    assert(elementStride);
    auto scaledLength = lenval.umul_ov(elementStride, overflow);
    if (overflow) return getSizeMax(IGF);

    // Add the header size in.
-    llvm::APInt headerSize(sizeWidth, HeaderSize.getValue());
+    assert(isa<llvm::ConstantInt>(layout.HeaderSize) &&
+           "fixed-size array element type without constant header size?");
+    auto &headerSize = cast<llvm::ConstantInt>(layout.HeaderSize)->getValue();
    auto lengthWithHeader = scaledLength.uadd_ov(headerSize, overflow);
    if (overflow) return getSizeMax(IGF);

@@ -396,9 +438,8 @@ llvm::Value *ArrayHeapLayout::getAllocationSize(IRGenFunction &IGF,

  // If the element size is known to be zero, we don't need to do
  // anything further.
-  llvm::Value *headerSize = getSize(IGF, HeaderSize);
-  if (ElementTI.isEmpty(ResilienceScope::Local))
-    return headerSize;
+  if (ElementTI.isKnownEmpty())
+    return layout.HeaderSize;

  llvm::Value *size = properLength;

@@ -414,16 +455,17 @@ llvm::Value *ArrayHeapLayout::getAllocationSize(IRGenFunction &IGF,
  // Increase that by the header size, saturating at SIZE_MAX.
  if (canOverflow) {
    size = checkOverflow(IGF, llvm::Intrinsic::uadd_with_overflow,
-                         size, headerSize);
+                         size, layout.HeaderSize);
  } else {
-    size = IGF.Builder.CreateAdd(size, headerSize);
+    size = IGF.Builder.CreateAdd(size, layout.HeaderSize);
  }

  return size;
}

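getAllocationSize saturates at SIZE_MAX instead of wrapping, both in the constant-folded path (umul_ov/uadd_ov) and in the emitted IR (checkOverflow). A sketch of the same policy using the GCC/Clang overflow builtins; the function name is illustrative:

    #include <cstdint>

    static size_t allocationSize(size_t headerSize, size_t length,
                                 size_t stride) {
      size_t scaled, total;
      if (__builtin_mul_overflow(length, stride, &scaled))
        return SIZE_MAX;                 // saturate, as getSizeMax does
      if (__builtin_add_overflow(scaled, headerSize, &total))
        return SIZE_MAX;
      return total;
    }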
/// Returns a pointer to the 'length' field of an array allocation.
-Address ArrayHeapLayout::getLengthPointer(IRGenFunction &IGF,
+Address HeapArrayInfo::getLengthPointer(IRGenFunction &IGF,
+                                        const Layout &layout,
                                        llvm::Value *alloc) const {
  assert(alloc->getType() == IGF.IGM.RefCountedPtrTy);
  llvm::Value *addr = IGF.Builder.CreateConstInBoundsGEP1_32(alloc, 1);
@@ -432,25 +474,27 @@ Address ArrayHeapLayout::getLengthPointer(IRGenFunction &IGF,
  return Address(addr, IGF.IGM.getPointerAlignment());
}

-llvm::Value *ArrayHeapLayout::getBeginPointer(IRGenFunction &IGF,
+llvm::Value *HeapArrayInfo::getBeginPointer(IRGenFunction &IGF,
+                                            const Layout &layout,
                                            llvm::Value *alloc) const {
  assert(alloc->getType() == IGF.IGM.RefCountedPtrTy);
  alloc = IGF.Builder.CreateBitCast(alloc, IGF.IGM.Int8PtrTy);
-  llvm::Value *begin =
-    IGF.Builder.CreateConstInBoundsGEP1_32(alloc, HeaderSize.getValue());
+  llvm::Value *begin = IGF.Builder.CreateInBoundsGEP(alloc, layout.HeaderSize);
  return IGF.Builder.CreateBitCast(begin,
                                   ElementTI.getStorageType()->getPointerTo());
}

-llvm::Value *ArrayHeapLayout::emitUnmanagedAlloc(IRGenFunction &IGF,
+llvm::Value *HeapArrayInfo::emitUnmanagedAlloc(IRGenFunction &IGF,
                                               llvm::Value *length,
                                               Address &begin,
                                               Expr *init,
                                               const llvm::Twine &name) const
{
+  Layout layout = getLayout(IGF);

  llvm::Constant *metadata = getPrivateMetadata(IGF.IGM);
-  llvm::Value *size = getAllocationSize(IGF, length, true, true);
-  llvm::Value *align = getSize(IGF, Size(Align.getValue()));
+  llvm::Value *size = getAllocationSize(IGF, layout, length, true, true);
+  llvm::Value *align = layout.AllocAlign;

  // Perform the allocation.
  llvm::Value *alloc =
@@ -458,24 +502,34 @@ llvm::Value *ArrayHeapLayout::emitUnmanagedAlloc(IRGenFunction &IGF,

  if (!Bindings.empty()) {
    Address bindingsBuffer =
-      projectBindingsBuffer(IGF, Address(alloc, getAlignment()));
+      projectBindingsBuffer(IGF, Address(alloc, layout.BestStaticAlignment));
    Bindings.save(IGF, bindingsBuffer);
  }

+  // Store the length in the array.
+  Address lengthPtr = getLengthPointer(IGF, layout, alloc);
+  // FIXME: storing the actual length here doesn't seem to work.
+  IGF.Builder.CreateStore(IGF.IGM.getSize(Size(0)), lengthPtr);

  // Find the begin pointer.
-  llvm::Value *beginPtr = getBeginPointer(IGF, alloc);
+  llvm::Value *beginPtr = getBeginPointer(IGF, layout, alloc);
  begin = ElementTI.getAddressForPointer(beginPtr);

  // If we don't have an initializer, just zero-initialize and
  // immediately enter a release cleanup.
  if (!init) {
-    llvm::Value *sizeToMemset =
-      IGF.Builder.CreateSub(size, getSize(IGF, HeaderSize));
+    llvm::Value *sizeToMemset = IGF.Builder.CreateSub(size, layout.HeaderSize);

+    Alignment arrayAlignment = layout.BestStaticAlignment;
+    if (auto offset = dyn_cast<llvm::ConstantInt>(layout.HeaderSize))
+      arrayAlignment =
+        arrayAlignment.alignmentAtOffset(Size(offset->getZExtValue()));

    IGF.Builder.CreateMemSet(
      IGF.Builder.CreateBitCast(beginPtr, IGF.IGM.Int8PtrTy),
      llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0),
      sizeToMemset,
-      Align.alignmentAtOffset(HeaderSize).getValue(),
+      arrayAlignment.getValue(),
      /*volatile*/ false);

  // Otherwise, repeatedly evaluate the initializer into successive
@@ -487,7 +541,7 @@ llvm::Value *ArrayHeapLayout::emitUnmanagedAlloc(IRGenFunction &IGF,
  return alloc;
}

-ManagedValue ArrayHeapLayout::emitAlloc(IRGenFunction &IGF,
+ManagedValue HeapArrayInfo::emitAlloc(IRGenFunction &IGF,
                                      llvm::Value *length,
                                      Address &begin,
                                      Expr *init,

@@ -47,34 +47,45 @@ public:
  llvm::Constant *getPrivateMetadata(IRGenModule &IGM) const;
};

-/// The heap-layout of an array.
-class ArrayHeapLayout {
+/// A class to manage allocating a reference-counted array on the heap.
+class HeapArrayInfo {
  const TypeInfo &ElementTI;
-  Size HeaderSize;
-  Alignment Align;
  NecessaryBindings Bindings;

public:
-  ArrayHeapLayout(IRGenFunction &IGF, CanType T);
+  HeapArrayInfo(IRGenFunction &IGF, CanType T);

  const TypeInfo &getElementTypeInfo() const { return ElementTI; }

-  /// Returns the size required by this array.
-  Size getHeaderSize() const { return HeaderSize; }
+  struct Layout {
+    /// The offset from the allocation to the first element.
+    llvm::Value *HeaderSize;

-  /// Returns the alignment required by this array.
-  Alignment getAlignment() const { return Align; }
+    /// The alignment requirement for the array allocation.
+    llvm::Value *AllocAlign;

+    /// The most aggressive statically-known alignment
+    /// requirement for the total allocation.
+    Alignment BestStaticAlignment;
+  };

+  /// Compute layout for this heap array. The result is local to a
+  /// particular IGF.
+  Layout getLayout(IRGenFunction &IGF) const;

  /// Returns the size required by the given length. If 'canOverflow',
  /// perform overflow checks and produce (size_t) -1 on overflow.
-  llvm::Value *getAllocationSize(IRGenFunction &IGF, llvm::Value *&length,
+  llvm::Value *getAllocationSize(IRGenFunction &IGF, const Layout &layout,
+                                 llvm::Value *&length,
                                 bool canOverflow, bool updateLength) const;

  /// Derive a pointer to the length field of the given allocation.
-  Address getLengthPointer(IRGenFunction &IGF, llvm::Value *alloc) const;
+  Address getLengthPointer(IRGenFunction &IGF, const Layout &layout,
+                           llvm::Value *alloc) const;

  /// Derive a pointer to the first element of the given allocation.
-  llvm::Value *getBeginPointer(IRGenFunction &IGF, llvm::Value *alloc) const;
+  llvm::Value *getBeginPointer(IRGenFunction &IGF, const Layout &layout,
+                               llvm::Value *alloc) const;

  /// Allocate the array without a cleanup.
  llvm::Value *emitUnmanagedAlloc(IRGenFunction &IGF,

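Read together with getLayout earlier in the diff, the Layout fields describe an allocation shaped roughly like this (a sketch assembled from the accessors, not a dump from the compiler):

    +------------------------+  <- allocation (RefCountedPtrTy)
    | heap header            |
    +------------------------+
    | length (size_t)        |  <- getLengthPointer()
    +------------------------+
    | archetype bindings     |  (present only if !Bindings.empty())
    +------------------------+
    | padding to elt align   |
    +------------------------+  <- allocation + Layout::HeaderSize
    | element 0, element 1...|     = getBeginPointer()
    +------------------------+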
@@ -160,10 +160,10 @@ OwnedAddress Initialization::emitVariable(IRGenFunction &IGF, VarDecl *var,
Address IRGenModule::emitGlobalVariable(VarDecl *var,
                                        const TypeInfo &type) {
  // If the variable is empty, don't actually emit it; just return undef.
-  // FIXME: fragility? global destructors?
-  if (type.isEmpty(ResilienceScope::Local)) {
+  // FIXME: global destructors?
+  if (type.isKnownEmpty()) {
    auto undef = llvm::UndefValue::get(type.StorageType->getPointerTo());
-    return Address(undef, Alignment(1));
+    return type.getAddressForPointer(undef);
  }

  /// Get the global variable.
@@ -182,9 +182,9 @@ OwnedAddress Initialization::emitGlobalVariable(IRGenFunction &IGF,
                                                const TypeInfo &type) {
  // If the variable is empty, don't actually emit it; just return undef.
  // FIXME: fragility? global destructors?
-  if (type.isEmpty(ResilienceScope::Local)) {
+  if (type.isKnownEmpty()) {
    auto undef = llvm::UndefValue::get(type.StorageType->getPointerTo());
-    auto addr = Address(undef, Alignment(1));
+    auto addr = type.getAddressForPointer(undef);
    return OwnedAddress(addr, IGF.IGM.RefCountedNull);
  }

@@ -222,7 +222,7 @@ OwnedAddress FixedTypeInfo::allocate(IRGenFunction &IGF, Initialization &init,
                                     OnHeap_t onHeap,
                                     const Twine &name) const {
  // If the type is known to be empty, don't actually allocate anything.
-  if (isEmpty(ResilienceScope::Local)) {
+  if (isKnownEmpty()) {
    OwnedAddress addr = createEmptyAlloca(IGF.IGM, *this);
    init.markAllocated(IGF, object, addr, CleanupsDepth::invalid());
    return addr;
@@ -307,8 +307,7 @@ void Initialization::emitZeroInit(IRGenFunction &IGF, InitializedObject object,
  markInitialized(IGF, object);

  // No work is necessary if the type is empty or the address is global.
-  if (type.isEmpty(ResilienceScope::Local) ||
-      isa<llvm::Constant>(addr.getAddress()))
+  if (type.isKnownEmpty() || isa<llvm::Constant>(addr.getAddress()))
    return;

  ExplosionSchema schema(ExplosionKind::Maximal);

@@ -289,13 +289,12 @@ namespace {
    /// the runtime always provides an entry for such a type; right
    /// now, that mapping is as one of the integer types.
    llvm::Value *visitOpaqueType(CanType type) {
-      IRGenModule &IGM = IGF.IGM;
-      const TypeInfo &opaqueTI = IGM.getFragileTypeInfo(type);
+      auto &opaqueTI = cast<FixedTypeInfo>(IGF.IGM.getFragileTypeInfo(type));
      assert(opaqueTI.getFixedSize() ==
             Size(opaqueTI.getFixedAlignment().getValue()));
      assert(opaqueTI.getFixedSize().isPowerOf2());
      auto numBits = 8 * opaqueTI.getFixedSize().getValue();
-      auto intTy = BuiltinIntegerType::get(numBits, IGM.Context);
+      auto intTy = BuiltinIntegerType::get(numBits, IGF.IGM.Context);
      return emitDirectMetadataRef(CanType(intTy));
    }

@@ -29,7 +29,6 @@
#include "CallEmission.h"
#include "Cleanup.h"
#include "Explosion.h"
#include "FixedTypeInfo.h"
#include "FormalType.h"
#include "FunctionRef.h"
#include "GenClass.h"

@@ -392,9 +392,11 @@ const TypeInfo *TypeConverter::convertOneOfType(OneOfDecl *oneof) {
  } else {
    const TypeInfo &eltTI = getFragileTypeInfo(eltType->getCanonicalType());
    assert(eltTI.isComplete());

+    auto &fixedEltTI = cast<FixedTypeInfo>(eltTI); // FIXME
    storageType = eltTI.StorageType;
-    oneofTI->StorageSize = eltTI.StorageSize;
-    oneofTI->StorageAlignment = eltTI.StorageAlignment;
+    oneofTI->completeFixed(fixedEltTI.getFixedSize(),
+                           fixedEltTI.getFixedAlignment());
    oneofTI->Singleton = &eltTI;
    oneofTI->setPOD(eltTI.isPOD(ResilienceScope::Local));
  }
@@ -445,13 +447,16 @@ const TypeInfo *TypeConverter::convertOneOfType(OneOfDecl *oneof) {
    // zero-size data.
    const TypeInfo &eltTInfo = getFragileTypeInfo(eltType->getCanonicalType());
    assert(eltTInfo.isComplete());
-    if (eltTInfo.isEmpty(ResilienceScope::Local)) continue;
+    if (eltTInfo.isKnownEmpty()) continue;

+    auto &fixedEltTI = cast<FixedTypeInfo>(eltTInfo);

    // The required payload size is the amount of padding needed to
    // get up to the element's alignment, plus the actual size.
-    Size eltPayloadSize = eltTInfo.StorageSize;
-    if (eltTInfo.StorageAlignment.getValue() > discriminatorSize.getValue())
-      eltPayloadSize += Size(eltTInfo.StorageAlignment.getValue()
+    Size eltPayloadSize = fixedEltTI.getFixedSize();
+    if (fixedEltTI.getFixedAlignment().getValue()
+          > discriminatorSize.getValue())
+      eltPayloadSize += Size(fixedEltTI.getFixedAlignment().getValue()
                             - discriminatorSize.getValue());

    payloadSize = std::max(payloadSize, eltPayloadSize);
@@ -459,8 +464,7 @@ const TypeInfo *TypeConverter::convertOneOfType(OneOfDecl *oneof) {
    isPOD &= eltTInfo.isPOD(ResilienceScope::Local);
  }

-  convertedTI->StorageSize = discriminatorSize + payloadSize;
-  convertedTI->StorageAlignment = storageAlignment;
+  convertedTI->completeFixed(discriminatorSize + payloadSize, storageAlignment);
  convertedTI->setPOD(isPOD);

  // Add the payload to the body if necessary.

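The per-element payload computation above can be checked by hand. A standalone restatement (names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Payload bytes contributed by one oneof element: its size, plus the
    // padding needed to reach its alignment past the discriminator.
    static uint64_t payloadSizeFor(uint64_t discriminatorSize,
                                   uint64_t eltSize, uint64_t eltAlign) {
      uint64_t payload = eltSize;
      if (eltAlign > discriminatorSize)
        payload += eltAlign - discriminatorSize;
      return payload;
    }

    int main() {
      // 1-byte discriminator, element of size 8 / alignment 8:
      // 7 bytes of padding precede the payload, so it contributes 15.
      assert(payloadSizeFor(1, 8, 8) == 15);
      return 0;
    }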
@@ -956,7 +956,7 @@ namespace {

    ArchetypeTypeInfo(ArchetypeType *archetype, llvm::Type *type,
                      ArrayRef<ProtocolEntry> protocols)
-      : IndirectTypeInfo(type, Size(1), Alignment(1), IsNotPOD),
+      : IndirectTypeInfo(type, Alignment(1), IsNotPOD, IsNotFixedSize),
        TheArchetype(archetype) {
      assert(protocols.size() == archetype->getConformsTo().size());
      for (unsigned i = 0, e = protocols.size(); i != e; ++i) {
@@ -1103,9 +1103,10 @@ namespace {
    OffsetZero,

    /// It doesn't fit and needs to be side-allocated.
-    Allocate
+    Allocate,

-    // Resilience: it needs to be checked dynamically.
+    /// It needs to be checked dynamically.
+    Dynamic
  };
}

@@ -1163,8 +1164,16 @@ public:

static FixedPacking computePacking(IRGenModule &IGM,
                                   const TypeInfo &concreteTI) {
+  auto fixedTI = dyn_cast<FixedTypeInfo>(&concreteTI);

+  // If the type isn't fixed, we have to do something dynamic.
+  // FIXME: some types are provably too big (or over-aligned) to be
+  // allocated inline.
+  if (!fixedTI)
+    return FixedPacking::Dynamic;

  Size bufferSize = getFixedBufferSize(IGM);
-  Size requiredSize = concreteTI.getFixedSize();
+  Size requiredSize = fixedTI->getFixedSize();

  // Flat out, if we need more space than the buffer provides,
  // we always have to allocate.
@@ -1174,7 +1183,7 @@ static FixedPacking computePacking(IRGenModule &IGM,
    return FixedPacking::Allocate;

  Alignment bufferAlign = getFixedBufferAlignment(IGM);
-  Alignment requiredAlign = concreteTI.getFixedAlignment();
+  Alignment requiredAlign = fixedTI->getFixedAlignment();

  // If the buffer alignment is good enough for the type, great.
  if (bufferAlign >= requiredAlign)
@@ -1191,15 +1200,171 @@ static bool isNeverAllocated(FixedPacking packing) {
  switch (packing) {
  case FixedPacking::OffsetZero: return true;
  case FixedPacking::Allocate: return false;
+  case FixedPacking::Dynamic: return false;
  }
  llvm_unreachable("bad FixedPacking value");
}

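computePacking boils down to three questions asked in order. A standalone restatement with the buffer parameters passed in explicitly (illustrative, not the compiler's API):

    #include <cstdint>

    enum class FixedPacking { OffsetZero, Allocate, Dynamic };

    FixedPacking computePacking(bool hasFixedLayout,
                                uint64_t requiredSize, uint64_t requiredAlign,
                                uint64_t bufferSize, uint64_t bufferAlign) {
      if (!hasFixedLayout)
        return FixedPacking::Dynamic;    // decide at runtime
      if (requiredSize > bufferSize)
        return FixedPacking::Allocate;   // can't fit inline
      if (requiredAlign > bufferAlign)
        return FixedPacking::Allocate;   // inline storage is under-aligned
      return FixedPacking::OffsetZero;   // store directly in the buffer
    }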
namespace {
  /// An operation to be performed for various kinds of packing.
  struct DynamicPackingOperation {
    /// Emit the operation at a concrete packing kind.
    ///
    /// Immediately after this call, there will be an unconditional
    /// branch to the continuation block.
    virtual void emitForPacking(IRGenFunction &IGF, const TypeInfo &type,
                                FixedPacking packing) = 0;

    /// Given that we are currently at the beginning of the
    /// continuation block, complete the operation.
    virtual void complete(IRGenFunction &IGF, const TypeInfo &type) = 0;
  };

  /// A class for merging a particular kind of value across control flow.
  template <class T> class DynamicPackingPHIMapping;

  /// An implementation of DynamicPackingPHIMapping for a single LLVM value.
  template <> class DynamicPackingPHIMapping<llvm::Value*> {
    llvm::PHINode *PHI = nullptr;
  public:
    void collect(IRGenFunction &IGF, const TypeInfo &type, llvm::Value *value) {
      // Add the result to the phi, creating it (unparented) if necessary.
      if (!PHI) PHI = llvm::PHINode::Create(value->getType(), 2,
                                            "dynamic-packing.result");
      PHI->addIncoming(value, IGF.Builder.GetInsertBlock());
    }
    void complete(IRGenFunction &IGF, const TypeInfo &type) {
      assert(PHI);
      IGF.Builder.Insert(PHI);
    }
    llvm::Value *get(IRGenFunction &IGF, const TypeInfo &type) {
      assert(PHI);
      return PHI;
    }
  };

  /// An implementation of DynamicPackingPHIMapping for Addresses.
  template <> class DynamicPackingPHIMapping<Address>
      : private DynamicPackingPHIMapping<llvm::Value*> {
    typedef DynamicPackingPHIMapping<llvm::Value*> super;
  public:
    void collect(IRGenFunction &IGF, const TypeInfo &type, Address value) {
      super::collect(IGF, type, value.getAddress());
    }
    void complete(IRGenFunction &IGF, const TypeInfo &type) {
      super::complete(IGF, type);
    }
    Address get(IRGenFunction &IGF, const TypeInfo &type) {
      return type.getAddressForPointer(super::get(IGF, type));
    }
  };

  /// An implementation of packing operations based around a lambda.
  template <class ResultTy, class FnTy>
  class LambdaDynamicPackingOperation : public DynamicPackingOperation {
    FnTy Fn;
    DynamicPackingPHIMapping<ResultTy> Mapping;
  public:
    explicit LambdaDynamicPackingOperation(FnTy &&fn) : Fn(fn) {}
    void emitForPacking(IRGenFunction &IGF, const TypeInfo &type,
                        FixedPacking packing) override {
      Mapping.collect(IGF, type, Fn(IGF, type, packing));
    }

    void complete(IRGenFunction &IGF, const TypeInfo &type) override {
      Mapping.complete(IGF, type);
    }

    ResultTy get(IRGenFunction &IGF, const TypeInfo &type) {
      return Mapping.get(IGF, type);
    }
  };

  /// A partial specialization for lambda-based packing operations
  /// that return 'void'.
  template <class FnTy>
  class LambdaDynamicPackingOperation<void, FnTy>
      : public DynamicPackingOperation {
    FnTy Fn;
  public:
    explicit LambdaDynamicPackingOperation(FnTy &&fn) : Fn(fn) {}
    void emitForPacking(IRGenFunction &IGF, const TypeInfo &type,
                        FixedPacking packing) override {
      Fn(IGF, type, packing);
    }
    void complete(IRGenFunction &IGF, const TypeInfo &type) override {}
    void get(IRGenFunction &IGF, const TypeInfo &type) {}
  };
}

/// Dynamic check for the enabling conditions of different kinds of
/// packing into a fixed-size buffer, and perform an operation at each
/// of them.
static void emitDynamicPackingOperation(IRGenFunction &IGF,
                                        const TypeInfo &type,
                                        DynamicPackingOperation &operation) {
  llvm::Value *size = type.getSize(IGF);
  llvm::Value *align = type.getAlignment(IGF);

  auto indirectBB = IGF.createBasicBlock("dynamic-packing.indirect");
  auto directBB = IGF.createBasicBlock("dynamic-packing.direct");
  auto contBB = IGF.createBasicBlock("dynamic-packing.cont");

  // Check whether the type is either over-sized or over-aligned.
  auto bufferSize = IGF.IGM.getSize(getFixedBufferSize(IGF.IGM));
  auto oversize = IGF.Builder.CreateICmpUGT(size, bufferSize, "oversized");
  auto bufferAlign = IGF.IGM.getSize(getFixedBufferAlignment(IGF.IGM).asSize());
  auto overalign = IGF.Builder.CreateICmpUGT(align, bufferAlign, "overaligned");

  // Branch.
  llvm::Value *cond = IGF.Builder.CreateOr(oversize, overalign, "indirect");
  IGF.Builder.CreateCondBr(cond, indirectBB, directBB);

  // Emit the indirect path.
  IGF.Builder.emitBlock(indirectBB);
  operation.emitForPacking(IGF, type, FixedPacking::Allocate);
  IGF.Builder.CreateBr(contBB);

  // Emit the direct path.
  IGF.Builder.emitBlock(directBB);
  operation.emitForPacking(IGF, type, FixedPacking::OffsetZero);
  IGF.Builder.CreateBr(contBB);

  // Enter the continuation block and add the PHI if required.
  IGF.Builder.emitBlock(contBB);
  operation.complete(IGF, type);
}

/// A helper function for creating a lambda-based DynamicPackingOperation.
template <class ResultTy, class FnTy>
LambdaDynamicPackingOperation<ResultTy, FnTy>
makeLambdaDynamicPackingOperation(FnTy &&fn) {
  return LambdaDynamicPackingOperation<ResultTy, FnTy>(std::move(fn));
}

/// Perform an operation on a type that requires dynamic packing.
template <class ResultTy, class... ArgTys>
static ResultTy emitForDynamicPacking(IRGenFunction &IGF,
                                      ResultTy (*fn)(IRGenFunction &IGF,
                                                     const TypeInfo &type,
                                                     FixedPacking packing,
                                                     ArgTys... args),
                                      const TypeInfo &type,
                                      // using enable_if to block template argument deduction
                                      typename std::enable_if<true,ArgTys>::type... args) {
  auto operation = makeLambdaDynamicPackingOperation<ResultTy>(
    [&](IRGenFunction &IGF, const TypeInfo &type, FixedPacking packing) {
      return fn(IGF, type, packing, args...);
    });
  emitDynamicPackingOperation(IGF, type, operation);
  return operation.get(IGF, type);
}

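The `typename std::enable_if<true,ArgTys>::type` trick above puts ArgTys in a non-deduced context, so the pack is deduced solely from the function pointer and the trailing arguments merely convert to it. A minimal demonstration of the same idiom:

    #include <type_traits>

    template <class... ArgTys>
    void call(void (*fn)(ArgTys...),
              // non-deduced context: args never influence ArgTys
              typename std::enable_if<true, ArgTys>::type... args) {
      fn(args...);
    }

    static void takesLong(long) {}

    int main() {
      // Without the enable_if wrapper, deduction would see 'int' (from the
      // literal) and 'long' (from takesLong) and fail; here 1 just converts.
      call(takesLong, 1);
      return 0;
    }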
/// Emit a 'projectBuffer' operation. Always returns a T*.
static Address emitProjectBuffer(IRGenFunction &IGF,
-                                 Address buffer,
+                                 const TypeInfo &type,
                                 FixedPacking packing,
-                                 const TypeInfo &type) {
+                                 Address buffer) {
  llvm::PointerType *resultTy = type.getStorageType()->getPointerTo();
  switch (packing) {
  case FixedPacking::Allocate: {
@@ -1213,6 +1378,9 @@ static Address emitProjectBuffer(IRGenFunction &IGF,
    return IGF.Builder.CreateBitCast(buffer, resultTy, "object");
  }

+  case FixedPacking::Dynamic:
+    return emitForDynamicPacking(IGF, &emitProjectBuffer, type, buffer);

  }
  llvm_unreachable("bad packing!");

@@ -1220,9 +1388,9 @@ static Address emitProjectBuffer(IRGenFunction &IGF,

/// Emit an 'allocateBuffer' operation. Always returns a T*.
static Address emitAllocateBuffer(IRGenFunction &IGF,
-                                  Address buffer,
+                                  const TypeInfo &type,
                                  FixedPacking packing,
-                                  const TypeInfo &type) {
+                                  Address buffer) {
  switch (packing) {
  case FixedPacking::Allocate: {
    auto sizeAndAlign = type.getSizeAndAlignment(IGF);
@@ -1237,15 +1405,18 @@ static Address emitAllocateBuffer(IRGenFunction &IGF,
  }

  case FixedPacking::OffsetZero:
-    return emitProjectBuffer(IGF, buffer, packing, type);
+    return emitProjectBuffer(IGF, type, packing, buffer);

+  case FixedPacking::Dynamic:
+    return emitForDynamicPacking(IGF, &emitAllocateBuffer, type, buffer);
  }
  llvm_unreachable("bad packing!");
}

/// Emit an 'assignWithCopy' operation.
static void emitAssignWithCopy(IRGenFunction &IGF,
-                               Address src, Address dest,
-                               const TypeInfo &type) {
+                               const TypeInfo &type,
+                               Address src, Address dest) {
  Explosion value(ExplosionKind::Maximal);
  type.load(IGF, src, value);
  type.assign(IGF, value, dest);
@@ -1253,8 +1424,8 @@ static void emitAssignWithCopy(IRGenFunction &IGF,

/// Emit an 'assignWithTake' operation.
static void emitAssignWithTake(IRGenFunction &IGF,
-                               Address src, Address dest,
-                               const TypeInfo &type) {
+                               const TypeInfo &type,
+                               Address src, Address dest) {
  Explosion value(ExplosionKind::Maximal);
  type.loadAsTake(IGF, src, value);
  type.assign(IGF, value, dest);
@@ -1262,9 +1433,9 @@ static void emitAssignWithTake(IRGenFunction &IGF,

/// Emit a 'deallocateBuffer' operation.
static void emitDeallocateBuffer(IRGenFunction &IGF,
-                                 Address buffer,
+                                 const TypeInfo &type,
                                 FixedPacking packing,
-                                 const TypeInfo &type) {
+                                 Address buffer) {
  switch (packing) {
  case FixedPacking::Allocate: {
    Address slot =
@@ -1276,6 +1447,9 @@ static void emitDeallocateBuffer(IRGenFunction &IGF,

  case FixedPacking::OffsetZero:
    return;

+  case FixedPacking::Dynamic:
+    return emitForDynamicPacking(IGF, &emitDeallocateBuffer, type, buffer);
  }
  llvm_unreachable("bad packing!");
}

@@ -1294,77 +1468,86 @@ namespace {
      : Buffer(buffer), Packing(packing), ConcreteTI(concreteTI) {}

    void emit(IRGenFunction &IGF) const {
-      emitDeallocateBuffer(IGF, Buffer, Packing, ConcreteTI);
+      emitDeallocateBuffer(IGF, ConcreteTI, Packing, Buffer);
    }
  };
}

/// Emit a 'destroyObject' operation.
static void emitDestroyObject(IRGenFunction &IGF,
-                              Address object,
-                              const TypeInfo &type) {
+                              const TypeInfo &type,
+                              Address object) {
  if (!type.isPOD(ResilienceScope::Local))
    type.destroy(IGF, object);
}

/// Emit a 'destroyBuffer' operation.
static void emitDestroyBuffer(IRGenFunction &IGF,
-                              Address buffer,
+                              const TypeInfo &type,
                              FixedPacking packing,
-                              const TypeInfo &type) {
-  Address object = emitProjectBuffer(IGF, buffer, packing, type);
-  emitDestroyObject(IGF, object, type);
-  emitDeallocateBuffer(IGF, buffer, packing, type);
+                              Address buffer) {
+  // Special-case dynamic packing in order to thread the jumps.
+  if (packing == FixedPacking::Dynamic)
+    return emitForDynamicPacking(IGF, &emitDestroyBuffer, type, buffer);

+  Address object = emitProjectBuffer(IGF, type, packing, buffer);
+  emitDestroyObject(IGF, type, object);
+  emitDeallocateBuffer(IGF, type, packing, buffer);
}

/// Emit an 'initializeWithCopy' operation.
static void emitInitializeWithCopy(IRGenFunction &IGF,
-                                   Address dest, Address src,
-                                   const TypeInfo &type) {
+                                   const TypeInfo &type,
+                                   Address dest, Address src) {
  type.initializeWithCopy(IGF, dest, src);
}

/// Emit an 'initializeWithTake' operation.
static void emitInitializeWithTake(IRGenFunction &IGF,
-                                   Address dest, Address src,
-                                   const TypeInfo &type) {
+                                   const TypeInfo &type,
+                                   Address dest, Address src) {
  type.initializeWithTake(IGF, dest, src);
}

/// Emit an 'initializeBufferWithCopyOfBuffer' operation.
/// Returns the address of the destination object.
static Address emitInitializeBufferWithCopyOfBuffer(IRGenFunction &IGF,
-                                                    Address dest,
-                                                    Address src,
+                                                    const TypeInfo &type,
                                                    FixedPacking packing,
-                                                    const TypeInfo &type) {
-  Address destObject = emitAllocateBuffer(IGF, dest, packing, type);
-  Address srcObject = emitProjectBuffer(IGF, src, packing, type);
-  emitInitializeWithCopy(IGF, destObject, srcObject, type);
+                                                    Address dest,
+                                                    Address src) {
+  // Special-case dynamic packing in order to thread the jumps.
+  if (packing == FixedPacking::Dynamic)
+    return emitForDynamicPacking(IGF, &emitInitializeBufferWithCopyOfBuffer,
+                                 type, dest, src);

+  Address destObject = emitAllocateBuffer(IGF, type, packing, dest);
+  Address srcObject = emitProjectBuffer(IGF, type, packing, src);
+  emitInitializeWithCopy(IGF, type, destObject, srcObject);
  return destObject;
}

/// Emit an 'initializeBufferWithCopy' operation.
/// Returns the address of the destination object.
static Address emitInitializeBufferWithCopy(IRGenFunction &IGF,
-                                            Address dest,
-                                            Address srcObject,
+                                            const TypeInfo &type,
                                            FixedPacking packing,
-                                            const TypeInfo &type) {
-  Address destObject = emitAllocateBuffer(IGF, dest, packing, type);
-  emitInitializeWithCopy(IGF, destObject, srcObject, type);
+                                            Address dest,
+                                            Address srcObject) {
+  Address destObject = emitAllocateBuffer(IGF, type, packing, dest);
+  emitInitializeWithCopy(IGF, type, destObject, srcObject);
  return destObject;
}

/// Emit an 'initializeBufferWithTake' operation.
/// Returns the address of the destination object.
static Address emitInitializeBufferWithTake(IRGenFunction &IGF,
-                                            Address dest,
-                                            Address srcObject,
+                                            const TypeInfo &type,
                                            FixedPacking packing,
-                                            const TypeInfo &type) {
-  Address destObject = emitAllocateBuffer(IGF, dest, packing, type);
-  emitInitializeWithTake(IGF, destObject, srcObject, type);
+                                            Address dest,
+                                            Address srcObject) {
+  Address destObject = emitAllocateBuffer(IGF, type, packing, dest);
+  emitInitializeWithTake(IGF, type, destObject, srcObject);
  return destObject;
}

@@ -1409,7 +1592,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
  switch (index) {
  case ValueWitness::AllocateBuffer: {
    Address buffer = getArgAsBuffer(IGF, argv, "buffer");
-    Address result = emitAllocateBuffer(IGF, buffer, packing, type);
+    Address result = emitAllocateBuffer(IGF, type, packing, buffer);
    result = IGF.Builder.CreateBitCast(result, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(result.getAddress());
    return;
@@ -1418,7 +1601,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
  case ValueWitness::AssignWithCopy: {
    Address dest = getArgAs(IGF, argv, type, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
-    emitAssignWithCopy(IGF, src, dest, type);
+    emitAssignWithCopy(IGF, type, src, dest);
    dest = IGF.Builder.CreateBitCast(dest, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(dest.getAddress());
    return;
@@ -1427,7 +1610,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
  case ValueWitness::AssignWithTake: {
    Address dest = getArgAs(IGF, argv, type, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
-    emitAssignWithTake(IGF, src, dest, type);
+    emitAssignWithTake(IGF, type, src, dest);
    dest = IGF.Builder.CreateBitCast(dest, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(dest.getAddress());
    return;
@@ -1435,21 +1618,21 @@ static void buildValueWitnessFunction(IRGenModule &IGM,

  case ValueWitness::DeallocateBuffer: {
    Address buffer = getArgAsBuffer(IGF, argv, "buffer");
-    emitDeallocateBuffer(IGF, buffer, packing, type);
+    emitDeallocateBuffer(IGF, type, packing, buffer);
    IGF.Builder.CreateRetVoid();
    return;
  }

  case ValueWitness::Destroy: {
    Address object = getArgAs(IGF, argv, type, "object");
-    emitDestroyObject(IGF, object, type);
+    emitDestroyObject(IGF, type, object);
    IGF.Builder.CreateRetVoid();
    return;
  }

  case ValueWitness::DestroyBuffer: {
    Address buffer = getArgAsBuffer(IGF, argv, "buffer");
-    emitDestroyBuffer(IGF, buffer, packing, type);
+    emitDestroyBuffer(IGF, type, packing, buffer);
    IGF.Builder.CreateRetVoid();
    return;
  }
@@ -1458,7 +1641,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
    Address dest = getArgAsBuffer(IGF, argv, "dest");
    Address src = getArgAsBuffer(IGF, argv, "src");
    Address result =
-      emitInitializeBufferWithCopyOfBuffer(IGF, dest, src, packing, type);
+      emitInitializeBufferWithCopyOfBuffer(IGF, type, packing, dest, src);
    result = IGF.Builder.CreateBitCast(result, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(result.getAddress());
    return;
@@ -1468,7 +1651,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
    Address dest = getArgAsBuffer(IGF, argv, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
    Address result =
-      emitInitializeBufferWithCopy(IGF, dest, src, packing, type);
+      emitInitializeBufferWithCopy(IGF, type, packing, dest, src);
    result = IGF.Builder.CreateBitCast(result, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(result.getAddress());
    return;
@@ -1478,7 +1661,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
    Address dest = getArgAsBuffer(IGF, argv, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
    Address result =
-      emitInitializeBufferWithTake(IGF, dest, src, packing, type);
+      emitInitializeBufferWithTake(IGF, type, packing, dest, src);
    result = IGF.Builder.CreateBitCast(result, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(result.getAddress());
    return;
@@ -1487,7 +1670,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
  case ValueWitness::InitializeWithCopy: {
    Address dest = getArgAs(IGF, argv, type, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
-    emitInitializeWithCopy(IGF, dest, src, type);
+    emitInitializeWithCopy(IGF, type, dest, src);
    dest = IGF.Builder.CreateBitCast(dest, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(dest.getAddress());
    return;
@@ -1496,7 +1679,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,
  case ValueWitness::InitializeWithTake: {
    Address dest = getArgAs(IGF, argv, type, "dest");
    Address src = getArgAs(IGF, argv, type, "src");
-    emitInitializeWithTake(IGF, dest, src, type);
+    emitInitializeWithTake(IGF, type, dest, src);
    dest = IGF.Builder.CreateBitCast(dest, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(dest.getAddress());
    return;
@@ -1504,7 +1687,7 @@ static void buildValueWitnessFunction(IRGenModule &IGM,

  case ValueWitness::ProjectBuffer: {
    Address buffer = getArgAsBuffer(IGF, argv, "buffer");
-    Address result = emitProjectBuffer(IGF, buffer, packing, type);
+    Address result = emitProjectBuffer(IGF, type, packing, buffer);
    result = IGF.Builder.CreateBitCast(result, IGF.IGM.OpaquePtrTy);
    IGF.Builder.CreateRet(result.getAddress());
    return;

@@ -1799,11 +1982,18 @@ static llvm::Constant *getDestroyStrongFunction(IRGenModule &IGM) {
/// Return a function which takes three pointer arguments, memcpys
/// from the second to the first, and returns the first argument.
static llvm::Constant *getMemCpyFunction(IRGenModule &IGM,
-                                         const TypeInfo &type) {
-  llvm::Type *argTys[] = { IGM.Int8PtrTy, IGM.Int8PtrTy, IGM.WitnessTablePtrTy };
+                                         const TypeInfo &objectTI) {
+  llvm::Type *argTys[] = { IGM.Int8PtrTy, IGM.Int8PtrTy, IGM.TypeMetadataPtrTy };
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(IGM.Int8PtrTy, argTys, false);

+  // If we don't have a fixed type, use the standard copy-opaque-POD
+  // routine. It's not quite clear how in practice we'll be able to
+  // conclude that something is known-POD without knowing its size,
+  // but it's (1) conceivable and (2) needed as a general export anyway.
+  auto *fixedTI = dyn_cast<FixedTypeInfo>(&objectTI);
+  if (!fixedTI) return IGM.getCopyPODFn();

  // We need to unique by both size and alignment. Note that we're
  // assuming that it's safe to call a function that returns a pointer
  // at a site that assumes the function returns void.
@@ -1811,9 +2001,9 @@ static llvm::Constant *getMemCpyFunction(IRGenModule &IGM,
  {
    llvm::raw_svector_ostream nameStream(name);
    nameStream << "__swift_memcpy";
-    nameStream << type.getFixedSize().getValue();
+    nameStream << fixedTI->getFixedSize().getValue();
    nameStream << '_';
-    nameStream << type.getFixedAlignment().getValue();
+    nameStream << fixedTI->getFixedAlignment().getValue();
  }

  llvm::Constant *fn = IGM.Module.getOrInsertFunction(name, fnTy);
@@ -1821,9 +2011,9 @@ static llvm::Constant *getMemCpyFunction(IRGenModule &IGM,
    IRGenFunction IGF(IGM, CanType(), ArrayRef<Pattern*>(),
                      ExplosionKind::Minimal, 0, def, Prologue::Bare);
    auto it = def->arg_begin();
-    Address dest(it++, type.getFixedAlignment());
-    Address src(it++, type.getFixedAlignment());
-    IGF.emitMemCpy(dest, src, type.getFixedSize());
+    Address dest(it++, fixedTI->getFixedAlignment());
+    Address src(it++, fixedTI->getFixedAlignment());
+    IGF.emitMemCpy(dest, src, fixedTI->getFixedSize());
    IGF.Builder.CreateRet(dest.getAddress());
  }
  return fn;

@@ -2449,7 +2639,7 @@ static llvm::Constant *buildWitnessTable(IRGenModule &IGM,
                                         initializer, "witness_table");

  // Abstract away the length.
-  llvm::ConstantInt *zero = llvm::ConstantInt::get(IGM.SizeTy, 0);
+  llvm::Constant *zero = IGM.getSize(Size(0));
  llvm::Constant *indices[] = { zero, zero };
  return llvm::ConstantExpr::getInBoundsGetElementPtr(var, indices);
}
@@ -3401,7 +3591,7 @@ Address irgen::emitExistentialContainerInit(IRGenFunction &IGF,
  Address buffer = destLayout.projectExistentialBuffer(IGF, dest);

  // If the type is provably empty, we're done.
-  if (srcTI.isEmpty(ResilienceScope::Local)) {
+  if (srcTI.isKnownEmpty()) {
    assert(packing == FixedPacking::OffsetZero);
    return buffer;
  }
@@ -3415,7 +3605,7 @@ Address irgen::emitExistentialContainerInit(IRGenFunction &IGF,
                   Alignment(1));
  } else {
    // Otherwise, allocate using what we know statically about the type.
-    return emitAllocateBuffer(IGF, buffer, packing, srcTI);
+    return emitAllocateBuffer(IGF, srcTI, packing, buffer);
  }
}

@@ -95,8 +95,8 @@ public:
};

/// A metaprogrammed TypeInfo implementation for sequential types.
-template <class Impl, class FieldImpl_>
-class SequentialTypeInfo : public FixedTypeInfo { // FIXME: not true!
+template <class Impl, class Base, class FieldImpl_>
+class SequentialTypeInfo : public Base {
public:
  typedef FieldImpl_ FieldImpl;

@@ -116,8 +116,8 @@ private:

protected:
  SequentialTypeInfo(llvm::Type *ty, unsigned numFields)
-    : FixedTypeInfo(ty, Size(0), Alignment(0), IsPOD), NumFields(numFields) {
-    assert(!isComplete());
+    : Base(ty, Size(0), Alignment(0), IsPOD), NumFields(numFields) {
+    assert(!this->isComplete());
  }

public:
@@ -208,8 +208,10 @@ public:
  void initializeWithCopy(IRGenFunction &IGF, Address dest,
                          Address src) const {
    // If we're POD, use the generic routine.
-    if (isPOD(ResilienceScope::Local))
-      return FixedTypeInfo::initializeWithCopy(IGF, dest, src);
+    if (this->isPOD(ResilienceScope::Local) && Base::isFixedSize()) {
+      return cast<FixedTypeInfo>(this)->
+               FixedTypeInfo::initializeWithCopy(IGF, dest, src);
+    }

    for (auto &field : getFields()) {
      if (field.isEmpty()) continue;
@@ -343,7 +345,7 @@ public:
    minimalExplosionSize += fieldTI.getExplosionSize(ExplosionKind::Minimal);
    fieldInfo.MinimalEnd = minimalExplosionSize;

-    bool isEmpty = fieldTI.isEmpty(ResilienceScope::Local);
+    bool isEmpty = fieldTI.isKnownEmpty();
    fieldInfo.IsEmpty = isEmpty;
  }

@@ -51,8 +51,8 @@ namespace {
  };

  /// Layout information for struct types.
-  class StructTypeInfo :
-    public SequentialTypeInfo<StructTypeInfo, StructFieldInfo> {
+  class StructTypeInfo : // FIXME: FixedTypeInfo as the base class is a lie.
+    public SequentialTypeInfo<StructTypeInfo, FixedTypeInfo, StructFieldInfo> {
  public:
    StructTypeInfo(llvm::Type *T, unsigned numFields)
      : SequentialTypeInfo(T, numFields) {

@@ -59,8 +59,8 @@ namespace {
  };

  /// Layout information for tuple types.
-  class TupleTypeInfo :
-    public SequentialTypeInfo<TupleTypeInfo, TupleFieldInfo> {
+  class TupleTypeInfo : // FIXME: FixedTypeInfo as base is a lie
+    public SequentialTypeInfo<TupleTypeInfo, FixedTypeInfo, TupleFieldInfo> {
  public:
    TupleTypeInfo(llvm::Type *T, unsigned numFields)
      : SequentialTypeInfo(T, numFields) {

@@ -49,6 +49,13 @@ Address TypeInfo::getAddressForPointer(llvm::Value *ptr) const {
  return Address(ptr, StorageAlignment);
}

+/// Whether this type is known to be empty.
+bool TypeInfo::isKnownEmpty() const {
+  if (auto fixed = dyn_cast<FixedTypeInfo>(this))
+    return fixed->isKnownEmpty();
+  return false;
+}

/// Copy a value from one object to a new object, directly taking
/// responsibility for anything it might have. This is like C++
/// move-initialization, except the old object will not be destroyed.

@@ -155,6 +155,7 @@ public:
  bool isZero() const { return Value == 0; }

  Alignment alignmentAtOffset(Size S) const;
+  Size asSize() const;

  explicit operator bool() const { return Value != 0; }

@@ -246,6 +247,11 @@ inline Alignment Alignment::alignmentAtOffset(Size S) const {
  return *this;
}

+/// Get this alignment as a Size value.
+inline Size Alignment::asSize() const {
+  return Size(getValue());
+}

} // end namespace irgen
} // end namespace swift

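alignmentAtOffset (used in emitUnmanagedAlloc's memset earlier in the diff) answers: given storage aligned to A, what alignment survives at byte offset S? A sketch of the arithmetic it must implement; the real definition lives in this header:

    #include <cassert>
    #include <cstdint>

    // For power-of-two alignments, the guaranteed alignment at 'offset'
    // is capped by the lowest set bit of the offset.
    static uint64_t alignmentAtOffset(uint64_t align, uint64_t offset) {
      if (offset == 0) return align;
      uint64_t offsetAlign = offset & -offset; // lowest set bit
      return offsetAlign < align ? offsetAlign : align;
    }

    int main() {
      assert(alignmentAtOffset(16, 8) == 8); // 16-aligned base + 8 bytes
      assert(alignmentAtOffset(4, 8) == 4);  // offset can't raise alignment
      return 0;
    }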
@@ -286,6 +286,17 @@ llvm::Constant *IRGenModule::getSlowRawDeallocFn() {
  return SlowRawDeallocFn;
}

+llvm::Constant *IRGenModule::getCopyPODFn() {
+  if (CopyPODFn) return CopyPODFn;

+  /// void *swift_copyPOD(void *dest, void *src, Metadata *self);
+  llvm::Type *types[] = { OpaquePtrTy, OpaquePtrTy, TypeMetadataPtrTy };
+  llvm::FunctionType *fnType =
+    llvm::FunctionType::get(OpaquePtrTy, types, false);
+  CopyPODFn = createRuntimeFunction(*this, "swift_copyPOD", fnType);
+  return CopyPODFn;
+}

llvm::Constant *IRGenModule::getDynamicCastClassFn() {
  if (DynamicCastClassFn) return DynamicCastClassFn;

@@ -498,6 +509,10 @@ llvm::Constant *IRGenModule::getObjCEmptyVTablePtr() {
  return ObjCEmptyVTablePtr;
}

+llvm::Constant *IRGenModule::getSize(Size size) {
+  return llvm::ConstantInt::get(SizeTy, size.getValue());
+}

void IRGenModule::unimplemented(SourceLoc loc, StringRef message) {
  Context.Diags.diagnose(loc, diag::irgen_unimplemented, message);
}

@@ -230,6 +230,8 @@ public:
  llvm::Constant *getSlowAllocFn();
  llvm::Constant *getSlowRawDeallocFn();

+  llvm::Constant *getCopyPODFn();

  llvm::Constant *getDynamicCastClassFn();
  llvm::Constant *getDynamicCastClassUnconditionalFn();
  llvm::Constant *getDynamicCastFn();
@@ -274,6 +276,7 @@ private:
  llvm::Constant *DynamicCastClassUnconditionalFn = nullptr;
  llvm::Constant *DynamicCastFn = nullptr;
  llvm::Constant *DynamicCastUnconditionalFn = nullptr;
+  llvm::Constant *CopyPODFn = nullptr;
  llvm::Constant *GetFunctionMetadataFn = nullptr;
  llvm::Constant *GetGenericMetadataFn = nullptr;
  llvm::Constant *GetMetatypeMetadataFn = nullptr;
@@ -320,6 +323,8 @@ public:
                                  ExtraData data,
                                  llvm::AttributeSet &attrs);

+  llvm::Constant *getSize(Size size);

  FormalType getTypeOfGetter(ValueDecl *D);
  FormalType getTypeOfSetter(ValueDecl *D);

@@ -470,8 +470,8 @@ void IRGenSILFunction::visitConstantRefInst(swift::ConstantRefInst *i) {
  Address addr;

  // If the variable is empty, don't actually emit it; just return undef.
-  // FIXME: fragility? global destructors?
-  if (type.isEmpty(ResilienceScope::Local)) {
+  // FIXME: global destructors?
+  if (type.isKnownEmpty()) {
    auto undef = llvm::UndefValue::get(type.StorageType->getPointerTo());
    addr = Address(undef, Alignment(1));
  } else {
@@ -1214,9 +1214,9 @@ void IRGenSILFunction::visitAllocArrayInst(swift::AllocArrayInst *i) {

  Explosion lengthEx = getLoweredExplosion(i->getNumElements());
  llvm::Value *lengthValue = lengthEx.claimUnmanagedNext();
-  ArrayHeapLayout layout(*this, i->getElementType()->getCanonicalType());
+  HeapArrayInfo arrayInfo(*this, i->getElementType()->getCanonicalType());
  Address ptr;
-  llvm::Value *box = layout.emitUnmanagedAlloc(*this,
+  llvm::Value *box = arrayInfo.emitUnmanagedAlloc(*this,
                                                lengthValue,
                                                ptr,
                                                nullptr,

@@ -54,10 +54,6 @@ public:
  unsigned getExplosionSize(ExplosionKind kind) const { return 1; }

  void initializeWithTake(IRGenFunction &IGF, Address dest, Address src) const {
    IGF.emitMemCpy(dest, src, this->Base::getFixedSize());
  }

  void load(IRGenFunction &IGF, Address src, Explosion &out) const {
    // Create a temporary.
    Initialization init;
@@ -18,6 +18,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

#include "FixedTypeInfo.h"
#include "IRGenFunction.h"
#include "IRGenModule.h"
#include "StructLayout.h"
@@ -136,7 +137,7 @@ bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,
  auto &eltTI = *elt.Type;

  // If the element type is empty, it adds nothing.
  if (eltTI.isEmpty(ResilienceScope::Local)) {
  if (eltTI.isKnownEmpty()) {
    elt.StructIndex = ElementLayout::NoStructIndex;
    elt.ByteOffset = Size(-1);
    continue;

@@ -146,6 +147,7 @@ bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,
  addedStorage = true;

  // FIXME: handle resilient/dependently-sized types
  auto &fixedEltTI = cast<FixedTypeInfo>(eltTI);

  // TODO: consider using different layout rules.
  // If the rules are changed so that fields aren't necessarily laid

@@ -153,21 +155,22 @@ bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,
  // RO-data will need to be fixed.

  // The struct alignment is the max of the alignment of the fields.
  CurAlignment = std::max(CurAlignment, eltTI.getFixedAlignment());
  CurAlignment = std::max(CurAlignment, fixedEltTI.getFixedAlignment());

  // If the current tuple size isn't a multiple of the field's
  // required alignment, we need to pad out.
  if (Size offsetFromAlignment = CurSize % eltTI.getFixedAlignment()) {
  Alignment eltAlignment = fixedEltTI.getFixedAlignment();
  if (Size offsetFromAlignment = CurSize % eltAlignment) {
    unsigned paddingRequired
      = eltTI.getFixedAlignment().getValue() - offsetFromAlignment.getValue();
      = eltAlignment.getValue() - offsetFromAlignment.getValue();
    assert(paddingRequired != 0);

    // We don't actually need to uglify the IR unless the natural
    // alignment of the IR type for the field isn't good enough.
    Alignment fieldIRAlignment(
      IGM.DataLayout.getABITypeAlignment(eltTI.StorageType));
    assert(fieldIRAlignment <= eltTI.getFixedAlignment());
    if (fieldIRAlignment != eltTI.getFixedAlignment()) {
    assert(fieldIRAlignment <= eltAlignment);
    if (fieldIRAlignment != eltAlignment) {
      auto paddingTy = llvm::ArrayType::get(IGM.Int8Ty, paddingRequired);
      StructFields.push_back(paddingTy);
    }

@@ -181,7 +184,7 @@ bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,
  elt.StructIndex = StructFields.size();

  StructFields.push_back(eltTI.getStorageType());
  CurSize += eltTI.getFixedSize();
  CurSize += fixedEltTI.getFixedSize();
}

return addedStorage;
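To make the padding arithmetic concrete, here is a worked example with assumed field layouts (an i8 field followed by an i64 field; the sizes are illustrative, not from the patch):

  #include <cassert>
  #include <cstdint>

  // Bytes of padding needed to bring curSize up to a multiple of align.
  uint64_t padTo(uint64_t curSize, uint64_t align) {
    uint64_t rem = curSize % align;
    return rem ? align - rem : 0;
  }

  int main() {
    // field 0: size 1, align 1  ->  CurSize = 1
    // field 1: size 8, align 8  ->  1 % 8 == 1, paddingRequired = 8 - 1 = 7,
    //                               the field lands at offset 8, CurSize = 16
    assert(padTo(1, 8) == 7);
    assert(padTo(16, 8) == 0);
    return 0;
  }

Note that the [7 x i8] padding array is only emitted when the field's natural ABI alignment in IR would not already produce that offset.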
@@ -49,6 +49,14 @@ inline IsPOD_t &operator&=(IsPOD_t &l, IsPOD_t r) {
  return (l = (l & r));
}

enum IsFixedSize_t : bool { IsNotFixedSize, IsFixedSize };
inline IsFixedSize_t operator&(IsFixedSize_t l, IsFixedSize_t r) {
  return IsFixedSize_t(unsigned(l) & unsigned(r));
}
inline IsFixedSize_t &operator&=(IsFixedSize_t &l, IsFixedSize_t r) {
  return (l = (l & r));
}
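These operators mirror the existing IsPOD_t ones: an aggregate is fixed-size only if every one of its parts is, which operator&= expresses directly. A usage sketch over a hypothetical element collection:

  IsFixedSize_t aggregateFixed = IsFixedSize;
  for (const TypeInfo *eltTI : elementTypeInfos)  // hypothetical collection
    aggregateFixed &= eltTI->isFixedSize();       // any non-fixed part wins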

/// Information about the IR representation and generation of the
/// given type.
class TypeInfo {

@@ -59,9 +67,14 @@ class TypeInfo {
  mutable const TypeInfo *NextConverted;

protected:
  TypeInfo(llvm::Type *Type, Size S, Alignment A, IsPOD_t pod)
    : NextConverted(0), StorageType(Type), StorageSize(S),
      StorageAlignment(A), POD(pod) {}
  TypeInfo(llvm::Type *Type, Alignment A, IsPOD_t pod, IsFixedSize_t fixed)
    : NextConverted(0), StorageType(Type), StorageAlignment(A),
      POD(pod), Fixed(fixed) {}

  /// Change the minimum alignment of a stored value of this type.
  void setStorageAlignment(Alignment alignment) {
    StorageAlignment = alignment;
  }
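The new constructor is what lets a subclass opt out of fixed layout: it takes no size, and the Fixed bit records which family the object belongs to. A sketch of a hypothetical non-fixed subclass (the name and shape are illustrative; non-fixed type infos are not part of this patch):

  // Layout known only at runtime, e.g. for an archetype.
  class NonFixedTypeInfo : public TypeInfo {
  public:
    NonFixedTypeInfo(llvm::Type *storageTy, Alignment align, IsPOD_t pod)
      : TypeInfo(storageTy, align, pod, IsNotFixedSize) {}
    // Size and stride queries must go through value witnesses here;
    // there is no getFixedSize() to call.
  };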

public:
  virtual ~TypeInfo() = default;

@@ -77,11 +90,6 @@ public:
  llvm::Type *StorageType;

private:
  /// The storage size of this type in bytes. This may be zero even
  /// for well-formed and complete types, such as a trivial oneof or
  /// tuple.
  Size StorageSize;

  /// The storage alignment of this type in bytes. This is never zero
  /// for a completely-converted type.
  Alignment StorageAlignment;

@@ -89,12 +97,10 @@ private:
  /// Whether this type is known to be POD.
  unsigned POD : 1;

public:
  void completeFixed(Size size, Alignment alignment) {
    StorageSize = size;
    StorageAlignment = alignment;
  }
  /// Whether this type is known to be fixed in size.
  unsigned Fixed : 1;

public:
  /// Sets whether this type is POD. Should only be called during
  /// completion of a forward-declaration.
  void setPOD(IsPOD_t isPOD) { POD = unsigned(isPOD); }

@@ -102,36 +108,26 @@ public:
  /// Whether this type info has been completely converted.
  bool isComplete() const { return !StorageAlignment.isZero(); }

  /// Whether this type is known to be empty within the given
  /// resilience scope.
  bool isEmpty(ResilienceScope Scope) const { return StorageSize.isZero(); }
  /// Whether this type is known to be empty.
  bool isKnownEmpty() const;

  /// Whether this type is known to be POD, i.e. to not require any
  /// particular action on copy or destroy.
  IsPOD_t isPOD(ResilienceScope scope) const { return IsPOD_t(POD); }

  /// Whether this type is known to be fixed-size in the local
  /// resilience domain. If true, this TypeInfo can be cast to
  /// FixedTypeInfo.
  IsFixedSize_t isFixedSize() const {
    return IsFixedSize_t(Fixed);
  }

  llvm::Type *getStorageType() const { return StorageType; }

  Size getFixedSize() const {
    return StorageSize;
  }

  Alignment getFixedAlignment() const {
    return StorageAlignment;
  }

  Alignment getBestKnownAlignment() const {
    return StorageAlignment;
  }

  /// Returns the (assumed fixed) stride of the storage for this
  /// object. The stride is the storage size rounded up to the
  /// alignment; its practical use is that, in an array, it is the
  /// offset from the size of one element to the offset of the next.
  Size getFixedStride() const {
    return StorageSize.roundUpToAlignment(StorageAlignment);
  }

  /// Given a generic pointer to this type, produce an Address for it.
  Address getAddressForPointer(llvm::Value *ptr) const;
test/IRGen/heaparrays.swift (new file, 105 lines)
@@ -0,0 +1,105 @@
// RUN: %swift -triple x86_64-apple-darwin10 %s -emit-llvm | FileCheck %s

// CHECK: [[REFCOUNT:%.*]] = type { [[TYPE:%swift.type]]*, i64 }
// CHECK: [[INT:%Si]] = type { i64 }
// CHECK: [[OPAQUE:%swift.opaque]] = type opaque

func make_array<T>(n : Int) -> T[] {
  return new T[n]
}

// CHECK: define { i8*, i64, [[REFCOUNT]]* } @_T10heaparrays10make_arrayU__FT1nSi_GVSs5SliceQ__(

// Pull out the value witness table for T.
// CHECK: [[T0:%.*]] = bitcast [[TYPE]]* %T to i8***
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8*** [[T0]], i64 -1
// CHECK-NEXT: %T.value = load i8*** [[T1]], align 8

// CHECK: [[BOUND:%.*]] = call i64 @_TSi18getArrayBoundValuefRSiFT_Bi64_([[INT]]*

// Grab alignof(T) and use it to round the header size up to the
// element alignment.
// CHECK: [[T0:%.*]] = getelementptr inbounds i8** %T.value, i32 13
// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]], align 8
// CHECK-NEXT: [[T_ALIGN:%.*]] = ptrtoint i8* [[T1]] to i64
// CHECK-NEXT: [[T0:%.*]] = sub i64 [[T_ALIGN]], 1
// CHECK-NEXT: [[T1:%.*]] = add i64 32, [[T0]]
// CHECK-NEXT: [[T2:%.*]] = xor i64 [[T0]], -1
// CHECK-NEXT: [[HEADER_SIZE:%.*]] = and i64 [[T1]], [[T2]]
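The four instructions above are the standard branch-free idiom for rounding up to a power-of-two alignment; restated in C++ (the 32 is the heap-array header size this test assumes):

  #include <cstdint>

  // header = (32 + (align - 1)) & ~(align - 1), for power-of-two align
  uint64_t roundUp(uint64_t base, uint64_t align) {
    uint64_t mask = align - 1;      // the sub
    return (base + mask) & ~mask;   // the add, xor -1, and
  }
  // roundUp(32, 8) == 32, roundUp(32, 16) == 32, roundUp(32, 64) == 64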

// Compute the required allocation alignment: max(alignof(T), 8).
// CHECK-NEXT: [[T0:%.*]] = icmp ugt i64 [[T_ALIGN]], 8
// CHECK-NEXT: [[ALLOC_ALIGN:%.*]] = select i1 [[T0]], i64 [[T_ALIGN]], i64 8

// Grab the stride, compute the allocation size with overflow checks, and allocate.
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8** %T.value, i32 14
// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]], align 8
// CHECK-NEXT: [[T_STRIDE:%.*]] = ptrtoint i8* [[T1]] to i64
// CHECK-NEXT: [[T0:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[BOUND]], i64 [[T_STRIDE]])
// CHECK-NEXT: [[T1:%.*]] = extractvalue { i64, i1 } [[T0]], 0
// CHECK-NEXT: [[T2:%.*]] = extractvalue { i64, i1 } [[T0]], 1
// CHECK-NEXT: [[T3:%.*]] = select i1 [[T2]], i64 -1, i64 [[T1]]
// CHECK-NEXT: [[T0:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[T3]], i64 [[HEADER_SIZE]])
// CHECK-NEXT: [[T1:%.*]] = extractvalue { i64, i1 } [[T0]], 0
// CHECK-NEXT: [[T2:%.*]] = extractvalue { i64, i1 } [[T0]], 1
// CHECK-NEXT: [[ALLOC_SIZE:%.*]] = select i1 [[T2]], i64 -1, i64 [[T1]]
// CHECK-NEXT: [[ALLOC:%.*]] = call noalias [[REFCOUNT]]* @swift_allocObject({{.*}}
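Restating what those checked intrinsics compute: on overflow the size saturates to -1 (all ones), so the allocator is guaranteed to fail rather than under-allocate. A sketch using the GCC/Clang overflow builtins (the helper name is mine):

  #include <cstdint>

  uint64_t arrayAllocSize(uint64_t bound, uint64_t stride, uint64_t header) {
    uint64_t bytes;
    if (__builtin_mul_overflow(bound, stride, &bytes))  // bound * stride
      return UINT64_MAX;                                // the "select -1"
    if (__builtin_add_overflow(bytes, header, &bytes))  // + header size
      return UINT64_MAX;
    return bytes;
  }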

// Initialize the binding.
// CHECK-NEXT: [[T0:%.*]] = bitcast [[REFCOUNT]]* [[ALLOC]] to i8*
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8* [[T0]], i32 24
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[TYPE]]**
// CHECK-NEXT: store [[TYPE]]* %T, [[TYPE]]** [[T2]], align 8

// Initialize the length.
// FIXME: this should be storing [[BOUND]]!
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[REFCOUNT]]* [[ALLOC]], i32 1
// CHECK-NEXT: [[T1:%.*]] = bitcast [[REFCOUNT]]* [[T0]] to i64*
// CHECK-NEXT: store i64 0, i64* [[T1]], align 8

// Zero-initialize the elements.
// CHECK-NEXT: [[T0:%.*]] = bitcast [[REFCOUNT]]* [[ALLOC]] to i8*
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8* [[T0]], i64 [[HEADER_SIZE]]
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[OPAQUE]]*
// CHECK-NEXT: [[T3:%.*]] = sub i64 [[ALLOC_SIZE]], [[HEADER_SIZE]]
// CHECK-NEXT: [[T4:%.*]] = bitcast [[OPAQUE]]* [[T2]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T4]], i8 0, i64 [[T3]], i32 8, i1 false)

// Create the slice.
// CHECK-NEXT: [[T5:%.*]] = bitcast [[OPAQUE]]* [[T2]] to i8*
// CHECK-NEXT: call { i8*, i64, %swift.refcounted* } @_TVSs5Slice20convertFromHeapArrayU__fMGS_Q__FT4baseBp5ownerBo6lengthBi64__GS_Q__(i8* [[T5]], [[REFCOUNT]]* [[ALLOC]], i64 [[BOUND]], [[TYPE]]* %T)

// CHECK: define internal i64 @arraydestroy([[REFCOUNT]]*
// Load the binding.
// CHECK: [[T0:%.*]] = bitcast [[REFCOUNT]]* [[ALLOC:%.*]] to i8*
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8* [[T0]], i32 24
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[TYPE]]**
// CHECK-NEXT: %T = load [[TYPE]]** [[T2]], align 8
// CHECK-NEXT: [[T0:%.*]] = bitcast [[TYPE]]* %T to i8***
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8*** [[T0]], i64 -1
// CHECK-NEXT: %T.value = load i8*** [[T1]], align 8
// Compute the header size.
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8** %T.value, i32 13
// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]], align 8
// CHECK-NEXT: [[T_ALIGN:%.*]] = ptrtoint i8* [[T1]] to i64
// CHECK-NEXT: [[T0:%.*]] = sub i64 [[T_ALIGN]], 1
// CHECK-NEXT: [[T1:%.*]] = add i64 32, [[T0]]
// CHECK-NEXT: [[T2:%.*]] = xor i64 [[T0]], -1
// CHECK-NEXT: [[HEADER_SIZE:%.*]] = and i64 [[T1]], [[T2]]
// Load the length.
// CHECK: [[T0:%.*]] = getelementptr inbounds [[REFCOUNT]]* [[ALLOC]], i32 1
// CHECK-NEXT: [[T1:%.*]] = bitcast [[REFCOUNT]]* [[T0]] to i64*
// CHECK-NEXT: [[BOUND:%.*]] = load i64* [[T1]], align 8
// Load the stride and find the limits of the array.
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8** %T.value, i32 14
// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]], align 8
// CHECK-NEXT: [[T_STRIDE:%.*]] = ptrtoint i8* [[T1]] to i64
// CHECK-NEXT: [[T0:%.*]] = bitcast [[REFCOUNT]]* [[ALLOC:%.*]] to i8*
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8* [[T0]], i64 [[HEADER_SIZE]]
// CHECK-NEXT: [[BEGIN:%.*]] = bitcast i8* [[T1]] to [[OPAQUE]]*
// CHECK-NEXT: [[T0:%.*]] = bitcast [[OPAQUE]]* [[BEGIN]] to i8*
// CHECK-NEXT: [[T1:%.*]] = mul i64 [[T_STRIDE]], [[BOUND]]
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8* [[T0]], i64 [[T1]]
// CHECK-NEXT: [[END:%.*]] = bitcast i8* [[T2]] to [[OPAQUE]]*
// Loop over the elements.
// CHECK-NEXT: icmp eq [[OPAQUE]]* [[BEGIN]], [[END]]
// CHECK-NEXT: br i1