Clean up the internal APIs around dynamic allocas to structurally
discourage accidental use of them.  Relatedly, fix several bugs
where we were accidentally using dynamic allocas.
This commit is contained in:
John McCall
2017-12-19 00:27:54 -05:00
parent 0a9be5da79
commit 1f3f33e67d
16 changed files with 169 additions and 123 deletions

View File

@@ -120,6 +120,11 @@ public:
StackAddress(Address address, llvm::Value *SP)
: Addr(address), StackPtrResetLocation(SP) {}
/// Return a StackAddress with the address changed in some superficial way.
StackAddress withAddress(Address addr) const {
return StackAddress(addr, StackPtrResetLocation);
}
llvm::Value *getAddressPointer() const { return Addr.getAddress(); }
Alignment getAlignment() const { return Addr.getAlignment(); }
Address getAddress() const { return Addr; }

View File

@@ -17,6 +17,7 @@
#ifndef SWIFT_IRGEN_CALLEMISSION_H
#define SWIFT_IRGEN_CALLEMISSION_H
#include "Address.h"
#include "Callee.h"
namespace llvm {
@@ -26,7 +27,6 @@ namespace llvm {
namespace swift {
namespace irgen {
class Address;
class Explosion;
class LoadableTypeInfo;
struct WitnessMetadata;
@@ -36,10 +36,18 @@ class CallEmission {
public:
IRGenFunction &IGF;
struct TypedTemporary {
StackAddress Temp;
SILType Type;
};
private:
/// The builtin/special arguments to pass to the call.
SmallVector<llvm::Value*, 8> Args;
/// Temporaries required by the call.
SmallVector<TypedTemporary, 4> Temporaries;
/// The function we're going to call.
Callee CurCallee;

View File

@@ -76,7 +76,7 @@ public:
return (isFixedSize(expansion) && StorageSize.isZero());
}
StackAddress allocateStack(IRGenFunction &IGF, SILType T, bool isEntryBlock,
StackAddress allocateStack(IRGenFunction &IGF, SILType T,
const llvm::Twine &name) const override;
void deallocateStack(IRGenFunction &IGF, StackAddress addr, SILType T) const override;
void destroyStack(IRGenFunction &IGF, StackAddress addr, SILType T,

View File

@@ -1315,6 +1315,12 @@ llvm::CallSite CallEmission::emitCallSite() {
Args.clear();
// Deallocate all the temporaries.
for (auto &temporary : Temporaries) {
auto &ti = IGF.getTypeInfo(temporary.Type);
ti.deallocateStack(IGF, temporary.Temp, temporary.Type);
}
// Return.
return call;
}
@@ -1386,7 +1392,7 @@ void CallEmission::emitToExplosion(Explosion &out, bool isOutlined) {
// explode that temporary.
if (LastArgWritten == 1) {
StackAddress ctemp = substResultTI.allocateStack(IGF, substResultType,
false, "call.aggresult");
"call.aggresult");
Address temp = ctemp.getAddress();
emitToMemory(temp, substResultTI, isOutlined);
@@ -1623,8 +1629,9 @@ static void emitCoerceAndExpand(IRGenFunction &IGF, Explosion &in,
}
// Otherwise, materialize to a temporary.
Address temporary =
paramTI.allocateStack(IGF, paramTy, false, "coerce-and-expand.temp").getAddress();
auto temporaryAlloc =
paramTI.allocateStack(IGF, paramTy, "coerce-and-expand.temp");
Address temporary = temporaryAlloc.getAddress();
auto coercionTyLayout = IGF.IGM.DataLayout.getStructLayout(coercionTy);
@@ -1683,7 +1690,7 @@ static void emitCoerceAndExpand(IRGenFunction &IGF, Explosion &in,
paramTI.loadAsTake(IGF, temporary, out);
}
paramTI.deallocateStack(IGF, StackAddress(temporary), paramTy);
paramTI.deallocateStack(IGF, temporaryAlloc, paramTy);
}
static void emitDirectExternalArgument(IRGenFunction &IGF, SILType argType,
@@ -1794,12 +1801,14 @@ emitClangExpandedArgument(IRGenFunction &IGF, Explosion &in, Explosion &out,
}
// Otherwise, materialize to a temporary.
Address temp = swiftTI.allocateStack(IGF, swiftType, false,
"clang-expand-arg.temp").getAddress();
auto ctemp = swiftTI.allocateStack(IGF, swiftType, "clang-expand-arg.temp");
Address temp = ctemp.getAddress();
swiftTI.initialize(IGF, in, temp, isOutlined);
Address castTemp = IGF.Builder.CreateBitCast(temp, IGF.IGM.Int8PtrTy);
ClangExpandLoadEmitter(IGF, out).visit(clangType, castTemp);
swiftTI.deallocateStack(IGF, ctemp, swiftType);
}
/// Given a Clang-expanded (according to ABIArgInfo::Expand) parameter
@@ -1816,17 +1825,21 @@ void irgen::emitClangExpandedParameter(IRGenFunction &IGF,
}
// Otherwise, materialize to a temporary.
Address temp = swiftTI.allocateStack(IGF, swiftType, false,
"clang-expand-param.temp").getAddress();
auto tempAlloc = swiftTI.allocateStack(IGF, swiftType,
"clang-expand-param.temp");
Address temp = tempAlloc.getAddress();
Address castTemp = IGF.Builder.CreateBitCast(temp, IGF.IGM.Int8PtrTy);
ClangExpandStoreEmitter(IGF, in).visit(clangType, castTemp);
// Then load out.
swiftTI.loadAsTake(IGF, temp, out);
swiftTI.deallocateStack(IGF, tempAlloc, swiftType);
}
static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
Explosion &in, Explosion &out,
SmallVectorImpl<CallEmission::TypedTemporary> &temporaries,
bool isOutlined) {
auto silConv = IGF.IGM.silConv;
auto fnType = callee.getOrigFunctionType();
@@ -1894,8 +1907,11 @@ static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
}
case clang::CodeGen::ABIArgInfo::Indirect: {
auto &ti = cast<LoadableTypeInfo>(IGF.getTypeInfo(paramType));
Address addr = ti.allocateStack(IGF, paramType, false,
"indirect-temporary").getAddress();
auto temp = ti.allocateStack(IGF, paramType, "indirect-temporary");
temporaries.push_back({temp, paramType});
Address addr = temp.getAddress();
// Set at least the alignment the ABI expects.
if (AI.getIndirectByVal()) {
auto ABIAlign = AI.getIndirectAlign();
@@ -2121,7 +2137,8 @@ void CallEmission::setArgs(Explosion &original, bool isOutlined,
case SILFunctionTypeRepresentation::ObjCMethod:
adjusted.add(getCallee().getObjCMethodReceiver());
adjusted.add(getCallee().getObjCMethodSelector());
externalizeArguments(IGF, getCallee(), original, adjusted, isOutlined);
externalizeArguments(IGF, getCallee(), original, adjusted,
Temporaries, isOutlined);
break;
case SILFunctionTypeRepresentation::Block:
@@ -2129,7 +2146,8 @@ void CallEmission::setArgs(Explosion &original, bool isOutlined,
LLVM_FALLTHROUGH;
case SILFunctionTypeRepresentation::CFunctionPointer:
externalizeArguments(IGF, getCallee(), original, adjusted, isOutlined);
externalizeArguments(IGF, getCallee(), original, adjusted,
Temporaries, isOutlined);
break;
case SILFunctionTypeRepresentation::WitnessMethod:
@@ -2204,23 +2222,23 @@ Address IRGenFunction::getErrorResultSlot(SILType errorType) {
// Create the alloca. We don't use allocateStack because we're
// not allocating this in stack order.
auto addr = builder.CreateAlloca(errorTI.getStorageType(), nullptr,
"swifterror");
addr->setAlignment(errorTI.getFixedAlignment().getValue());
auto addr = createAlloca(errorTI.getStorageType(),
errorTI.getFixedAlignment(),
"swifterror");
// Only add the swifterror attribute on ABIs that pass it in a register.
// We create a shadow stack location of the swifterror parameter for the
// debugger on platforms that pass swifterror by reference and so we can't
// mark the parameter with a swifterror attribute for these.
if (IGM.IsSwiftErrorInRegister)
addr->setSwiftError(true);
cast<llvm::AllocaInst>(addr.getAddress())->setSwiftError(true);
// Initialize at the alloca point.
auto nullError = llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(errorTI.getStorageType()));
builder.CreateStore(nullError, addr, errorTI.getFixedAlignment());
builder.CreateStore(nullError, addr);
ErrorResultSlot = addr;
ErrorResultSlot = addr.getAddress();
}
return Address(ErrorResultSlot, IGM.getPointerAlignment());
}
@@ -2256,8 +2274,9 @@ void IRGenFunction::emitPrologue() {
Builder.SetInsertPoint(EntryBB);
// Set up the alloca insertion point.
AllocaIP = Builder.CreateAlloca(IGM.Int1Ty, /*array size*/ nullptr,
"alloca point");
AllocaIP = Builder.IRBuilderBase::CreateAlloca(IGM.Int1Ty,
/*array size*/ nullptr,
"alloca point");
}
/// Emit a branch to the return block and set the insert point there.
@@ -2573,6 +2592,8 @@ Explosion NativeConventionSchema::mapFromNative(IRGenModule &IGM,
temporary, loadableTI.getStorageType()->getPointerTo());
loadableTI.loadAsTake(IGF, storageAddr, nonNativeExplosion);
Builder.CreateLifetimeEnd(temporary, tempSize);
return nonNativeExplosion;
}

View File

@@ -813,7 +813,7 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
} else if (origNativeSchema.requiresIndirect()) {
assert(!nativeResultSchema.requiresIndirect());
auto stackAddr = outResultTI.allocateStack(
subIGF, outConv.getSILResultType(), false, "return.temp");
subIGF, outConv.getSILResultType(), "return.temp");
resultValueAddr = stackAddr.getAddress();
auto resultAddr = subIGF.Builder.CreateBitCast(
resultValueAddr,
@@ -1097,7 +1097,7 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
// The +1 argument is passed indirectly, so we need to copy into a
// temporary.
needsAllocas = true;
auto stackAddr = fieldTI.allocateStack(subIGF, fieldTy, false, "arg.temp");
auto stackAddr = fieldTI.allocateStack(subIGF, fieldTy, "arg.temp");
auto addressPointer = stackAddr.getAddress().getAddress();
fieldTI.initializeWithCopy(subIGF, stackAddr.getAddress(), fieldAddr,
fieldTy, false);

View File

@@ -57,7 +57,6 @@ void IRGenModule::emitSILGlobalVariable(SILGlobalVariable *var) {
}
StackAddress FixedTypeInfo::allocateStack(IRGenFunction &IGF, SILType T,
bool isEntryBlock,
const Twine &name) const {
// If the type is known to be empty, don't actually allocate anything.
if (isKnownEmpty(ResilienceExpansion::Maximal)) {

View File

@@ -443,39 +443,47 @@ irgen::emitInitializeBufferWithTakeOfBufferCall(IRGenFunction &IGF,
/// Emit a dynamic alloca call to allocate enough memory to hold an object of
/// type 'T', saving an llvm.stacksave restore point when the alloca is not
/// emitted in the entry block (so the stack can be reset on deallocation).
DynamicAlloca irgen::emitDynamicAlloca(IRGenFunction &IGF, SILType T,
bool isInEntryBlock) {
StackAddress IRGenFunction::emitDynamicAlloca(SILType T,
const llvm::Twine &name) {
llvm::Value *size = emitLoadOfSize(*this, T);
return emitDynamicAlloca(IGM.Int8Ty, size, Alignment(16), name);
}
StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy,
llvm::Value *arraySize,
Alignment align,
const llvm::Twine &name) {
llvm::Value *stackRestorePoint = nullptr;
// Save the stack pointer if we are not in the entry block (we could be
// executed more than once).
bool isInEntryBlock = (Builder.GetInsertBlock() == &*CurFn->begin());
if (!isInEntryBlock) {
auto *stackSaveFn = llvm::Intrinsic::getDeclaration(
&IGF.IGM.Module, llvm::Intrinsic::ID::stacksave);
&IGM.Module, llvm::Intrinsic::ID::stacksave);
stackRestorePoint = IGF.Builder.CreateCall(stackSaveFn, {}, "spsave");
stackRestorePoint = Builder.CreateCall(stackSaveFn, {}, "spsave");
}
// Emit the dynamic alloca.
llvm::Value *size = emitLoadOfSize(IGF, T);
auto *alloca = IGF.Builder.CreateAlloca(IGF.IGM.Int8Ty, size, "alloca");
alloca->setAlignment(16);
auto *alloca = Builder.IRBuilderBase::CreateAlloca(eltTy, arraySize, name);
alloca->setAlignment(align.getValue());
assert(!isInEntryBlock ||
IGF.getActiveDominancePoint().isUniversal() &&
getActiveDominancePoint().isUniversal() &&
"Must be in entry block if we insert dynamic alloca's without "
"stackrestores");
return {alloca, stackRestorePoint};
return {Address(alloca, align), stackRestorePoint};
}
/// Deallocate a dynamic alloca's memory, if required, by restoring the stack
/// pointer to its saved position from before the dynamic alloca call.
void irgen::emitDeallocateDynamicAlloca(IRGenFunction &IGF,
StackAddress address) {
void IRGenFunction::emitDeallocateDynamicAlloca(StackAddress address) {
if (!address.needsSPRestore())
return;
auto *stackRestoreFn = llvm::Intrinsic::getDeclaration(
&IGF.IGM.Module, llvm::Intrinsic::ID::stackrestore);
IGF.Builder.CreateCall(stackRestoreFn, address.getSavedSP());
&IGM.Module, llvm::Intrinsic::ID::stackrestore);
Builder.CreateCall(stackRestoreFn, address.getSavedSP());
}
/// Emit a call to do an 'initializeArrayWithCopy' operation.

View File

@@ -29,7 +29,6 @@ namespace irgen {
class IRGenFunction;
class IRGenModule;
enum class ValueWitness : unsigned;
class StackAddress;
class WitnessIndex;
/// Return the size of a fixed buffer.
@@ -235,21 +234,6 @@ namespace irgen {
/// The type must be dynamically known to have extra inhabitant witnesses.
llvm::Value *emitLoadOfExtraInhabitantCount(IRGenFunction &IGF, SILType T);
/// Emit a dynamic alloca call to allocate enough memory to hold an object of
/// type 'T' and an optional llvm.stackrestore point if 'isInEntryBlock' is
/// false.
struct DynamicAlloca {
llvm::Value *Alloca;
llvm::Value *SavedSP;
DynamicAlloca(llvm::Value *A, llvm::Value *SP) : Alloca(A), SavedSP(SP) {}
};
DynamicAlloca emitDynamicAlloca(IRGenFunction &IGF, SILType T,
bool isInEntryBlock);
/// Deallocate dynamic alloca's memory if the stack address has an SP restore
/// point associated with it.
void emitDeallocateDynamicAlloca(IRGenFunction &IGF, StackAddress address);
/// Returns the IsInline flag and the loaded flags value.
std::pair<llvm::Value *, llvm::Value *>
emitLoadOfIsInline(IRGenFunction &IGF, llvm::Value *metadata);

View File

@@ -346,7 +346,7 @@ FixedTypeInfo::getSpareBitExtraInhabitantIndex(IRGenFunction &IGF,
}
static llvm::Value *computeExtraTagBytes(IRGenFunction &IGF, IRBuilder &Builder,
size_t fixedSize,
Size fixedSize,
llvm::Value *numEmptyCases) {
// We can use the payload area with a tag bit set somewhere outside of the
// payload area to represent cases. See how many bytes we need to cover
@@ -371,12 +371,12 @@ static llvm::Value *computeExtraTagBytes(IRGenFunction &IGF, IRBuilder &Builder,
auto *int32Ty = IGM.Int32Ty;
auto *one = llvm::ConstantInt::get(int32Ty, 1U);
if (fixedSize >= 4) {
if (fixedSize >= Size(4)) {
return one;
}
auto *entryBB = Builder.GetInsertBlock();
llvm::Value *size = asSizeConstant(IGM, Size(fixedSize));
llvm::Value *size = asSizeConstant(IGM, fixedSize);
auto *returnBB = llvm::BasicBlock::Create(Ctx);
size = Builder.CreateTrunc(size, int32Ty); // We know size < 4.
@@ -418,7 +418,7 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
auto &Builder = IGF.Builder;
auto *size = getSize(IGF, T);
auto fixedSize = getFixedSize().getValue();
Size fixedSize = getFixedSize();
auto *numExtraInhabitants =
llvm::ConstantInt::get(IGM.Int32Ty, getFixedExtraInhabitantCount(IGM));
@@ -444,8 +444,8 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
// There are extra tag bits to check.
Builder.emitBlock(extraTagBitsBB);
llvm::Value *extraTagBits = Builder.CreateAlloca(IGM.Int32Ty, nullptr);
Builder.CreateStore(zero, extraTagBits, Alignment(0));
Address extraTagBitsSlot = IGF.createAlloca(IGM.Int32Ty, Alignment(4));
Builder.CreateStore(zero, extraTagBitsSlot);
// Compute the number of extra tag bytes.
auto *emptyCases = Builder.CreateSub(numEmptyCases, numExtraInhabitants);
@@ -456,13 +456,14 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
auto *valueAddr =
Builder.CreateBitOrPointerCast(enumAddr.getAddress(), IGM.Int8PtrTy);
auto *extraTagBitsAddr =
Builder.CreateConstInBoundsGEP1_32(IGM.Int8Ty, valueAddr, fixedSize);
Builder.CreateConstInBoundsGEP1_32(IGM.Int8Ty, valueAddr,
fixedSize.getValue());
// TODO: big endian.
Builder.CreateMemCpy(
Builder.CreateBitOrPointerCast(extraTagBits, IGM.Int8PtrTy),
Builder.CreateBitCast(extraTagBitsSlot, IGM.Int8PtrTy).getAddress(),
extraTagBitsAddr, numExtraTagBytes, 1);
extraTagBits = Builder.CreateLoad(extraTagBits, Alignment(0));
auto extraTagBits = Builder.CreateLoad(extraTagBitsSlot);
extraTagBitsBB = llvm::BasicBlock::Create(Ctx);
Builder.CreateCondBr(Builder.CreateICmpEQ(extraTagBits, zero),
@@ -473,8 +474,8 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
Builder.emitBlock(extraTagBitsBB);
auto *truncSize = Builder.CreateTrunc(size, IGM.Int32Ty);
llvm::Value *caseIndexFromValue = Builder.CreateAlloca(IGM.Int32Ty, nullptr);
Builder.CreateStore(zero, caseIndexFromValue, Alignment(0));
Address caseIndexFromValueSlot = IGF.createAlloca(IGM.Int32Ty, Alignment(4));
Builder.CreateStore(zero, caseIndexFromValueSlot);
auto *caseIndexFromExtraTagBits = Builder.CreateSelect(
Builder.CreateICmpUGE(truncSize, four), zero,
@@ -483,9 +484,10 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
// TODO: big endian.
Builder.CreateMemCpy(
Builder.CreateBitOrPointerCast(caseIndexFromValue, IGM.Int8PtrTy),
valueAddr, std::min(Size(4U).getValue(), fixedSize), 1);
caseIndexFromValue = Builder.CreateLoad(caseIndexFromValue, Alignment(0));
Builder.CreateBitCast(caseIndexFromValueSlot, IGM.Int8PtrTy),
Address(valueAddr, Alignment(1)),
std::min(Size(4U), fixedSize));
auto caseIndexFromValue = Builder.CreateLoad(caseIndexFromValueSlot);
auto *result1 = Builder.CreateAdd(
numExtraInhabitants,
@@ -521,7 +523,7 @@ llvm::Value *FixedTypeInfo::getEnumTagSinglePayload(IRGenFunction &IGF,
/// Emit a specialized memory operation for a \p size of 0 to 4 bytes.
static void emitSpecializedMemOperation(
IRGenFunction &IGF,
llvm::function_ref<void(IRBuilder &, uint64_t)> emitMemOpFn,
llvm::function_ref<void(IRBuilder &, Size)> emitMemOpFn,
llvm::Value *size) {
auto &IGM = IGF.IGM;
auto &Ctx = IGF.IGM.getLLVMContext();
@@ -549,37 +551,37 @@ static void emitSpecializedMemOperation(
Builder.CreateCondBr(isTwo, twoBB, fourBB);
Builder.emitBlock(oneBB);
emitMemOpFn(Builder, 1);
emitMemOpFn(Builder, Size(1));
Builder.CreateBr(returnBB);
Builder.emitBlock(twoBB);
emitMemOpFn(Builder, 2);
emitMemOpFn(Builder, Size(2));
Builder.CreateBr(returnBB);
Builder.emitBlock(fourBB);
emitMemOpFn(Builder, 4);
emitMemOpFn(Builder, Size(4));
Builder.CreateBr(returnBB);
Builder.emitBlock(returnBB);
}
/// Emit a memset of zero operation for a \p size of 0 to 4 bytes.
static void emitMemZero(IRGenFunction &IGF, llvm::Value *addr,
static void emitMemZero(IRGenFunction &IGF, Address addr,
llvm::Value *size) {
auto *zeroByte = llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0U);
emitSpecializedMemOperation(IGF,
[=](IRBuilder &B, uint64_t numBytes) {
B.CreateMemSet(addr, zeroByte, numBytes, 1);
[=](IRBuilder &B, Size numBytes) {
B.CreateMemSet(addr, zeroByte, numBytes);
},
size);
}
/// Emit a memcpy operation for a \p size of 0 to 4 bytes.
static void emitMemCpy(IRGenFunction &IGF, llvm::Value *to, llvm::Value *from,
static void emitMemCpy(IRGenFunction &IGF, Address to, Address from,
llvm::Value *size) {
emitSpecializedMemOperation(IGF,
[=](IRBuilder &B, uint64_t numBytes) {
B.CreateMemCpy(to, from, numBytes, 1);
[=](IRBuilder &B, Size numBytes) {
B.CreateMemCpy(to, from, numBytes);
},
size);
}
@@ -599,12 +601,11 @@ void FixedTypeInfo::storeEnumTagSinglePayload(IRGenFunction &IGF,
auto *four = llvm::ConstantInt::get(int32Ty, 4U);
auto *eight = llvm::ConstantInt::get(int32Ty, 8U);
auto fixedSize = getFixedSize().getValue();
auto fixedSize = getFixedSize();
auto *valueAddr =
Builder.CreateBitOrPointerCast(enumAddr.getAddress(), IGM.Int8PtrTy);
auto *extraTagBitsAddr =
Builder.CreateConstInBoundsGEP1_32(IGM.Int8Ty, valueAddr, fixedSize);
Address valueAddr = Builder.CreateElementBitCast(enumAddr, IGM.Int8Ty);
Address extraTagBitsAddr =
Builder.CreateConstByteArrayGEP(valueAddr, fixedSize);
auto *numExtraInhabitants =
llvm::ConstantInt::get(IGM.Int32Ty, getFixedExtraInhabitantCount(IGM));
@@ -685,21 +686,20 @@ void FixedTypeInfo::storeEnumTagSinglePayload(IRGenFunction &IGF,
payloadIndex->addIncoming(caseIndex, payloadGE4BB);
payloadIndex->addIncoming(payloadIndex0, payloadLT4BB);
auto *payloadIndexAddr = Builder.CreateAlloca(int32Ty, nullptr);
Builder.CreateStore(payloadIndex, payloadIndexAddr, Alignment(0));
auto *extraTagIndexAddr = Builder.CreateAlloca(int32Ty, nullptr);
Builder.CreateStore(extraTagIndex, extraTagIndexAddr, Alignment(0));
Address payloadIndexAddr = IGF.createAlloca(int32Ty, Alignment(4));
Builder.CreateStore(payloadIndex, payloadIndexAddr);
Address extraTagIndexAddr = IGF.createAlloca(int32Ty, Alignment(4));
Builder.CreateStore(extraTagIndex, extraTagIndexAddr);
// TODO: big endian
Builder.CreateMemCpy(
valueAddr,
Builder.CreateBitOrPointerCast(payloadIndexAddr, IGM.Int8PtrTy),
std::min(Size(4U).getValue(), fixedSize), 1);
auto *extraZeroAddr =
Builder.CreateConstInBoundsGEP1_32(IGM.Int8Ty, valueAddr, 4);
if (fixedSize > 4)
Builder.CreateBitCast(payloadIndexAddr, IGM.Int8PtrTy),
std::min(Size(4U), fixedSize));
Address extraZeroAddr = Builder.CreateConstByteArrayGEP(valueAddr, Size(4));
if (fixedSize > Size(4))
Builder.CreateMemSet(
extraZeroAddr, llvm::ConstantInt::get(IGM.Int8Ty, 0),
Builder.CreateSub(size, llvm::ConstantInt::get(size->getType(), 4)), 1);
Builder.CreateSub(size, llvm::ConstantInt::get(size->getType(), 4)));
emitMemCpy(IGF, extraTagBitsAddr, extraTagIndexAddr, numExtraTagBytes);
Builder.CreateBr(returnBB);

View File

@@ -31,10 +31,12 @@ class FunctionPointer;
typedef llvm::IRBuilder<> IRBuilderBase;
class IRBuilder : public IRBuilderBase {
public:
// Without this, it keeps resolving to llvm::IRBuilderBase because
// of the injected class name.
typedef irgen::IRBuilderBase IRBuilderBase;
private:
/// The block containing the insertion point when the insertion
/// point was last cleared. Used only for preserving block
/// ordering.
@@ -161,6 +163,11 @@ public:
return StableIP(*this);
}
/// Don't create allocas this way; you'll get a dynamic alloca.
/// Use IGF::createAlloca or IGF::emitDynamicAlloca.
llvm::Value *CreateAlloca(llvm::Type *type, llvm::Value *arraySize,
const llvm::Twine &name = "") = delete;
llvm::LoadInst *CreateLoad(llvm::Value *addr, Alignment align,
const llvm::Twine &name = "") {
llvm::LoadInst *load = IRBuilderBase::CreateLoad(addr, name);
@@ -252,6 +259,17 @@ public:
std::min(dest.getAlignment(),
src.getAlignment()).getValue());
}
using IRBuilderBase::CreateMemSet;
llvm::CallInst *CreateMemSet(Address dest, llvm::Value *value, Size size) {
return CreateMemSet(dest.getAddress(), value, size.getValue(),
dest.getAlignment().getValue());
}
llvm::CallInst *CreateMemSet(Address dest, llvm::Value *value,
llvm::Value *size) {
return CreateMemSet(dest.getAddress(), value, size,
dest.getAlignment().getValue());
}
using IRBuilderBase::CreateLifetimeStart;
llvm::CallInst *CreateLifetimeStart(Address buf, Size size) {

View File

@@ -127,11 +127,17 @@ public:
}
Address createAlloca(llvm::Type *ty, Alignment align,
const llvm::Twine &name);
Address createAlloca(llvm::Type *ty, llvm::Value *ArraySize, Alignment align,
const llvm::Twine &name);
const llvm::Twine &name = "");
Address createAlloca(llvm::Type *ty, llvm::Value *arraySize, Alignment align,
const llvm::Twine &name = "");
Address createFixedSizeBufferAlloca(const llvm::Twine &name);
StackAddress emitDynamicAlloca(SILType type, const llvm::Twine &name = "");
StackAddress emitDynamicAlloca(llvm::Type *eltTy, llvm::Value *arraySize,
Alignment align,
const llvm::Twine &name = "");
void emitDeallocateDynamicAlloca(StackAddress address);
llvm::BasicBlock *createBasicBlock(const llvm::Twine &Name);
const TypeInfo &getTypeInfoForUnlowered(Type subst);
const TypeInfo &getTypeInfoForUnlowered(AbstractionPattern orig, Type subst);

View File

@@ -3756,9 +3756,7 @@ void IRGenSILFunction::visitAllocStackInst(swift::AllocStackInst *i) {
(void) Decl;
bool isEntryBlock = (i->getParent() == i->getFunction()->getEntryBlock());
auto addr =
type.allocateStack(*this, i->getElementType(), isEntryBlock, dbgname);
auto addr = type.allocateStack(*this, i->getElementType(), dbgname);
emitDebugInfoForAllocStack(i, type, addr.getAddress().getAddress());
@@ -4661,6 +4659,7 @@ void IRGenSILFunction::visitCheckedCastAddrBranchInst(
void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) {
auto pattern = IGM.getAddrOfKeyPathPattern(I->getPattern(), I->getLoc());
// Build up the argument vector to instantiate the pattern here.
Optional<StackAddress> dynamicArgsBuf;
llvm::Value *args;
if (!I->getSubstitutions().empty() || !I->getAllOperands().empty()) {
auto sig = I->getPattern()->getGenericSignature();
@@ -4702,11 +4701,9 @@ void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) {
argsBufAlign = Builder.CreateOr(argsBufAlign, alignMask);
}
auto argsBufInst = Builder.CreateAlloca(IGM.Int8Ty, argsBufSize);
// TODO: over-alignment?
argsBufInst->setAlignment(16);
dynamicArgsBuf = emitDynamicAlloca(IGM.Int8Ty, argsBufSize, Alignment(16));
Address argsBuf(argsBufInst, Alignment(16));
Address argsBuf = dynamicArgsBuf->getAddress();
if (!I->getSubstitutions().empty()) {
emitInitOfGenericRequirementsBuffer(*this, requirements, argsBuf,
@@ -4720,7 +4717,8 @@ void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) {
for (unsigned i : indices(I->getAllOperands())) {
auto operand = I->getAllOperands()[i].get();
auto &ti = getTypeInfo(operand->getType());
auto ptr = Builder.CreateInBoundsGEP(argsBufInst, operandOffsets[i]);
auto ptr = Builder.CreateInBoundsGEP(argsBuf.getAddress(),
operandOffsets[i]);
auto addr = ti.getAddressForPointer(
Builder.CreateBitCast(ptr, ti.getStorageType()->getPointerTo()));
if (operand->getType().isAddress()) {
@@ -4731,7 +4729,7 @@ void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) {
cast<LoadableTypeInfo>(ti).initialize(*this, operandValue, addr, false);
}
}
args = argsBufInst;
args = argsBuf.getAddress();
} else {
// No arguments necessary, so the argument ought to be ignored by any
// callbacks in the pattern.
@@ -4742,6 +4740,10 @@ void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) {
auto call = Builder.CreateCall(IGM.getGetKeyPathFn(), {patternPtr, args});
call->setDoesNotThrow();
if (dynamicArgsBuf) {
emitDeallocateDynamicAlloca(*dynamicArgsBuf);
}
auto resultStorageTy = IGM.getTypeInfo(I->getType()).getStorageType();
Explosion e;

View File

@@ -26,13 +26,14 @@
namespace swift {
namespace irgen {
using SwiftAggLowering = clang::CodeGen::swiftcall::SwiftAggLowering;
class NativeConventionSchema {
clang::CodeGen::swiftcall::SwiftAggLowering Lowering;
SwiftAggLowering Lowering;
bool RequiresIndirect;
public:
using EnumerationCallback =
clang::CodeGen::swiftcall::SwiftAggLowering::EnumerationCallback;
using EnumerationCallback = SwiftAggLowering::EnumerationCallback;
NativeConventionSchema(IRGenModule &IGM, const TypeInfo *TI, bool isResult);

View File

@@ -68,24 +68,19 @@ public:
// This is useful for metaprogramming.
static bool isFixed() { return false; }
StackAddress allocateStack(IRGenFunction &IGF,
SILType T,
bool isInEntryBlock,
const llvm::Twine &name) const override {
StackAddress allocateStack(IRGenFunction &IGF, SILType T,
const llvm::Twine &name) const override {
// Allocate memory on the stack.
auto alloca = emitDynamicAlloca(IGF, T, isInEntryBlock);
assert((isInEntryBlock && alloca.SavedSP == nullptr) ||
(!isInEntryBlock && alloca.SavedSP != nullptr) &&
"stacksave/restore operations can only be skipped in the entry "
"block");
IGF.Builder.CreateLifetimeStart(alloca.Alloca);
return { getAsBitCastAddress(IGF, alloca.Alloca), alloca.SavedSP };
auto alloca = IGF.emitDynamicAlloca(T, name);
IGF.Builder.CreateLifetimeStart(alloca.getAddressPointer());
return alloca.withAddress(
getAsBitCastAddress(IGF, alloca.getAddressPointer()));
}
void deallocateStack(IRGenFunction &IGF, StackAddress stackAddress,
SILType T) const override {
IGF.Builder.CreateLifetimeEnd(stackAddress.getAddress().getAddress());
emitDeallocateDynamicAlloca(IGF, stackAddress);
IGF.emitDeallocateDynamicAlloca(stackAddress);
}
void destroyStack(IRGenFunction &IGF, StackAddress stackAddress, SILType T,

View File

@@ -261,7 +261,6 @@ public:
/// Allocate a variable of this type on the stack.
virtual StackAddress allocateStack(IRGenFunction &IGF, SILType T,
bool isInEntryBlock,
const llvm::Twine &name) const = 0;
/// Deallocate a variable of this type.

View File

@@ -203,8 +203,8 @@ class Foo {
// x86_64-macosx: call void @llvm.lifetime.start
// x86_64-macosx: store i32 {{.*}}
// x86_64-macosx: store i32 {{.*}}
// x86_64-macosx: getelementptr inbounds { i64 }, { i64 }
// x86_64-macosx: load i64, i64* %12, align 8
// x86_64-macosx: [[T0:%.*]] = getelementptr inbounds { i64 }, { i64 }
// x86_64-macosx: load i64, i64* [[T0]], align 8
// x86_64-macosx: bitcast
// x86_64-macosx: call void @llvm.lifetime.end
// x86_64-macosx: ret i64