Remove the LLVM stack promotion pass and related SIL optimization logic.

It's not needed anymore because array buffers are now allocated with alloc_ref instead of a swift_bufferAllocate runtime call.
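For illustration, a rough SIL sketch of the new allocation scheme (the storage class name and element count are hypothetical, and the exact tail_elems spelling is an assumption based on the tail-allocated-array support referenced in the StackPromotion.cpp comments below):

    // The buffer is now an ordinary class instance with tail-allocated
    // elements; no runtime call is involved.
    %n = integer_literal $Builtin.Word, 4
    %buf = alloc_ref [tail_elems $Int * %n : $Builtin.Word] $MyArrayStorage<Int>
    ...
    // The SIL StackPromotion pass can promote it like any other alloc_ref,
    // by setting the [stack] attribute and pairing it with:
    dealloc_ref [stack] %buf : $MyArrayStorage<Int>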
Erik Eckstein
2016-09-09 15:33:21 -07:00
parent b67959a6c1
commit bd0d2bfed4
13 changed files with 37 additions and 820 deletions

View File

@@ -73,8 +73,6 @@ Rename with a non-`stdlib` naming scheme.
000000000001cb30 T _swift_allocBox
000000000001c990 T _swift_allocObject
000000000001ca60 T _swift_bufferAllocate
000000000001ca70 T _swift_bufferAllocateOnStack
000000000001ca80 T _swift_bufferDeallocateFromStack
000000000001ca90 T _swift_bufferHeaderSize
000000000001cd30 T _swift_deallocBox
000000000001d490 T _swift_deallocClassInstance

View File

@@ -87,14 +87,6 @@ namespace swift {
SwiftARCContract() : llvm::FunctionPass(ID) {}
};
class SwiftStackPromotion : public llvm::FunctionPass {
virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const override;
virtual bool runOnFunction(llvm::Function &F) override;
public:
static char ID;
SwiftStackPromotion() : llvm::FunctionPass(ID) {}
};
class InlineTreePrinter : public llvm::ModulePass {
virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const override;
virtual bool runOnModule(llvm::Module &M) override;

View File

@@ -23,7 +23,6 @@ namespace llvm {
void initializeSwiftRCIdentityPass(PassRegistry &);
void initializeSwiftARCOptPass(PassRegistry &);
void initializeSwiftARCContractPass(PassRegistry &);
void initializeSwiftStackPromotionPass(PassRegistry &);
void initializeInlineTreePrinterPass(PassRegistry &);
void initializeSwiftMergeFunctionsPass(PassRegistry &);
}
@@ -31,7 +30,6 @@ namespace llvm {
namespace swift {
llvm::FunctionPass *createSwiftARCOptPass();
llvm::FunctionPass *createSwiftARCContractPass();
llvm::FunctionPass *createSwiftStackPromotionPass();
llvm::ModulePass *createInlineTreePrinterPass();
llvm::ModulePass *createSwiftMergeFunctionsPass();
llvm::ImmutablePass *createSwiftAAWrapperPass();

View File

@@ -88,12 +88,6 @@ static void addSwiftContractPass(const PassManagerBuilder &Builder,
PM.add(createSwiftARCContractPass());
}
static void addSwiftStackPromotionPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
if (Builder.OptLevel > 0)
PM.add(createSwiftStackPromotionPass());
}
static void addSwiftMergeFunctionsPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
if (Builder.OptLevel > 0)
@@ -162,9 +156,6 @@ void swift::performLLVMOptimizations(IRGenOptions &Opts, llvm::Module *Module,
llvm::createAlwaysInlinerPass(/*insertlifetime*/false);
}
PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
addSwiftStackPromotionPass);
// If the optimizer is enabled, we run the ARCOpt pass in the scalar optimizer
// and the Contract pass as late as possible.
if (!Opts.DisableLLVMARCOpts) {

View File

@@ -4,7 +4,6 @@ add_swift_library(swiftLLVMPasses STATIC
LLVMARCOpts.cpp
LLVMARCContract.cpp
LLVMInlineTree.cpp
LLVMStackPromotion.cpp
LLVMMergeFunctions.cpp
LLVM_COMPONENT_DEPENDS

View File

@@ -1,272 +0,0 @@
//===--- LLVMStackPromotion.cpp - Replace allocation calls with alloca ----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This pass performs the last part of stack promotion for array buffers.
// The SIL StackPromotion pass generates a pair of swift_bufferAllocateOnStack
// and swift_bufferDeallocateFromStack calls. In this pass the final decision
// is made whether stack promotion should be done. If yes, the
// swift_bufferAllocateOnStack call is replaced with an alloca plus a call to
// swift_initStackObject, and the swift_bufferDeallocateFromStack call is removed.
// TODO: This is a hack and eventually this pass should not be required at all.
// For details see the comments for the SIL StackPromoter.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "swift-stack-promotion"
#include "swift/LLVMPasses/Passes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace swift;
STATISTIC(NumBufferAllocsPromoted,
"Number of swift_bufferAllocate promoted");
cl::opt<int> LimitOpt("stack-promotion-limit",
llvm::cl::init(1024), llvm::cl::Hidden);
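// Note: the promotion budget defaults to 1024 bytes. The option is hidden,
// so it is normally only set by tooling; the test file deleted below
// overrides it explicitly:
//   %swift-llvm-opt -swift-stack-promotion -stack-promotion-limit=100 %s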
//===----------------------------------------------------------------------===//
// SwiftStackPromotion Pass
//===----------------------------------------------------------------------===//
char SwiftStackPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(SwiftStackPromotion,
"swift-stack-promotion", "Swift stack promotion pass",
false, false)
INITIALIZE_PASS_END(SwiftStackPromotion,
"swift-stack-promotion", "Swift stack promotion pass",
false, false)
llvm::FunctionPass *swift::createSwiftStackPromotionPass() {
initializeSwiftStackPromotionPass(*llvm::PassRegistry::getPassRegistry());
return new SwiftStackPromotion();
}
void SwiftStackPromotion::getAnalysisUsage(llvm::AnalysisUsage &AU) const {
AU.setPreservesCFG();
}
/// Checks if we can promote a buffer and returns the size of the buffer.
/// The \a align parameter is set to the alignment of the buffer.
int canPromote(CallInst *CI, unsigned &align, int maxSize) {
if (CI->getNumArgOperands() != 3)
return 0;
auto *SizeConst = dyn_cast<ConstantInt>(CI->getArgOperand(1));
if (!SizeConst)
return 0;
auto *AlignMaskConst = dyn_cast<ConstantInt>(CI->getArgOperand(2));
if (!AlignMaskConst)
return 0;
int size = SizeConst->getValue().getSExtValue();
if (size > maxSize)
return 0;
align = AlignMaskConst->getValue().getZExtValue() + 1;
return size;
}
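// Worked example (taken from the test file deleted below, not new code):
// for the call
//   %1 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %0, i64 48, i64 7)
// canPromote returns a size of 48 and sets align = alignMask + 1 = 7 + 1 = 8,
// which matches the 'alloca i8, i32 48, align 8' the pass then emits.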
/// Remove redundant runtime calls for stack allocated buffers.
/// If a buffer is allocated on the stack there is no need to explicitly set
/// the RC_DEALLOCATING_FLAG (unless there is code which may depend on it).
/// Likewise, a call to swift_deallocClassInstance (which stems from an inlined
/// deallocator) is not needed.
///
/// %0 = alloca
/// ...
/// call @swift_setDeallocating(%0) // not needed
/// // code which does not depend on the RC_DEALLOCATING_FLAG.
/// call @swift_deallocClassInstance(%0) // not needed
/// call @llvm.lifetime.end(%0)
///
static void removeRedundantRTCalls(CallInst *DeallocCall) {
BasicBlock::iterator Iter(DeallocCall);
BasicBlock::iterator Begin = DeallocCall->getParent()->begin();
Value *Buffer = DeallocCall->getArgOperand(0);
CallInst *RedundantDealloc = nullptr;
CallInst *RedundantSetFlag = nullptr;
SmallVector<Instruction *, 2> ToDelete;
while (Iter != Begin) {
--Iter;
Instruction *I = &*Iter;
if (auto *CI = dyn_cast<CallInst>(I)) {
// Check if we have a runtime function with the buffer as argument.
if (CI->getNumArgOperands() < 1)
break;
if (CI->getArgOperand(0)->stripPointerCasts() != Buffer)
break;
auto *Callee = dyn_cast<Constant>(CI->getCalledValue());
if (!Callee)
break;
// The callee function may be a bitcast constant expression.
if (auto *U = dyn_cast<ConstantExpr>(Callee)) {
if (U->getOpcode() == Instruction::BitCast)
Callee = U->getOperand(0);
}
auto *RTFunc = dyn_cast<Function>(Callee);
if (!RTFunc)
break;
if (RTFunc->getName() == "swift_setDeallocating") {
assert(RedundantDealloc && "dealloc call must follow setDeallocating");
assert(!RedundantSetFlag && "multiple setDeallocating calls");
RedundantSetFlag = CI;
continue;
}
if (RTFunc->getName() == "swift_deallocClassInstance") {
assert(!RedundantSetFlag && "dealloc call must follow setDeallocating");
assert(!RedundantDealloc && "multiple deallocClassInstance calls");
RedundantDealloc = CI;
continue;
}
break;
}
// Bail if we have an instruction which may read the RC_DEALLOCATING_FLAG.
if (I->mayReadFromMemory())
break;
}
if (RedundantDealloc)
RedundantDealloc->eraseFromParent();
if (RedundantSetFlag)
RedundantSetFlag->eraseFromParent();
}
bool SwiftStackPromotion::runOnFunction(Function &F) {
bool Changed = false;
Constant *allocFunc = nullptr;
Constant *initFunc = nullptr;
int maxSize = LimitOpt;
Module *M = F.getParent();
const DataLayout &DL = M->getDataLayout();
IntegerType *AllocType = nullptr;
IntegerType *IntType = nullptr;
SmallVector<CallInst *, 8> BufferAllocs;
SmallPtrSet<CallInst *, 8> PromotedAllocs;
SmallVector<CallInst *, 8> BufferDeallocs;
// Search for allocation- and deallocation-calls in the function.
for (BasicBlock &BB : F) {
for (auto Iter = BB.begin(); Iter != BB.end(); ) {
Instruction *I = &*Iter;
Iter++;
if (auto *AI = dyn_cast<AllocaInst>(I)) {
int Size = 1;
if (auto *SizeConst = dyn_cast<ConstantInt>(AI->getArraySize()))
Size = SizeConst->getValue().getSExtValue();
// Count the existing alloca sizes against the limit.
maxSize -= DL.getTypeAllocSize(AI->getAllocatedType()) * Size;
}
auto *CI = dyn_cast<CallInst>(I);
if (!CI)
continue;
Function *Callee = CI->getCalledFunction();
if (!Callee)
continue;
if (Callee->getName() == "swift_bufferAllocateOnStack") {
BufferAllocs.push_back(CI);
} else if (Callee->getName() == "swift_bufferDeallocateFromStack") {
BufferDeallocs.push_back(CI);
}
}
}
// First handle allocations.
for (CallInst *CI : BufferAllocs) {
Function *Callee = CI->getCalledFunction();
assert(Callee);
unsigned align = 0;
if (int size = canPromote(CI, align, maxSize)) {
maxSize -= size;
if (!AllocType) {
// Create the swift_initStackObject function and all required types.
AllocType = IntegerType::get(M->getContext(), 8);
IntType = IntegerType::get(M->getContext(), 32);
auto *OrigFT = Callee->getFunctionType();
auto *HeapObjTy = OrigFT->getReturnType();
auto *MetaDataTy = OrigFT->getParamType(0);
auto *NewFTy = FunctionType::get(HeapObjTy,
{MetaDataTy, HeapObjTy},
false);
initFunc = M->getOrInsertFunction("swift_initStackObject", NewFTy);
if (llvm::Triple(M->getTargetTriple()).isOSBinFormatCOFF())
if (auto *F = dyn_cast<llvm::Function>(initFunc))
F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
}
// Replace the allocation call with an alloca.
Value *AllocA = new AllocaInst(AllocType, ConstantInt::get(IntType, size),
align, "buffer", &*F.front().begin());
// And initialize it with a call to swift_initStackObject.
IRBuilder<> B(CI);
Value *casted = B.CreateBitCast(AllocA, CI->getType());
CallInst *initCall = B.CreateCall(initFunc,
{CI->getArgOperand(0), casted});
CI->replaceAllUsesWith(initCall);
CI->eraseFromParent();
PromotedAllocs.insert(initCall);
++NumBufferAllocsPromoted;
} else {
// We don't do stack promotion. Replace the call with a call to the
// regular swift_bufferAllocate.
if (!allocFunc) {
allocFunc = M->getOrInsertFunction("swift_bufferAllocate",
Callee->getFunctionType());
if (llvm::Triple(M->getTargetTriple()).isOSBinFormatCOFF())
if (auto *F = dyn_cast<llvm::Function>(allocFunc))
F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
}
CI->setCalledFunction(allocFunc);
}
Changed = true;
}
// After we have made the decision for all allocations we can handle the
// deallocations.
for (CallInst *CI : BufferDeallocs) {
CallInst *Alloc = dyn_cast<CallInst>(CI->getArgOperand(0));
assert(Alloc && "alloc buffer obfuscated");
if (PromotedAllocs.count(Alloc)) {
removeRedundantRTCalls(CI);
IRBuilder<> B(CI);
// This has two purposes:
// 1. Tell LLVM the lifetime of the allocated stack memory.
// 2. Avoid tail-call optimization, which may convert the call of the final
//    release into a jump that would execute after the stack frame is
//    destroyed.
B.CreateLifetimeEnd(CI->getArgOperand(0));
}
// Other than inserting the end-of-lifetime, the deallocation is a no-op.
CI->eraseFromParent();
Changed = true;
}
return Changed;
}

View File

@@ -90,8 +90,7 @@ bool swift::ArraySemanticsCall::isValidSignature() {
return false;
StringRef AllocFuncName = AllocFn->getName();
if (AllocFuncName != "swift_bufferAllocate" &&
AllocFuncName != "swift_bufferAllocateOnStack")
if (AllocFuncName != "swift_bufferAllocate")
return false;
if (!hasOneNonDebugUse(AllocBufferAI))

View File

@@ -24,6 +24,7 @@ using namespace swift;
namespace {
/// Devirtualizes release instructions which are known to destruct the object.
///
/// This means it replaces a sequence of
/// %x = alloc_ref [stack] $X
/// ...
@@ -37,10 +38,6 @@ namespace {
/// %a = apply %d(%x)
/// dealloc_ref [stack] %x
///
/// It also works for array buffers, where the allocation/deallocation is done
/// by calls to the swift_bufferAllocateOnStack/swift_bufferDeallocateFromStack
/// functions.
///
/// The optimization is only done for stack promoted objects because they are
/// known to have no associated objects (which are not explicitly released
/// in the deinit method).
@@ -57,10 +54,6 @@ private:
bool devirtualizeReleaseOfObject(SILInstruction *ReleaseInst,
DeallocRefInst *DeallocInst);
/// Devirtualize releases of swift objects.
bool devirtualizeReleaseOfBuffer(SILInstruction *ReleaseInst,
ApplyInst *DeallocCall);
/// Replace the release-instruction \p ReleaseInst with an explicit call to
/// the deallocating destructor of \p AllocType for \p object.
bool createDeallocCall(SILType AllocType, SILInstruction *ReleaseInst,
@@ -91,11 +84,6 @@ void ReleaseDevirtualizer::run() {
LastRelease = nullptr;
continue;
}
if (auto *AI = dyn_cast<ApplyInst>(&I)) {
Changed |= devirtualizeReleaseOfBuffer(LastRelease, AI);
LastRelease = nullptr;
continue;
}
}
if (isa<ReleaseValueInst>(&I) ||
@@ -139,51 +127,6 @@ devirtualizeReleaseOfObject(SILInstruction *ReleaseInst,
return createDeallocCall(AllocType, ReleaseInst, ARI);
}
bool ReleaseDevirtualizer::
devirtualizeReleaseOfBuffer(SILInstruction *ReleaseInst,
ApplyInst *DeallocCall) {
DEBUG(llvm::dbgs() << " try to devirtualize " << *ReleaseInst);
// Is this a deallocation of a buffer?
SILFunction *DeallocFn = DeallocCall->getReferencedFunction();
if (!DeallocFn || DeallocFn->getName() != "swift_bufferDeallocateFromStack")
return false;
// Is the deallocation call paired with an allocation call?
ApplyInst *AllocAI = dyn_cast<ApplyInst>(DeallocCall->getArgument(0));
if (!AllocAI || AllocAI->getNumArguments() < 1)
return false;
SILFunction *AllocFunc = AllocAI->getReferencedFunction();
if (!AllocFunc || AllocFunc->getName() != "swift_bufferAllocateOnStack")
return false;
// Can we find the buffer type which is allocated? Its metatype is passed
// as the first argument to the allocation function.
auto *IEMTI = dyn_cast<InitExistentialMetatypeInst>(AllocAI->getArgument(0));
if (!IEMTI)
return false;
SILType MType = IEMTI->getOperand()->getType();
auto *MetaType = MType.getSwiftRValueType()->getAs<AnyMetatypeType>();
if (!MetaType)
return false;
// Is the allocated buffer a class type? This should always be the case.
auto *ClType = MetaType->getInstanceType()->getAs<BoundGenericClassType>();
if (!ClType)
return false;
// Does the last release really release the allocated buffer?
SILValue rcRoot = RCIA->getRCIdentityRoot(ReleaseInst->getOperand(0));
if (rcRoot != AllocAI)
return false;
SILType SILClType = SILType::getPrimitiveObjectType(CanType(ClType));
return createDeallocCall(SILClType, ReleaseInst, AllocAI);
}
bool ReleaseDevirtualizer::createDeallocCall(SILType AllocType,
SILInstruction *ReleaseInst,
SILValue object) {

View File

@@ -27,23 +27,10 @@ STATISTIC(NumStackPromoted, "Number of objects promoted to the stack");
using namespace swift;
/// Promotes heap allocated objects to the stack.
/// The following types of allocations are handled:
/// *) alloc_ref instructions of native swift classes: if promoted, the [stack]
/// attribute is set in the alloc_ref and a dealloc_ref [stack] is inserted
/// at the end of the object's lifetime.
/// *) Array buffers which are allocated by a call to swift_bufferAllocate: if
/// promoted, the swift_bufferAllocate call is replaced by a call to
/// swift_bufferAllocateOnStack and a call to swift_bufferDeallocateFromStack
/// is inserted at the end of the buffer's lifetime.
/// Those calls are lowered by the LLVM SwiftStackPromotion pass.
/// TODO: This is a terrible hack, but necessary because we need constant
/// size and alignment for the final stack promotion decision. The arguments
/// to swift_bufferAllocate in SIL are not constant because they depend on
/// the not-yet-evaluatable sizeof and alignof builtins. Therefore we need
/// LLVM's constant propagation prior to deciding on stack promotion.
/// The solution to this problem is that we need native support for tail-
/// allocated arrays in SIL so that we can do the array buffer allocations
/// with alloc_ref instructions.
///
/// It handles alloc_ref instructions of native swift classes: if promoted,
/// the [stack] attribute is set in the alloc_ref and a dealloc_ref [stack] is
/// inserted at the end of the object's lifetime.
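///
/// A minimal sketch of that remaining transformation (class X hypothetical):
///
///   %x = alloc_ref $X
///   ...
///   strong_release %x : $X
///
/// becomes
///
///   %x = alloc_ref [stack] $X
///   ...
///   strong_release %x : $X
///   dealloc_ref [stack] %x : $X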
class StackPromoter {
// Some analysis we need.
@@ -68,14 +55,6 @@ class StackPromoter {
bool PostDomTreeValid;
// Pseudo-functions for (de-)allocating array buffers on the stack.
SILFunction *BufferAllocFunc = nullptr;
SILFunction *BufferDeallocFunc = nullptr;
bool ChangedInsts = false;
bool ChangedCalls = false;
/// Worklist for visiting all blocks.
class WorkListType {
/// The nesting depth of stack allocation instructions for each block.
@@ -124,22 +103,14 @@ class StackPromoter {
};
/// Tries to promote the allocation \p AI.
void tryPromoteAlloc(SILInstruction *AI);
/// Creates the external declaration for swift_bufferAllocateOnStack.
SILFunction *getBufferAllocFunc(SILFunction *OrigFunc,
SILLocation Loc);
/// Creates the external declaration for swift_bufferDeallocateFromStack.
SILFunction *getBufferDeallocFunc(SILFunction *OrigFunc,
SILLocation Loc);
bool tryPromoteAlloc(AllocRefInst *ARI);
/// Returns true if the allocation \p AI can be promoted.
/// In this case it sets the \a DeallocInsertionPoint to the instruction
/// where the deallocation must be inserted.
/// It optionally also sets \a AllocInsertionPoint in case the allocation
/// instruction must be moved to another place.
bool canPromoteAlloc(SILInstruction *AI,
bool canPromoteAlloc(AllocRefInst *ARI,
SILInstruction *&AllocInsertionPoint,
SILInstruction *&DeallocInsertionPoint);
@@ -199,40 +170,15 @@ public:
F(F), ConGraph(ConGraph), DT(DT), EA(EA), PostDomTree(true),
PostDomTreeValid(false) { }
/// What did the optimization change?
enum class ChangeState {
None,
Insts,
Calls
};
SILFunction *getFunction() const { return F; }
/// The main entry point for the optimization.
ChangeState promote();
///
/// Returns true if some changes were made.
bool promote();
};
/// Returns true if instruction \p I is an allocation we can handle.
static bool isPromotableAllocInst(SILInstruction *I) {
// Check for swift object allocation.
if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
if (!ARI->isObjC() && !ARI->canAllocOnStack())
return true;
return false;
}
// Check for array buffer allocation.
auto *AI = dyn_cast<ApplyInst>(I);
if (AI && AI->getNumArguments() == 3) {
if (auto *Callee = AI->getReferencedFunction()) {
if (Callee->getName() == "swift_bufferAllocate")
return true;
}
return false;
}
return false;
}
StackPromoter::ChangeState StackPromoter::promote() {
bool StackPromoter::promote() {
llvm::SetVector<SILBasicBlock *> ReachableBlocks;
@@ -252,6 +198,7 @@ StackPromoter::ChangeState StackPromoter::promote() {
ReachableBlocks.insert(Pred);
}
bool Changed = false;
// Search the whole function for stack promotable allocations.
for (SILBasicBlock &BB : *F) {
@@ -266,105 +213,34 @@ StackPromoter::ChangeState StackPromoter::promote() {
// The allocation instruction may be moved, so increment Iter prior to
// doing the optimization.
SILInstruction *I = &*Iter++;
if (isPromotableAllocInst(I)) {
tryPromoteAlloc(I);
if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
Changed |= tryPromoteAlloc(ARI);
}
}
}
if (ChangedCalls)
return ChangeState::Calls;
if (ChangedInsts)
return ChangeState::Insts;
return ChangeState::None;
return Changed;
}
void StackPromoter::tryPromoteAlloc(SILInstruction *I) {
bool StackPromoter::tryPromoteAlloc(AllocRefInst *ARI) {
SILInstruction *AllocInsertionPoint = nullptr;
SILInstruction *DeallocInsertionPoint = nullptr;
if (!canPromoteAlloc(I, AllocInsertionPoint, DeallocInsertionPoint))
return;
if (!canPromoteAlloc(ARI, AllocInsertionPoint, DeallocInsertionPoint))
return false;
DEBUG(llvm::dbgs() << "Promoted " << *I);
DEBUG(llvm::dbgs() << " in " << I->getFunction()->getName() << '\n');
DEBUG(llvm::dbgs() << "Promoted " << *ARI);
DEBUG(llvm::dbgs() << " in " << ARI->getFunction()->getName() << '\n');
NumStackPromoted++;
SILBuilder B(DeallocInsertionPoint);
if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
// It's an object allocation. We set the [stack] attribute in the alloc_ref.
ARI->setStackAllocatable();
if (AllocInsertionPoint)
ARI->moveBefore(AllocInsertionPoint);
/// And create a dealloc_ref [stack] at the end of the object's lifetime.
B.createDeallocRef(I->getLoc(), I, true);
ChangedInsts = true;
return;
}
if (auto *AI = dyn_cast<ApplyInst>(I)) {
assert(!AllocInsertionPoint && "can't move call to swift_bufferAlloc");
// It's an array buffer allocation.
auto *OldFRI = cast<FunctionRefInst>(AI->getCallee());
SILFunction *OldF = OldFRI->getReferencedFunction();
SILLocation loc = (OldF->hasLocation() ? OldF->getLocation() : AI->getLoc());
SILFunction *DeallocFun = getBufferDeallocFunc(OldF, loc);
// We insert a swift_bufferDeallocateFromStack at the end of the buffer's
// lifetime.
auto *DeallocFRI = B.createFunctionRef(OldFRI->getLoc(), DeallocFun);
B.createApply(loc, DeallocFRI, { AI }, false);
// And replace the call to swift_bufferAllocate with a call to
// swift_bufferAllocateOnStack.
B.setInsertionPoint(AI);
auto *AllocFRI = B.createFunctionRef(OldFRI->getLoc(),
getBufferAllocFunc(OldF, loc));
AI->setOperand(0, AllocFRI);
ChangedCalls = true;
return;
}
llvm_unreachable("unhandled allocation instruction");
}
SILFunction *StackPromoter::getBufferAllocFunc(SILFunction *OrigFunc,
SILLocation Loc) {
if (!BufferAllocFunc) {
BufferAllocFunc = OrigFunc->getModule().getOrCreateFunction(
Loc,
"swift_bufferAllocateOnStack",
OrigFunc->getLinkage(),
OrigFunc->getLoweredFunctionType(),
OrigFunc->isBare(), IsNotTransparent,
OrigFunc->isFragile());
}
return BufferAllocFunc;
}
SILFunction *StackPromoter::getBufferDeallocFunc(SILFunction *OrigFunc,
SILLocation Loc) {
if (!BufferDeallocFunc) {
SILModule &M = OrigFunc->getModule();
CanSILFunctionType OrigTy = OrigFunc->getLoweredFunctionType();
CanType ObjectTy = OrigTy->getSILResult().getSwiftRValueType();
// The function type for swift_bufferDeallocateFromStack.
CanSILFunctionType FunTy = SILFunctionType::get(
OrigTy->getGenericSignature(),
OrigTy->getExtInfo(),
OrigTy->getCalleeConvention(),
{ SILParameterInfo(ObjectTy, ParameterConvention::Direct_Guaranteed) },
ArrayRef<SILResultInfo>(),
OrigTy->getOptionalErrorResult(),
M.getASTContext());
BufferDeallocFunc = M.getOrCreateFunction(
Loc,
"swift_bufferDeallocateFromStack",
OrigFunc->getLinkage(),
FunTy,
OrigFunc->isBare(), IsNotTransparent, OrigFunc->isFragile());
}
return BufferDeallocFunc;
B.createDeallocRef(ARI->getLoc(), ARI, true);
return true;
}
namespace {
@@ -449,12 +325,15 @@ template <> struct GraphTraits<StackPromoter *>
}
bool StackPromoter::canPromoteAlloc(SILInstruction *AI,
bool StackPromoter::canPromoteAlloc(AllocRefInst *ARI,
SILInstruction *&AllocInsertionPoint,
SILInstruction *&DeallocInsertionPoint) {
if (ARI->isObjC() || ARI->canAllocOnStack())
return false;
AllocInsertionPoint = nullptr;
DeallocInsertionPoint = nullptr;
auto *Node = ConGraph->getNodeOrNull(AI, EA);
auto *Node = ConGraph->getNodeOrNull(ARI, EA);
if (!Node)
return false;
@@ -479,7 +358,7 @@ bool StackPromoter::canPromoteAlloc(SILInstruction *AI,
// Try to find the point where to insert the deallocation.
// This might need more than one try in case we need to move the allocation
// out of a stack-alloc-dealloc pair. See findDeallocPoint().
SILInstruction *StartInst = AI;
SILInstruction *StartInst = ARI;
for (;;) {
SILInstruction *RestartPoint = nullptr;
DeallocInsertionPoint = findDeallocPoint(StartInst, RestartPoint, Node,
@@ -490,11 +369,6 @@ bool StackPromoter::canPromoteAlloc(SILInstruction *AI,
if (!RestartPoint)
return false;
// Moving a buffer allocation call is not trivial because we would need to
// move all the parameter calculations as well. So we just don't do it.
if (!isa<AllocRefInst>(AI))
return false;
// Retry with moving the allocation up.
AllocInsertionPoint = RestartPoint;
StartInst = RestartPoint;
@@ -682,15 +556,8 @@ private:
SILFunction *F = getFunction();
if (auto *ConGraph = EA->getConnectionGraph(F)) {
StackPromoter promoter(F, ConGraph, DA->get(F), EA);
switch (promoter.promote()) {
case StackPromoter::ChangeState::None:
break;
case StackPromoter::ChangeState::Insts:
if (promoter.promote()) {
invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions);
break;
case StackPromoter::ChangeState::Calls:
invalidateAnalysis(SILAnalysis::InvalidationKind::CallsAndInstructions);
break;
}
}
}

View File

@@ -107,28 +107,6 @@ extern "C" HeapObject* swift_bufferAllocate(
alignMask);
}
/// \brief Another entry point for swift_bufferAllocate.
/// It is generated by the compiler in some corner cases, e.g. if a serialized
/// optimized module is imported into a non-optimized main module.
/// TODO: This is only a workaround. Remove this function as soon as we can
/// get rid of the llvm SwiftStackPromotion pass.
SWIFT_RUNTIME_EXPORT
extern "C" HeapObject* swift_bufferAllocateOnStack(
HeapMetadata const* bufferType, size_t size, size_t alignMask) {
return swift::SWIFT_RT_ENTRY_CALL(swift_allocObject)(bufferType, size,
alignMask);
}
/// \brief Called at the end of the lifetime of an object returned by
/// swift_bufferAllocateOnStack.
/// It is generated by the compiler in some corner cases, e.g. if a serialized
/// optimized module is imported into a non-optimized main module.
/// TODO: This is only a workaround. Remove this function as soon as we can
/// get rid of the llvm SwiftStackPromotion pass.
SWIFT_RUNTIME_EXPORT
extern "C" void swift_bufferDeallocateFromStack(HeapObject *) {
}
SWIFT_RUNTIME_EXPORT
extern "C" intptr_t swift_bufferHeaderSize() { return sizeof(HeapObject); }

View File

@@ -1,94 +0,0 @@
; RUN: %swift-llvm-opt -swift-stack-promotion -stack-promotion-limit=100 %s | %FileCheck %s
target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.9"
%swift.type = type { i64 }
%objc_object = type opaque
%swift.refcounted = type { %swift.type*, i32, i32 }
%BufferStorageType = type <{ %swift.refcounted }>
; CHECK-LABEL: define{{( protected)?}} void @promote_buffer()
; CHECK: [[B:%.+]] = alloca i8, i32 48, align 8
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: [[BC:%.+]] = bitcast i8* [[B]] to %objc_object*
; CHECK: [[I:%.+]] = call %objc_object* @swift_initStackObject(%swift.type* [[M]], %objc_object* [[BC]])
; CHECK: [[BC2:%.+]] = bitcast %objc_object* [[I]] to i8*
; CHECK: call void @llvm.lifetime.end(i64 -1, i8* [[BC2]])
; CHECK: ret void
define void @promote_buffer() {
entry:
%0 = call %swift.type* @get_buffer_metadata()
%1 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %0, i64 48, i64 7)
call void @swift_bufferDeallocateFromStack(%objc_object* %1)
ret void
}
; CHECK-LABEL: define{{( protected)?}} void @promote_buffer_with_devirtualized_release()
; CHECK: [[B:%.+]] = alloca i8, i32 48, align 8
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: [[BC:%.+]] = bitcast i8* [[B]] to %objc_object*
; CHECK: [[I:%.+]] = call %objc_object* @swift_initStackObject(%swift.type* [[M]], %objc_object* [[BC]])
; CHECK-NOT: call
; CHECK: [[BC2:%.+]] = bitcast %objc_object* [[I]] to i8*
; CHECK-NOT: call
; CHECK: call void @llvm.lifetime.end(i64 -1, i8* [[BC2]])
; CHECK-NOT: call
; CHECK: ret void
define void @promote_buffer_with_devirtualized_release() {
entry:
%0 = call %swift.type* @get_buffer_metadata()
%1 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %0, i64 48, i64 7)
%2 = bitcast %objc_object* %1 to %BufferStorageType*
call void bitcast (void (%swift.refcounted*)* @swift_setDeallocating to void (%BufferStorageType*)*)(%BufferStorageType* %2)
%3 = bitcast %BufferStorageType* %2 to %swift.refcounted*
call void @swift_deallocClassInstance(%swift.refcounted* %3, i64 48, i64 7)
call void @swift_bufferDeallocateFromStack(%objc_object* %1)
ret void
}
; CHECK-LABEL: define{{( protected)?}} void @promote_buffer_with_devirtualized_release_and_non_trivial_deinit()
; CHECK: [[B:%.+]] = alloca i8, i32 48, align 8
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: [[BC:%.+]] = bitcast i8* [[B]] to %objc_object*
; CHECK: [[I:%.+]] = call %objc_object* @swift_initStackObject(%swift.type* [[M]], %objc_object* [[BC]])
; CHECK: [[BC2:%.+]] = bitcast %objc_object* [[I]] to %BufferStorageType
; CHECK-NEXT: call {{.*}}@swift_setDeallocating {{.*}}({{.*}} [[BC2]])
; CHECK-NEXT: call void @unknown_deinit(%BufferStorageType* [[BC2]])
; CHECK-NOT: call
; CHECK: [[BC3:%.+]] = bitcast %objc_object* [[I]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.end(i64 -1, i8* [[BC3]])
; CHECK-NOT: call
; CHECK: ret void
define void @promote_buffer_with_devirtualized_release_and_non_trivial_deinit() {
entry:
%0 = call %swift.type* @get_buffer_metadata()
%1 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %0, i64 48, i64 7)
%2 = bitcast %objc_object* %1 to %BufferStorageType*
call void bitcast (void (%swift.refcounted*)* @swift_setDeallocating to void (%BufferStorageType*)*)(%BufferStorageType* %2)
call void @unknown_deinit(%BufferStorageType* %2)
%3 = bitcast %BufferStorageType* %2 to %swift.refcounted*
call void @swift_deallocClassInstance(%swift.refcounted* %3, i64 48, i64 7)
call void @swift_bufferDeallocateFromStack(%objc_object* %1)
ret void
}
; CHECK-LABEL: define{{( protected)?}} void @dont_promote_buffer_exceeding_limit()
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: call %objc_object* @swift_bufferAllocate(%swift.type* [[M]], i64 48, i64 7)
; CHECK-NEXT: ret void
define void @dont_promote_buffer_exceeding_limit() {
entry:
%0 = alloca i8, i32 128, align 8
%1 = call %swift.type* @get_buffer_metadata()
%2 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %1, i64 48, i64 7)
call void @swift_bufferDeallocateFromStack(%objc_object* %2)
ret void
}
declare %swift.type* @get_buffer_metadata()
declare %objc_object* @swift_bufferAllocateOnStack(%swift.type*, i64, i64)
declare void @swift_bufferDeallocateFromStack(%objc_object*)
declare void @swift_setDeallocating(%swift.refcounted*)
declare void @swift_deallocClassInstance(%swift.refcounted*, i64, i64)
declare void @unknown_deinit(%BufferStorageType*)

View File

@@ -94,151 +94,6 @@ bb0:
}
class MyArrayStorage<Element> {
}
struct WrongStorage {
}
// CHECK-LABEL: sil @devirtualize_buffer
// CHECK: [[A:%[0-9]+]] = function_ref @swift_bufferAllocateOnStack
// CHECK: [[B:%[0-9]+]] = apply [[A]]
// CHECK-NOT: strong_release
// CHECK: unchecked_ref_cast
// CHECK: [[C:%[0-9]+]] = unchecked_ref_cast [[B]]
// CHECK: set_deallocating [[C]]
// CHECK: [[D:%[0-9]+]] = function_ref @_TFC4test14MyArrayStorageD
// CHECK: apply [[D]]<Int64>([[C]])
// CHECK: [[X:%[0-9]+]] = function_ref @swift_bufferDeallocateFromStack
// CHECK: apply [[X]]([[B]])
// CHECK: return
sil @devirtualize_buffer : $@convention(thin) () -> () {
bb0:
%3 = integer_literal $Builtin.Int64, 7
%4 = struct $Int64 (%3 : $Builtin.Int64)
%mt = metatype $@thick MyArrayStorage<Int64>.Type // user: %5
%5 = init_existential_metatype %mt : $@thick MyArrayStorage<Int64>.Type, $@thick AnyObject.Type
%66 = function_ref @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%67 = apply %66(%5, %4, %4) : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%95 = unchecked_ref_cast %67 : $AnyObject to $Builtin.BridgeObject
strong_release %95 : $Builtin.BridgeObject
%119 = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%120 = apply %119(%67) : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil @dont_devirtualize_buffer_with_unknown_release
// CHECK: function_ref @swift_bufferAllocateOnStack
// CHECK-NEXT: apply
// CHECK-NOT: apply
// CHECK: strong_release
// CHECK-NEXT: strong_release
// CHECK-NOT: apply
// CHECK: function_ref @swift_bufferDeallocateFromStack
// CHECK-NEXT: apply
// CHECK: return
sil @dont_devirtualize_buffer_with_unknown_release : $@convention(thin) (@owned Builtin.BridgeObject) -> () {
bb0(%0 : $Builtin.BridgeObject):
%3 = integer_literal $Builtin.Int64, 7
%4 = struct $Int64 (%3 : $Builtin.Int64)
%mt = metatype $@thick MyArrayStorage<Int64>.Type // user: %5
%5 = init_existential_metatype %mt : $@thick MyArrayStorage<Int64>.Type, $@thick AnyObject.Type
%66 = function_ref @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%67 = apply %66(%5, %4, %4) : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%95 = unchecked_ref_cast %67 : $AnyObject to $Builtin.BridgeObject
strong_release %95 : $Builtin.BridgeObject
strong_release %0 : $Builtin.BridgeObject
%119 = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%120 = apply %119(%67) : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil @dont_devirtualize_buffer_with_unknown_allocation
// CHECK: function_ref @swift_bufferAllocate
// CHECK-NEXT: apply
// CHECK-NOT: apply
// CHECK: strong_release
// CHECK-NOT: apply
// CHECK: function_ref @swift_bufferDeallocateFromStack
// CHECK-NEXT: apply
// CHECK: return
sil @dont_devirtualize_buffer_with_unknown_allocation : $@convention(thin) () -> () {
bb0:
%3 = integer_literal $Builtin.Int64, 7
%4 = struct $Int64 (%3 : $Builtin.Int64)
%mt = metatype $@thick MyArrayStorage<Int64>.Type // user: %5
%5 = init_existential_metatype %mt : $@thick MyArrayStorage<Int64>.Type, $@thick AnyObject.Type
%66 = function_ref @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%67 = apply %66(%5, %4, %4) : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%95 = unchecked_ref_cast %67 : $AnyObject to $Builtin.BridgeObject
strong_release %95 : $Builtin.BridgeObject
%119 = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%120 = apply %119(%67) : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil @dont_devirtualize_buffer_with_unknown_metatype
// CHECK: function_ref @swift_bufferAllocateOnStack
// CHECK-NEXT: apply
// CHECK-NOT: apply
// CHECK: strong_release
// CHECK-NOT: apply
// CHECK: function_ref @swift_bufferDeallocateFromStack
// CHECK-NEXT: apply
// CHECK: return
sil @dont_devirtualize_buffer_with_unknown_metatype : $@convention(thin) (@thick AnyObject.Type) -> () {
bb0(%0 : $@thick AnyObject.Type):
%3 = integer_literal $Builtin.Int64, 7
%4 = struct $Int64 (%3 : $Builtin.Int64)
%66 = function_ref @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%67 = apply %66(%0, %4, %4) : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%95 = unchecked_ref_cast %67 : $AnyObject to $Builtin.BridgeObject
strong_release %95 : $Builtin.BridgeObject
%119 = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%120 = apply %119(%67) : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil @dont_devirtualize_buffer_with_wrong_metatype
// CHECK: function_ref @swift_bufferAllocateOnStack
// CHECK-NEXT: apply
// CHECK-NOT: apply
// CHECK: strong_release
// CHECK-NOT: apply
// CHECK: function_ref @swift_bufferDeallocateFromStack
// CHECK-NEXT: apply
// CHECK: return
sil @dont_devirtualize_buffer_with_wrong_metatype : $@convention(thin) () -> () {
bb0:
%3 = integer_literal $Builtin.Int64, 7
%4 = struct $Int64 (%3 : $Builtin.Int64)
%mt = metatype $@thick WrongStorage.Type // user: %5
%5 = init_existential_metatype %mt : $@thick WrongStorage.Type, $@thick AnyObject.Type
%66 = function_ref @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%67 = apply %66(%5, %4, %4) : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
%95 = unchecked_ref_cast %67 : $AnyObject to $Builtin.BridgeObject
strong_release %95 : $Builtin.BridgeObject
%119 = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%120 = apply %119(%67) : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
%r = tuple ()
return %r : $()
}
sil hidden_external @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
sil hidden_external @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int64, Int64) -> @owned AnyObject
sil hidden_external @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
sil @unknown_func : $@convention(thin) () -> ()
// test.B.__deallocating_deinit
@@ -277,39 +132,3 @@ sil_vtable B {
#B.init!initializer.1: _TFC4test1BcfT_S0_ // test.B.init () -> test.B
}
// test.MyArrayStorage.__deallocating_deinit
sil hidden @_TFC4test14MyArrayStorageD : $@convention(method) <Element> (@owned MyArrayStorage<Element>) -> () {
// %0 // users: %1, %3
bb0(%0 : $MyArrayStorage<Element>):
debug_value %0 : $MyArrayStorage<Element>, let, name "self" // id: %1
// function_ref test.MyArrayStorage.deinit
%2 = function_ref @_TFC4test14MyArrayStoraged : $@convention(method) <τ_0_0> (@guaranteed MyArrayStorage<τ_0_0>) -> @owned Builtin.NativeObject // user: %3
%3 = apply %2<Element>(%0) : $@convention(method) <τ_0_0> (@guaranteed MyArrayStorage<τ_0_0>) -> @owned Builtin.NativeObject // user: %4
%4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $MyArrayStorage<Element> // user: %5
dealloc_ref %4 : $MyArrayStorage<Element> // id: %5
%6 = tuple () // user: %7
return %6 : $() // id: %7
}
// test.MyArrayStorage.deinit
sil hidden @_TFC4test14MyArrayStoraged : $@convention(method) <Element> (@guaranteed MyArrayStorage<Element>) -> @owned Builtin.NativeObject {
// %0 // users: %1, %2
bb0(%0 : $MyArrayStorage<Element>):
debug_value %0 : $MyArrayStorage<Element>, let, name "self" // id: %1
%2 = unchecked_ref_cast %0 : $MyArrayStorage<Element> to $Builtin.NativeObject // user: %3
return %2 : $Builtin.NativeObject // id: %3
}
// test.MyArrayStorage.init () -> test.MyArrayStorage<A>
sil hidden @_TFC4test14MyArrayStoragecfT_GS0_x_ : $@convention(method) <Element> (@owned MyArrayStorage<Element>) -> @owned MyArrayStorage<Element> {
// %0 // users: %1, %2
bb0(%0 : $MyArrayStorage<Element>):
debug_value %0 : $MyArrayStorage<Element>, let, name "self" // id: %1
return %0 : $MyArrayStorage<Element> // id: %2
}
sil_vtable MyArrayStorage {
#MyArrayStorage.deinit!deallocator: _TFC4test14MyArrayStorageD // test.MyArrayStorage.__deallocating_deinit
#MyArrayStorage.init!initializer.1: _TFC4test14MyArrayStoragecfT_GS0_x_ // test.MyArrayStorage.init () -> test.MyArrayStorage<A>
}

View File

@@ -234,7 +234,6 @@ int main(int argc, char **argv) {
initializeSwiftRCIdentityPass(Registry);
initializeSwiftARCOptPass(Registry);
initializeSwiftARCContractPass(Registry);
initializeSwiftStackPromotionPass(Registry);
initializeInlineTreePrinterPass(Registry);
initializeSwiftMergeFunctionsPass(Registry);