Mirror of https://github.com/apple/swift.git, synced 2025-12-21 12:14:44 +01:00
Revert "Add a stack promotion optimization."
This reverts commit 0dd045ca04.
StackPromotion crashes when compiling MatMult in the stdlib-asserts configuration.
@@ -88,15 +88,6 @@ namespace swift {
    SwiftARCContract() : llvm::FunctionPass(ID) {}
  };

  class SwiftStackPromotion : public llvm::FunctionPass {
    virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const override;
    virtual bool runOnFunction(llvm::Function &F) override;
  public:
    static char ID;
    SwiftStackPromotion() : llvm::FunctionPass(ID) {}
  };

} // end namespace swift

#endif
@@ -22,13 +22,11 @@ namespace llvm {
  void initializeSwiftRCIdentityPass(PassRegistry &);
  void initializeSwiftARCOptPass(PassRegistry &);
  void initializeSwiftARCContractPass(PassRegistry &);
  void initializeSwiftStackPromotionPass(PassRegistry &);
}

namespace swift {
  llvm::FunctionPass *createSwiftARCOptPass();
  llvm::FunctionPass *createSwiftARCContractPass();
  llvm::FunctionPass *createSwiftStackPromotionPass();
  llvm::ImmutablePass *createSwiftAAWrapperPass();
  llvm::ImmutablePass *createSwiftRCIdentityPass();
} // end namespace swift
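For orientation, here is a minimal sketch of how these factory functions plug into LLVM's legacy pass manager of that era. The driver function and loop are assumptions for illustration; only the create*Pass entry points come from the header above.

    // Hypothetical driver (not part of this change): running the Swift LLVM
    // passes over a module with the legacy pass manager.
    #include "swift/LLVMPasses/Passes.h"
    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"

    void runSwiftPasses(llvm::Module &M) {
      llvm::legacy::FunctionPassManager FPM(&M);
      FPM.add(swift::createSwiftStackPromotionPass()); // buffer alloc -> alloca
      FPM.add(swift::createSwiftARCOptPass());         // ARC optimization
      FPM.doInitialization();
      for (llvm::Function &F : M)
        if (!F.isDeclaration())
          FPM.run(F);
      FPM.doFinalization();
    }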
@@ -166,8 +166,6 @@ PASS(SplitAllCriticalEdges, "split-critical-edges",
     "Split all critical edges")
PASS(SplitNonCondBrCriticalEdges, "split-non-cond_br-critical-edges",
     "Split all critical edges that do not involve cond_br")
PASS(StackPromotion, "stack-promotion",
     "Promote allocated objects on the stack")
PASS(StripDebugInfo, "strip-debug-info",
     "Strip debug info")
PASS(SwiftArrayOpts, "array-specialize",
@@ -74,12 +74,6 @@ static void addSwiftContractPass(const PassManagerBuilder &Builder,
  PM.add(createSwiftARCContractPass());
}

static void addSwiftStackPromotionPass(const PassManagerBuilder &Builder,
                                       PassManagerBase &PM) {
  if (Builder.OptLevel > 0)
    PM.add(createSwiftStackPromotionPass());
}

// FIXME: Copied from clang/lib/CodeGen/CGObjCMac.cpp.
// These should be moved to a single definition shared by clang and swift.
enum ImageInfoFlags {

@@ -131,9 +125,6 @@ void swift::performLLVMOptimizations(IRGenOptions &Opts, llvm::Module *Module,
    llvm::createAlwaysInlinerPass(/*insertlifetime*/false);
  }

  PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
                         addSwiftStackPromotionPass);

  // If the optimizer is enabled, we run the ARCOpt pass in the scalar optimizer
  // and the Contract pass as late as possible.
  if (!Opts.DisableLLVMARCOpts) {
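The addExtension call above uses LLVM's standard hook mechanism: PassManagerBuilder invokes the registered callback when it reaches the named extension point while assembling the pipeline. A self-contained sketch of that mechanism, with createCFGSimplificationPass standing in as a placeholder pass (everything here is illustrative, not part of the change):

    // Sketch of the PassManagerBuilder extension-point mechanism.
    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    #include "llvm/Transforms/Scalar.h"

    static void addEarlyPass(const llvm::PassManagerBuilder &Builder,
                             llvm::legacy::PassManagerBase &PM) {
      if (Builder.OptLevel > 0)  // same gating as addSwiftStackPromotionPass
        PM.add(llvm::createCFGSimplificationPass()); // placeholder pass
    }

    void buildPipeline(llvm::legacy::PassManager &MPM) {
      llvm::PassManagerBuilder PMBuilder;
      PMBuilder.OptLevel = 2;
      // EP_ModuleOptimizerEarly fires at the start of the module optimizer,
      // which is why the stack-promotion pass lands early in the pipeline.
      PMBuilder.addExtension(llvm::PassManagerBuilder::EP_ModuleOptimizerEarly,
                             addEarlyPass);
      PMBuilder.populateModulePassManager(MPM);
    }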
@@ -3,7 +3,6 @@ add_swift_library(swiftLLVMPasses
  LLVMSwiftRCIdentity.cpp
  LLVMARCOpts.cpp
  LLVMARCContract.cpp
  LLVMStackPromotion.cpp
)

add_dependencies(swiftLLVMPasses LLVMAnalysis)
@@ -1,196 +0,0 @@
//===--- LLVMStackPromotion.cpp - Replace allocation calls with alloca ----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This pass performs the last part of stack promotion for array buffers.
// The SIL StackPromotion pass generates a pair of swift_bufferAllocateOnStack
// and swift_bufferDeallocateFromStack calls. In this pass the final decision
// is made whether stack promotion should be done. If yes, the
// swift_bufferAllocateOnStack is replaced with an alloca plus a call to
// swift_initStackObject, and the swift_bufferDeallocateFromStack is removed.
// TODO: This is a hack and eventually this pass should not be required at all.
// For details see the comments for the SIL StackPromoter.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "swift-stack-promotion"
#include "swift/LLVMPasses/Passes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace swift;

STATISTIC(NumBufferAllocsPromoted,
          "Number of swift_bufferAllocate promoted");

cl::opt<int> LimitOpt("stack-promotion-limit",
                      llvm::cl::init(1024), llvm::cl::Hidden);

//===----------------------------------------------------------------------===//
//                        SwiftStackPromotion Pass
//===----------------------------------------------------------------------===//

char SwiftStackPromotion::ID = 0;

INITIALIZE_PASS_BEGIN(SwiftStackPromotion,
                      "swift-stack-promotion", "Swift stack promotion pass",
                      false, false)
INITIALIZE_PASS_END(SwiftStackPromotion,
                    "swift-stack-promotion", "Swift stack promotion pass",
                    false, false)

llvm::FunctionPass *swift::createSwiftStackPromotionPass() {
  initializeSwiftStackPromotionPass(*llvm::PassRegistry::getPassRegistry());
  return new SwiftStackPromotion();
}

void SwiftStackPromotion::getAnalysisUsage(llvm::AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}

/// Checks if we can promote a buffer and returns the size of the buffer.
/// The \a align parameter is set to the alignment of the buffer.
int canPromote(CallInst *CI, unsigned &align, int maxSize) {
  if (CI->getNumArgOperands() != 3)
    return 0;

  auto *SizeConst = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  if (!SizeConst)
    return 0;

  auto *AlignMaskConst = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!AlignMaskConst)
    return 0;

  int size = SizeConst->getValue().getSExtValue();
  if (size > maxSize)
    return 0;

  align = AlignMaskConst->getValue().getZExtValue() + 1;

  return size;
}

bool SwiftStackPromotion::runOnFunction(Function &F) {
  bool Changed = false;
  Constant *allocFunc = nullptr;
  Constant *initFunc = nullptr;
  int maxSize = LimitOpt;
  Module *M = F.getParent();
  const DataLayout &DL = M->getDataLayout();
  IntegerType *AllocType = nullptr;
  IntegerType *IntType = nullptr;

  SmallVector<CallInst *, 8> BufferAllocs;
  SmallPtrSet<CallInst *, 8> PromotedAllocs;
  SmallVector<CallInst *, 8> BufferDeallocs;

  // Search for allocation and deallocation calls in the function.
  for (BasicBlock &BB : F) {
    for (auto Iter = BB.begin(); Iter != BB.end(); ) {
      Instruction *I = Iter;
      Iter++;

      if (auto *AI = dyn_cast<AllocaInst>(I)) {
        int Size = 1;
        if (auto *SizeConst = dyn_cast<ConstantInt>(AI->getArraySize()))
          Size = SizeConst->getValue().getSExtValue();

        // Count the existing alloca sizes against the limit.
        maxSize -= DL.getTypeAllocSize(AI->getAllocatedType()) * Size;
      }

      auto *CI = dyn_cast<CallInst>(I);
      if (!CI)
        continue;

      Function *Callee = CI->getCalledFunction();
      if (!Callee)
        continue;

      if (Callee->getName() == "swift_bufferAllocateOnStack") {
        BufferAllocs.push_back(CI);
      } else if (Callee->getName() == "swift_bufferDeallocateFromStack") {
        BufferDeallocs.push_back(CI);
      }
    }
  }

  // First handle allocations.
  for (CallInst *CI : BufferAllocs) {
    Function *Callee = CI->getCalledFunction();
    assert(Callee);
    unsigned align = 0;
    if (int size = canPromote(CI, align, maxSize)) {
      maxSize -= size;
      if (!AllocType) {
        // Create the swift_initStackObject function and all required types.
        AllocType = IntegerType::get(M->getContext(), 8);
        IntType = IntegerType::get(M->getContext(), 32);
        auto *OrigFT = Callee->getFunctionType();
        auto *HeapObjTy = OrigFT->getReturnType();
        auto *MetaDataTy = OrigFT->getParamType(0);
        auto *NewFTy = FunctionType::get(HeapObjTy,
                                         {MetaDataTy, HeapObjTy},
                                         false);
        initFunc = M->getOrInsertFunction("swift_initStackObject", NewFTy);
      }
      // Replace the allocation call with an alloca.
      Value *AllocA = new AllocaInst(AllocType,
                                     ConstantInt::get(IntType, size),
                                     align, "buffer", F.front().begin());
      // And initialize it with a call to swift_initStackObject.
      IRBuilder<> B(CI);
      Value *casted = B.CreateBitCast(AllocA, CI->getType());
      CallInst *initCall = B.CreateCall(initFunc,
                                        {CI->getArgOperand(0), casted});
      CI->replaceAllUsesWith(initCall);
      CI->eraseFromParent();
      PromotedAllocs.insert(initCall);
      ++NumBufferAllocsPromoted;
    } else {
      // We don't do stack promotion. Replace the call with a call to the
      // regular swift_bufferAllocate.
      if (!allocFunc) {
        allocFunc = M->getOrInsertFunction("swift_bufferAllocate",
                                           Callee->getFunctionType());
      }
      CI->setCalledFunction(allocFunc);
    }
    Changed = true;
  }

  // After we have made the decision for all allocations, we can handle the
  // deallocations.
  for (CallInst *CI : BufferDeallocs) {
    CallInst *Alloc = dyn_cast<CallInst>(CI->getArgOperand(0));
    assert(Alloc && "alloc buffer obfuscated");
    if (PromotedAllocs.count(Alloc)) {
      IRBuilder<> B(CI);
      // This has two purposes:
      // 1. Tell LLVM the lifetime of the allocated stack memory.
      // 2. Avoid tail-call optimization, which may convert the call to the
      //    final release to a jump, which is done after the stack frame is
      //    destructed.
      B.CreateLifetimeEnd(CI->getArgOperand(0));
    }
    // Other than inserting the end-of-lifetime, the deallocation is a no-op.
    CI->eraseFromParent();
    Changed = true;
  }
  return Changed;
}
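One detail worth calling out: the runtime entry points take an alignment *mask* rather than an alignment, so canPromote recovers the alignment by adding one. A tiny self-contained check of that convention (the helper function is ours; the value 7 matches the i64 7 argument in the regression test later in this commit):

    #include <cassert>
    #include <cstdint>

    // The runtime passes alignMask = alignment - 1 (alignment is a power of
    // two, so the mask has all low bits set). canPromote inverts this.
    unsigned alignmentFromMask(uint64_t alignMask) {
      return static_cast<unsigned>(alignMask + 1);
    }

    int main() {
      assert(alignmentFromMask(7) == 8);   // i64 7 in the test -> 8-byte align
      assert(alignmentFromMask(15) == 16);
      return 0;
    }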
@@ -49,6 +49,5 @@ add_swift_library(swiftSILPasses
  SimplifyCFG.cpp
  Sink.cpp
  SpeculativeDevirtualizer.cpp
  StackPromotion.cpp
  UsePrespecialized.cpp
  LINK_LIBRARIES swiftSILPassesUtils swiftSILAnalysis)
@@ -255,9 +255,6 @@ void swift::runSILOptimizationPasses(SILModule &Module) {
  PM.addGlobalPropertyOpt();
  PM.addUpdateEscapeAnalysis();

  // Do the first stack promotion on high-level SIL.
  PM.addStackPromotion();

  PM.runOneIteration();
  PM.resetAndRemoveTransformations();

@@ -292,9 +289,6 @@ void swift::runSILOptimizationPasses(SILModule &Module) {

  PM.addUpdateEscapeAnalysis();

  // Do the second stack promotion on low-level SIL.
  PM.addStackPromotion();

  // Speculate virtual call targets.
  PM.addSpeculativeDevirtualization();
@@ -1,468 +0,0 @@
//===------- StackPromotion.cpp - Promotes allocations to the stack -------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "stack-promotion"
#include "swift/SILPasses/Passes.h"
#include "swift/SILPasses/Transforms.h"
#include "swift/SILAnalysis/EscapeAnalysis.h"
#include "swift/SILAnalysis/DominanceAnalysis.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILBuilder.h"
#include "llvm/ADT/Statistic.h"

STATISTIC(NumStackPromoted, "Number of objects promoted to the stack");

using namespace swift;

/// Promotes heap allocated objects to the stack.
/// The following types of allocations are handled:
/// *) alloc_ref instructions of native swift classes: if promoted, the [stack]
///    attribute is set in the alloc_ref and a dealloc_ref [stack] is inserted
///    at the end of the object's lifetime.
/// *) Array buffers which are allocated by a call to swift_bufferAllocate: if
///    promoted, the swift_bufferAllocate call is replaced by a call to
///    swift_bufferAllocateOnStack and a call to swift_bufferDeallocateFromStack
///    is inserted at the end of the buffer's lifetime.
/// Those calls are lowered by the LLVM SwiftStackPromotion pass.
/// TODO: This is a terrible hack, but necessary because we need constant
/// size and alignment for the final stack promotion decision. The arguments
/// to swift_bufferAllocate in SIL are not constant because they depend on
/// the not-yet-evaluatable sizeof and alignof builtins. Therefore we need
/// LLVM's constant propagation prior to deciding on stack promotion.
/// The solution to this problem is that we need native support for tail-
/// allocated arrays in SIL so that we can do the array buffer allocations
/// with alloc_ref instructions.
class StackPromoter {

  // Some analyses we need.

  EscapeAnalysis::ConnectionGraph *ConGraph;
  DominanceInfo *DT;
  PostDominanceInfo *PDT;

  // Pseudo-functions for (de-)allocating array buffers on the stack.

  SILFunction *BufferAllocFunc = nullptr;
  SILFunction *BufferDeallocFunc = nullptr;

  bool ChangedInsts = false;
  bool ChangedCalls = false;

  /// Worklist for visiting all blocks.
  class WorkListType {
    /// The nesting depth of stack allocation instructions for each block.
    /// A value of -1 means: not known yet.
    llvm::DenseMap<SILBasicBlock *, int> Block2StackDepth;

    /// The work list of not yet handled blocks.
    llvm::SmallVector<SILBasicBlock *, 8> ToHandle;

  public:
    bool empty() const { return ToHandle.empty(); }

    SILBasicBlock *pop_back_val() { return ToHandle.pop_back_val(); }

    /// Insert a block into the worklist and set its stack depth.
    void insert(SILBasicBlock *BB, int StackDepth) {
      auto Iter = Block2StackDepth.find(BB);
      if (Iter != Block2StackDepth.end()) {
        // We already handled the block.
        assert(StackDepth >= 0);
        if (Iter->second < 0) {
          // Update the stack depth if we didn't set it yet for the block.
          Iter->second = StackDepth;
        } else {
          assert(Iter->second == StackDepth &&
                 "inconsistent stack depth at a CFG merge point");
        }
      } else {
        Block2StackDepth[BB] = StackDepth;
        ToHandle.push_back(BB);
      }
    }

    int getStackDepth(SILBasicBlock *BB) {
      assert(Block2StackDepth.find(BB) != Block2StackDepth.end());
      int Depth = Block2StackDepth.lookup(BB);
      assert(Depth >= 0 && "EndBlock not reachable from StartBlock");
      return Depth;
    }
  };

  /// Tries to promote the allocation \p AI.
  void tryPromoteAlloc(SILInstruction *AI);

  /// Creates the external declaration for swift_bufferAllocateOnStack.
  SILFunction *getBufferAllocFunc(SILFunction *OrigFunc,
                                  SILLocation Loc);

  /// Creates the external declaration for swift_bufferDeallocateFromStack.
  SILFunction *getBufferDeallocFunc(SILFunction *OrigFunc,
                                    SILLocation Loc);

  /// Checks if the allocation \p AI can be promoted and returns the insertion
  /// point for the deallocation instruction(s) if it is possible.
  SILInstruction *canPromoteAlloc(SILInstruction *AI);

  bool strictlyDominates(SILBasicBlock *A, SILBasicBlock *B) {
    return A != B && DT->dominates(A, B);
  }

  bool strictlyPostDominates(SILBasicBlock *A, SILBasicBlock *B) {
    return A != B && PDT->dominates(A, B);
  }

  SILBasicBlock *getImmediatePostDom(SILBasicBlock *BB) {
    auto *IDomNode = PDT->getNode(BB)->getIDom();
    if (!IDomNode)
      return nullptr;
    return IDomNode->getBlock();
  }

public:

  StackPromoter(EscapeAnalysis::ConnectionGraph *ConGraph,
                DominanceInfo *DT, PostDominanceInfo *PDT) :
    ConGraph(ConGraph), DT(DT), PDT(PDT) { }

  /// What did the optimization change?
  enum class ChangeState {
    None,
    Insts,
    Calls
  };

  /// The main entry point for the optimization.
  ChangeState promote();
};

/// Returns true if instruction \p I is an allocation we can handle.
static bool isPromotableAllocInst(SILInstruction *I) {
  // Check for swift object allocation.
  if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
    if (!ARI->isObjC())
      return true;
    return false;
  }
  // Check for array buffer allocation.
  auto *AI = dyn_cast<ApplyInst>(I);
  if (AI && AI->getNumArguments() == 3) {
    if (auto *FRI = dyn_cast<FunctionRefInst>(AI->getCallee())) {
      SILFunction *Callee = FRI->getReferencedFunction();
      if (Callee->getName() == "swift_bufferAllocate")
        return true;
    }
    return false;
  }
  return false;
}

StackPromoter::ChangeState StackPromoter::promote() {
  // Search the whole function for stack promotable allocations.
  for (SILBasicBlock &BB : *ConGraph->getFunction()) {
    for (SILInstruction &I : BB) {
      if (isPromotableAllocInst(&I)) {
        tryPromoteAlloc(&I);
      }
    }
  }
  if (ChangedCalls)
    return ChangeState::Calls;
  if (ChangedInsts)
    return ChangeState::Insts;
  return ChangeState::None;
}

void StackPromoter::tryPromoteAlloc(SILInstruction *I) {
  SILInstruction *InsertionPoint = canPromoteAlloc(I);
  if (!InsertionPoint)
    return;

  DEBUG(llvm::dbgs() << "Promoted " << *I);
  DEBUG(llvm::dbgs() << "  in " << I->getFunction()->getName() << '\n');
  NumStackPromoted++;

  SILBuilder B(InsertionPoint);
  if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
    // It's an object allocation. We set the [stack] attribute in the alloc_ref.
    ARI->setStackAllocatable();

    // And create a dealloc_ref [stack] at the end of the object's lifetime.
    auto *DRI = B.createDeallocRef(I->getLoc(), I);
    DRI->setStackAllocatable();
    ChangedInsts = true;
    return;
  }
  if (auto *AI = dyn_cast<ApplyInst>(I)) {
    // It's an array buffer allocation.
    auto *OldFRI = cast<FunctionRefInst>(AI->getCallee());
    SILFunction *OldF = OldFRI->getReferencedFunction();
    SILLocation loc = (OldF->hasLocation() ? OldF->getLocation() : AI->getLoc());
    SILFunction *DeallocFun = getBufferDeallocFunc(OldF, loc);

    // We insert a swift_bufferDeallocateFromStack at the end of the buffer's
    // lifetime.
    auto *DeallocFRI = B.createFunctionRef(OldFRI->getLoc(), DeallocFun);
    B.createApply(loc, DeallocFRI, { AI }, false);

    // And replace the call to swift_bufferAllocate with a call to
    // swift_bufferAllocateOnStack.
    B.setInsertionPoint(AI);
    auto *AllocFRI = B.createFunctionRef(OldFRI->getLoc(),
                                         getBufferAllocFunc(OldF, loc));
    AI->setOperand(0, AllocFRI);

    ChangedCalls = true;
    return;
  }
  llvm_unreachable("unhandled allocation instruction");
}

SILFunction *StackPromoter::getBufferAllocFunc(SILFunction *OrigFunc,
                                               SILLocation Loc) {
  if (!BufferAllocFunc) {
    BufferAllocFunc = OrigFunc->getModule().getOrCreateFunction(
        Loc,
        "swift_bufferAllocateOnStack",
        OrigFunc->getLinkage(),
        OrigFunc->getLoweredFunctionType(),
        OrigFunc->isBare(), IsNotTransparent, IsNotFragile);
  }
  return BufferAllocFunc;
}

SILFunction *StackPromoter::getBufferDeallocFunc(SILFunction *OrigFunc,
                                                 SILLocation Loc) {
  if (!BufferDeallocFunc) {
    SILModule &M = OrigFunc->getModule();
    const ASTContext &Ctx = OrigFunc->getModule().getASTContext();
    CanSILFunctionType OrigTy = OrigFunc->getLoweredFunctionType();
    CanType ObjectTy = OrigTy->getResult().getType();

    // The function type for swift_bufferDeallocateFromStack.
    CanSILFunctionType FunTy = SILFunctionType::get(
        OrigTy->getGenericSignature(),
        OrigTy->getExtInfo(),
        OrigTy->getCalleeConvention(),
        { SILParameterInfo(ObjectTy, ParameterConvention::Direct_Guaranteed) },
        SILResultInfo(TupleType::getEmpty(Ctx), ResultConvention::Owned),
        OrigTy->getOptionalErrorResult(),
        M.getASTContext());

    BufferDeallocFunc = M.getOrCreateFunction(
        Loc,
        "swift_bufferDeallocateFromStack",
        OrigFunc->getLinkage(),
        FunTy,
        OrigFunc->isBare(), IsNotTransparent, IsNotFragile);
  }
  return BufferDeallocFunc;
}

#ifndef NDEBUG
// Just for debugging.
static void dumpUsePoints(const llvm::SmallPtrSetImpl<ValueBase *> &UsePoints) {
  for (ValueBase *V : UsePoints) {
    V->dump();
  }
}
#endif

SILInstruction *StackPromoter::canPromoteAlloc(SILInstruction *AI) {
  auto *Node = ConGraph->getNodeOrNull(AI);
  if (!Node)
    return nullptr;

  // The most important check: does the object escape the current function?
  if (Node->escapes())
    return nullptr;

  // Now we have to determine the lifetime of the allocated object in its
  // function.

  // Get all interesting uses of the object (e.g. release instructions). This
  // includes uses of objects where the allocation is stored to.
  llvm::SmallPtrSet<ValueBase *, 32> UsePoints;
  ConGraph->getUsePoints(UsePoints, Node);
  int NumUsePointsToFind = UsePoints.size();
  assert(NumUsePointsToFind > 0 &&
         "There must be at least one releasing instruction for an alloc");
  DEBUG(dumpUsePoints(UsePoints));

  // In the following we check two requirements for stack promotion:
  // 1) Are all uses in the same control region as the alloc? E.g. if the
  //    allocation is in a loop then there may not be any uses of the object
  //    outside the loop.
  // 2) We need to find an insertion place for the deallocation so that it
  //    preserves a properly nested stack allocation-deallocation structure.
  SILBasicBlock *StartBlock = AI->getParent();

  // The block where we assume we can insert the deallocation.
  SILBasicBlock *EndBlock = StartBlock;

  // We visit all instructions starting at the allocation instruction.
  WorkListType WorkList;
  // It's important that the EndBlock is at the head of the WorkList so that
  // we handle it after all other blocks.
  WorkList.insert(EndBlock, -1);
  WorkList.insert(StartBlock, 0);

  for (;;) {
    SILBasicBlock *BB = WorkList.pop_back_val();
    int StackDepth = 0;
    SILBasicBlock::iterator Iter;
    if (BB == StartBlock) {
      // In the first block we start at the allocation instruction and not at
      // the beginning of the block.
      Iter = AI;
    } else {
      // Track all uses in the block arguments.
      for (SILArgument *BBArg : BB->getBBArgs()) {
        if (UsePoints.count(BBArg) != 0)
          NumUsePointsToFind--;
      }
      // Make sure that the EndBlock is not inside a loop (which does not
      // contain the StartBlock).
      // E.g.:
      //     %obj = alloc_ref // the allocation
      //     br loop
      //   loop:
      //     the_only_use_of_obj(%obj)
      //     cond_br ..., loop, exit
      //   exit:
      //     ... // this is the new EndBlock
      for (SILBasicBlock *Pred : BB->getPreds()) {
        // Extend the lifetime region until the EndBlock post dominates the
        // StartBlock.
        while (!strictlyPostDominates(EndBlock, Pred)) {
          EndBlock = getImmediatePostDom(EndBlock);
        }
      }
      Iter = BB->begin();
      StackDepth = WorkList.getStackDepth(BB);
    }
    // Visit all instructions of the current block.
    while (Iter != BB->end()) {
      SILInstruction &I = *Iter++;
      if (BB == EndBlock && StackDepth == 0 && NumUsePointsToFind == 0) {
        // We found a place to insert the stack deallocation.
        return &I;
      }
      if (I.isAllocatingStack()) {
        StackDepth++;
      } else if (I.isDeallocatingStack()) {
        if (StackDepth == 0) {
          // The allocation is inside a stack alloc-dealloc region and we are
          // now leaving this region without having found a place for the
          // deallocation.
          // E.g.:
          //     %1 = alloc_stack
          //     %obj = alloc_ref // the allocation
          //     dealloc_stack %1
          //     use_of_obj(%obj)
          return nullptr;
        }
        StackDepth--;
      }
      // Track a use.
      if (UsePoints.count(&I) != 0)
        NumUsePointsToFind--;
    }
    if (WorkList.empty()) {
      if (EndBlock == BB) {
        // We reached the EndBlock but didn't find a place for the deallocation
        // so far (because we didn't find all uses yet or we entered another
        // stack alloc-dealloc region). Let's extend our lifetime region.
        // E.g.:
        //     %obj = alloc_ref // the allocation
        //     %1 = alloc_stack
        //     use_of_obj(%obj) // can't insert the deallocation in this block
        //     cond_br ..., bb1, bb2
        //   bb1:
        //     ...
        //     br bb2
        //   bb2:
        //     dealloc_stack %1 // this is the new EndBlock
        EndBlock = getImmediatePostDom(EndBlock);
        if (!EndBlock)
          return nullptr;
      }
      // Again, it's important that the EndBlock is the first in the WorkList.
      WorkList.insert(EndBlock, -1);
    }
    // Push the successor blocks to the WorkList.
    for (SILBasicBlock *Succ : BB->getSuccessors()) {
      if (!strictlyDominates(StartBlock, Succ)) {
        // The StartBlock is inside a loop but we couldn't find a deallocation
        // place in this loop, e.g. because there are uses outside the loop.
        // E.g.:
        //     %container = alloc_ref
        //     br loop
        //   loop:
        //     %obj = alloc_ref // the allocation
        //     store %obj to %some_field_in_container
        //     cond_br ..., loop, exit
        //   exit:
        //     use(%container)
        return nullptr;
      }
      WorkList.insert(Succ, StackDepth);
    }
  }
}

//===----------------------------------------------------------------------===//
//                           Top Level Driver
//===----------------------------------------------------------------------===//

namespace {

class StackPromotion : public SILFunctionTransform {

public:
  StackPromotion() {}

private:
  /// The entry point to the transformation.
  void run() override {
    DEBUG(llvm::dbgs() << "** StackPromotion **\n");

    auto *EA = PM->getAnalysis<EscapeAnalysis>();
    auto *DA = PM->getAnalysis<DominanceAnalysis>();
    auto *PDA = PM->getAnalysis<PostDominanceAnalysis>();

    SILFunction *F = getFunction();
    if (auto *ConGraph = EA->getConnectionGraph(F)) {
      StackPromoter promoter(ConGraph, DA->get(F), PDA->get(F));
      switch (promoter.promote()) {
        case StackPromoter::ChangeState::None:
          break;
        case StackPromoter::ChangeState::Insts:
          invalidateAnalysis(SILAnalysis::PreserveKind::ProgramFlow);
          break;
        case StackPromoter::ChangeState::Calls:
          invalidateAnalysis(SILAnalysis::PreserveKind::Branches);
          break;
      }
    }
  }

  StringRef getName() override { return "StackPromotion"; }
};

} // end anonymous namespace

SILTransform *swift::createStackPromotion() {
  return new StackPromotion();
}
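The stack-depth bookkeeping in canPromoteAlloc enforces SIL's invariant that stack allocations and deallocations are strictly nested. Here is a stripped-down, hypothetical model of that check over a linear sequence of events; the real pass tracks the depth per basic block across the CFG, but the core rule is the same:

    #include <cassert>
    #include <vector>

    enum class Event { AllocStack, DeallocStack, CandidateDealloc };

    // Returns true if inserting the candidate deallocation keeps the stack
    // discipline properly nested: depth never goes negative, and the
    // candidate is placed at depth zero, i.e. outside any alloc_stack /
    // dealloc_stack region opened after the allocation.
    bool properlyNested(const std::vector<Event> &events) {
      int depth = 0;
      for (Event e : events) {
        switch (e) {
        case Event::AllocStack:
          ++depth;
          break;
        case Event::DeallocStack:
          if (depth == 0)
            return false;          // leaving a region we didn't open
          --depth;
          break;
        case Event::CandidateDealloc:
          return depth == 0;       // only legal at the outermost level
        }
      }
      return false;                // never found an insertion point
    }

    int main() {
      using E = Event;
      // alloc_stack ... dealloc_stack ... candidate: OK (depth back to 0).
      assert(properlyNested({E::AllocStack, E::DeallocStack,
                             E::CandidateDealloc}));
      // Candidate while a region is still open: rejected, mirroring the
      // "dealloc_stack %1 before use_of_obj" example in the comments above.
      assert(!properlyNested({E::AllocStack, E::CandidateDealloc}));
      return 0;
    }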
@@ -1,40 +0,0 @@
; RUN: %swift-llvm-opt -swift-stack-promotion -stack-promotion-limit=100 %s | FileCheck %s

target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.9"

%swift.type = type { i64 }
%objc_object = type opaque

; CHECK-LABEL: define void @promote_buffer()
; CHECK: [[B:%.+]] = alloca i8, i32 48, align 8
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: [[BC:%.+]] = bitcast i8* [[B]] to %objc_object*
; CHECK: [[I:%.+]] = call %objc_object* @swift_initStackObject(%swift.type* [[M]], %objc_object* [[BC]])
; CHECK: [[BC2:%.+]] = bitcast %objc_object* [[I]] to i8*
; CHECK: call void @llvm.lifetime.end(i64 -1, i8* [[BC2]])
; CHECK: ret void
define void @promote_buffer() {
entry:
  %0 = call %swift.type* @get_buffer_metadata()
  %1 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %0, i64 48, i64 7)
  call void @swift_bufferDeallocateFromStack(%objc_object* %1)
  ret void
}

; CHECK-LABEL: define void @dont_promote_buffer_exceeding_limit()
; CHECK: [[M:%.+]] = call %swift.type* @get_buffer_metadata()
; CHECK: call %objc_object* @swift_bufferAllocate(%swift.type* [[M]], i64 48, i64 7)
; CHECK-NEXT: ret void
define void @dont_promote_buffer_exceeding_limit() {
entry:
  %0 = alloca i8, i32 128, align 8
  %1 = call %swift.type* @get_buffer_metadata()
  %2 = call %objc_object* @swift_bufferAllocateOnStack(%swift.type* %1, i64 48, i64 7)
  call void @swift_bufferDeallocateFromStack(%objc_object* %2)
  ret void
}

declare %swift.type* @get_buffer_metadata()
declare %objc_object* @swift_bufferAllocateOnStack(%swift.type*, i64, i64)
declare void @swift_bufferDeallocateFromStack(%objc_object*)
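Why the second test is rejected: runOnFunction charges every existing alloca against the promotion budget before considering any buffer. A small sketch of that arithmetic, with the numbers taken from the test above (the standalone program is our simplification, not part of the pass):

    #include <cassert>

    // Simplified model of the pass's budget accounting (illustrative only).
    int main() {
      int budget = 100;    // -stack-promotion-limit=100 from the RUN line
      budget -= 128;       // existing "alloca i8, i32 128" counts first
      int request = 48;    // size argument of swift_bufferAllocateOnStack
      bool promote = request <= budget;
      assert(!promote);    // 48 > -28: the call is rewritten to the regular
                           // swift_bufferAllocate instead of being promoted
      return 0;
    }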
@@ -5,20 +5,18 @@
 // check works by making sure we can blow through a long class hierarchy and
 // expose the various "unknown" functions.
 //
-// As a side-test it also checks if all allocs can be promoted to the stack.
-//
 // *NOTE* If something like templated protocols is ever implemented this file
 // needs to be updated.
 
 // CHECK-LABEL: sil @_TF38devirt_specialized_inherited_interplay6driverFT_T_ : $@convention(thin) () -> () {
 // CHECK: bb0:
-// CHECK: [[A3:%[0-9]+]] = alloc_ref [stack] $A3<S>
-// CHECK: [[A4:%[0-9]+]] = alloc_ref [stack] $A4<S>
-// CHECK: [[A5:%[0-9]+]] = alloc_ref [stack] $A5<S>
-// CHECK: [[B1:%[0-9]+]] = alloc_ref [stack] $B1<S>
-// CHECK: [[B2:%[0-9]+]] = alloc_ref [stack] $B2<S>
-// CHECK: [[B3:%[0-9]+]] = alloc_ref [stack] $B3<S>
-// CHECK: [[B4:%[0-9]+]] = alloc_ref [stack] $B4<S>
+// CHECK: [[A3:%[0-9]+]] = alloc_ref $A3<S>
+// CHECK: [[A4:%[0-9]+]] = alloc_ref $A4<S>
+// CHECK: [[A5:%[0-9]+]] = alloc_ref $A5<S>
+// CHECK: [[B1:%[0-9]+]] = alloc_ref $B1<S>
+// CHECK: [[B2:%[0-9]+]] = alloc_ref $B2<S>
+// CHECK: [[B3:%[0-9]+]] = alloc_ref $B3<S>
+// CHECK: [[B4:%[0-9]+]] = alloc_ref $B4<S>
 // CHECK: [[F0:%[0-9]+]] = function_ref @unknown0 : $@convention(thin) () -> ()
 // CHECK: apply [[F0]]
 // CHECK: apply [[F0]]
@@ -1,337 +0,0 @@
// RUN: %target-sil-opt -update-escapes -stack-promotion -enable-sil-verify-all %s | FileCheck %s

sil_stage canonical

import Builtin
import Swift
import SwiftShims

class XX {
  @sil_stored var x: Int32

  init()
}

class YY {
  @sil_stored var xx: XX

  init(newx: XX)
}

struct DummyArrayStorage<Element> {
}

sil @xx_init : $@convention(thin) (@guaranteed XX) -> XX {
bb0(%0 : $XX):
  %1 = integer_literal $Builtin.Int32, 0
  %2 = struct $Int32 (%1 : $Builtin.Int32)
  %3 = ref_element_addr %0 : $XX, #XX.x
  store %2 to %3 : $*Int32
  return %0 : $XX
}

sil @take_y : $@convention(thin) (@owned YY) -> () {
bb0(%0 : $YY):
  // Currently escape analysis cannot see that this release does not capture
  // anything. For the test this strong_release is not relevant anyway.
  // strong_release %0 : $YY
  %t = tuple ()
  return %t : $()
}

// CHECK-LABEL: sil @simple_promote
// CHECK: [[O:%[0-9]+]] = alloc_ref [stack] $XX
// CHECK: strong_release
// CHECK: dealloc_ref [stack] [[O]] : $XX
// CHECK: return
sil @simple_promote : $@convention(thin) () -> Int32 {
bb0:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  %l1 = ref_element_addr %n1 : $XX, #XX.x
  %l2 = load %l1 : $*Int32
  strong_release %n1 : $XX
  return %l2 : $Int32
}

// CHECK-LABEL: sil @dont_promote_escaping
// CHECK: alloc_ref $XX
// CHECK-NOT: dealloc_ref
// CHECK: return
sil @dont_promote_escaping : $@convention(thin) () -> XX {
bb0:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  return %n1 : $XX
}

// CHECK-LABEL: sil @promote_nested
// CHECK: [[X:%[0-9]+]] = alloc_ref [stack] $XX
// CHECK: [[Y:%[0-9]+]] = alloc_ref [stack] $YY
// CHECK: apply
// CHECK: dealloc_ref [stack] [[Y]] : $YY
// CHECK: dealloc_ref [stack] [[X]] : $XX
// CHECK: return
sil @promote_nested : $@convention(thin) () -> () {
bb0:
  %x = alloc_ref $XX
  %y = alloc_ref $YY
  %rea = ref_element_addr %y : $YY, #YY.xx
  store %x to %rea : $*XX

  %f1 = function_ref @take_y : $@convention(thin) (@owned YY) -> ()
  %a = apply %f1(%y) : $@convention(thin) (@owned YY) -> ()
  %t = tuple ()
  return %t : $()
}

// CHECK-LABEL: sil @promote_in_loop_with_if
// CHECK: [[O:%[0-9]+]] = alloc_ref [stack] $XX
// CHECK: {{^}}bb4({{.*}}):
// CHECK-NEXT: dealloc_ref [stack] [[O]] : $XX
// CHECK: return
sil @promote_in_loop_with_if : $@convention(thin) () -> Int32 {
bb0:
  br bb1

bb1:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  cond_br undef, bb2, bb3

bb2:
  %l1 = ref_element_addr %n1 : $XX, #XX.x
  %l2 = load %l1 : $*Int32
  strong_release %n1 : $XX
  br bb4(%l2 : $Int32)

bb3:
  %i1 = integer_literal $Builtin.Int32, 0
  %i2 = struct $Int32 (%i1 : $Builtin.Int32)
  br bb4(%i2 : $Int32)

bb4(%a1 : $Int32):
  cond_br undef, bb1, bb5

bb5:
  return %a1 : $Int32
}

// CHECK-LABEL: sil @dont_promote_use_outside_loop
// CHECK: alloc_ref $XX
// CHECK-NOT: dealloc_ref
// CHECK: return
sil @dont_promote_use_outside_loop : $@convention(thin) () -> Int32 {
bb0:
  br bb1

bb1:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  cond_br undef, bb1, bb2

bb2:
  %l1 = ref_element_addr %n1 : $XX, #XX.x
  %l2 = load %l1 : $*Int32
  strong_release %n1 : $XX
  return %l2 : $Int32
}

// CHECK-LABEL: sil @dont_promote_use_of_container_outside_loop
// CHECK: bb0:
// CHECK: [[Y:%[0-9]+]] = alloc_ref [stack] $YY
// CHECK: bb1:
// CHECK: alloc_ref $XX
// CHECK-NOT: dealloc_ref
// CHECK: bb2:
// CHECK: apply
// CHECK: dealloc_ref [stack] [[Y]] : $YY
// CHECK: return
sil @dont_promote_use_of_container_outside_loop : $@convention(thin) () -> () {
bb0:
  %y = alloc_ref $YY
  br bb1

bb1:
  %x = alloc_ref $XX
  %rea = ref_element_addr %y : $YY, #YY.xx
  store %x to %rea : $*XX
  cond_br undef, bb1, bb2

bb2:
  %f1 = function_ref @take_y : $@convention(thin) (@owned YY) -> ()
  %a = apply %f1(%y) : $@convention(thin) (@owned YY) -> ()
  %t = tuple ()
  return %t : $()
}

// CHECK-LABEL: sil @dont_promote_use_before_alloc
// CHECK: alloc_ref $XX
// CHECK-NOT: dealloc_ref
// CHECK: return
sil @dont_promote_use_before_alloc : $@convention(thin) (@guaranteed XX) -> Int32 {
bb0(%0 : $XX):
  br bb1(%0 : $XX)

bb1(%a1 : $XX):
  %l1 = ref_element_addr %a1 : $XX, #XX.x
  %l2 = load %l1 : $*Int32
  strong_release %a1 : $XX
  %o1 = alloc_ref $XX
  cond_br undef, bb1(%o1 : $XX), bb2

bb2:
  return %l2 : $Int32
}

// CHECK-LABEL: sil @promote_with_use_in_loop
// CHECK: [[O:%[0-9]+]] = alloc_ref [stack] $XX
// CHECK: {{^}}bb2:
// CHECK-NEXT: dealloc_ref [stack] [[O]] : $XX
// CHECK-NEXT: return
sil @promote_with_use_in_loop : $@convention(thin) () -> Int32 {
bb0:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  br bb1

bb1:
  %l1 = ref_element_addr %n1 : $XX, #XX.x
  %l2 = load %l1 : $*Int32
  strong_release %n1 : $XX
  cond_br undef, bb1, bb2

bb2:
  return %l2 : $Int32
}

// CHECK-LABEL: sil @promote_with_other_stack_allocs
// CHECK: [[O:%[0-9]+]] = alloc_ref [stack] $XX
// CHECK: {{^}}bb5:
// CHECK-NEXT: dealloc_stack
// CHECK-NEXT: dealloc_ref [stack] [[O]] : $XX
// CHECK-NEXT: return
sil @promote_with_other_stack_allocs : $@convention(thin) () -> Int32 {
bb0:
  %o1 = alloc_ref $XX
  %f1 = function_ref @xx_init : $@convention(thin) (@guaranteed XX) -> XX
  %n1 = apply %f1(%o1) : $@convention(thin) (@guaranteed XX) -> XX
  %l1 = ref_element_addr %n1 : $XX, #XX.x
  %s1 = alloc_stack $Int32
  %l2 = load %l1 : $*Int32
  strong_release %n1 : $XX
  br bb1

bb1:
  cond_br undef, bb2, bb3

bb2:
  br bb4(%l2 : $Int32)

bb3:
  %i1 = integer_literal $Builtin.Int32, 0
  %i2 = struct $Int32 (%i1 : $Builtin.Int32)
  br bb4(%i2 : $Int32)

bb4(%a1 : $Int32):
  cond_br undef, bb1, bb5

bb5:
  dealloc_stack %s1#0 : $*@local_storage Int32
  return %a1 : $Int32
}

// CHECK-LABEL: sil @promote_array
// CHECK: [[AF:%[0-9]+]] = function_ref @swift_bufferAllocateOnStack : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
// CHECK: [[B:%[0-9]+]] = apply [[AF]](
// CHECK: [[IF:%[0-9]+]] = function_ref @init_array_with_buffer
// CHECK: [[A:%[0-9]+]] = apply [[IF]]([[B]],
// CHECK: tuple_extract [[A]]
// CHECK: tuple_extract [[A]]
// CHECK: [[DF:%[0-9]+]] = function_ref @swift_bufferDeallocateFromStack : $@convention(thin) (@guaranteed AnyObject) -> @owned ()
// CHECK: apply [[DF]]([[B]])
// CHECK: return
sil @promote_array : $@convention(thin) (Int, Int, Int, Int) -> () {
bb0(%0 : $Int, %1 : $Int, %2 : $Int, %3 : $Int):
  %4 = function_ref @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
  %5 = metatype $@thick DummyArrayStorage<Int>.Type
  %6 = init_existential_metatype %5 : $@thick DummyArrayStorage<Int>.Type, $@thick AnyObject.Type

  // allocate the buffer
  %7 = apply %4(%6, %1, %2) : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
  %8 = metatype $@thin Array<Int>.Type
  %9 = function_ref @init_array_with_buffer : $@convention(thin) (@owned AnyObject, Int, @thin Array<Int>.Type) -> @owned (Array<Int>, UnsafeMutablePointer<Int>)

  // initialize the buffer
  %10 = apply %9(%7, %3, %8) : $@convention(thin) (@owned AnyObject, Int, @thin Array<Int>.Type) -> @owned (Array<Int>, UnsafeMutablePointer<Int>)
  %11 = tuple_extract %10 : $(Array<Int>, UnsafeMutablePointer<Int>), 0
  %12 = tuple_extract %10 : $(Array<Int>, UnsafeMutablePointer<Int>), 1
  %13 = struct_extract %12 : $UnsafeMutablePointer<Int>, #UnsafeMutablePointer._rawValue
  %14 = pointer_to_address %13 : $Builtin.RawPointer to $*Int

  // store the 2 elements
  store %0 to %14 : $*Int
  %16 = integer_literal $Builtin.Word, 1
  %17 = index_addr %14 : $*Int, %16 : $Builtin.Word
  store %0 to %17 : $*Int

  // pass the array to a function
  %19 = function_ref @take_array : $@convention(thin) (@owned Array<Int>) -> ()
  %20 = apply %19(%11) : $@convention(thin) (@owned Array<Int>) -> ()
  %21 = tuple ()
  return %21 : $()
}

// CHECK-LABEL: sil @dont_promote_escaping_array
// CHECK: [[AF:%[0-9]+]] = function_ref @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
// CHECK: apply [[AF]](
// CHECK-NOT: swift_bufferDeallocateFromStack
// CHECK: return
sil @dont_promote_escaping_array : $@convention(thin) (Int, Int, Int, Int) -> @owned Array<Int> {
bb0(%0 : $Int, %1 : $Int, %2 : $Int, %3 : $Int):
  %4 = function_ref @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
  %5 = metatype $@thick DummyArrayStorage<Int>.Type
  %6 = init_existential_metatype %5 : $@thick DummyArrayStorage<Int>.Type, $@thick AnyObject.Type

  // allocate the buffer
  %7 = apply %4(%6, %1, %2) : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject
  %8 = metatype $@thin Array<Int>.Type
  %9 = function_ref @init_array_with_buffer : $@convention(thin) (@owned AnyObject, Int, @thin Array<Int>.Type) -> @owned (Array<Int>, UnsafeMutablePointer<Int>)

  // initialize the buffer
  %10 = apply %9(%7, %3, %8) : $@convention(thin) (@owned AnyObject, Int, @thin Array<Int>.Type) -> @owned (Array<Int>, UnsafeMutablePointer<Int>)
  %11 = tuple_extract %10 : $(Array<Int>, UnsafeMutablePointer<Int>), 0
  %12 = tuple_extract %10 : $(Array<Int>, UnsafeMutablePointer<Int>), 1
  %13 = struct_extract %12 : $UnsafeMutablePointer<Int>, #UnsafeMutablePointer._rawValue
  %14 = pointer_to_address %13 : $Builtin.RawPointer to $*Int

  // store the 2 elements
  store %0 to %14 : $*Int
  %16 = integer_literal $Builtin.Word, 1
  %17 = index_addr %14 : $*Int, %16 : $Builtin.Word
  store %0 to %17 : $*Int

  // return the array
  return %11 : $Array<Int>
}

sil [_semantics "array.uninitialized"] @init_array_with_buffer : $@convention(thin) (@owned AnyObject, Int, @thin Array<Int>.Type) -> @owned (Array<Int>, UnsafeMutablePointer<Int>)

sil @swift_bufferAllocate : $@convention(thin) (@thick AnyObject.Type, Int, Int) -> @owned AnyObject

sil @take_array : $@convention(thin) (@owned Array<Int>) -> () {
bb0(%0 : $Array<Int>):
  release_value %0 : $Array<Int>
  %2 = tuple ()
  return %2 : $()
}
@@ -234,7 +234,6 @@ int main(int argc, char **argv) {
  initializeSwiftRCIdentityPass(Registry);
  initializeSwiftARCOptPass(Registry);
  initializeSwiftARCContractPass(Registry);
  initializeSwiftStackPromotionPass(Registry);

  llvm::cl::ParseCommandLineOptions(argc, argv, "Swift LLVM optimizer\n");