mirror of https://github.com/apple/swift.git
synced 2025-12-14 20:36:38 +01:00

This peephole optimization didn't consider that an alloc_stack of an enum can be overwritten by another value. The fix is to remove this peephole optimization altogether, because it is already covered by `optimizeEnum` in alloc_stack simplification. Fixes a miscompile: https://github.com/swiftlang/swift/issues/85687, rdar://165374568
//===--- InstOptUtils.cpp - SILOptimizer instruction utilities ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#include "swift/SILOptimizer/Utils/InstOptUtils.h"
#include "swift/AST/CanTypeVisitor.h"
#include "swift/AST/GenericSignature.h"
#include "swift/AST/SemanticAttrs.h"
#include "swift/AST/SubstitutionMap.h"
#include "swift/Basic/Assertions.h"
#include "swift/Basic/SmallPtrSetVector.h"
#include "swift/SIL/ApplySite.h"
#include "swift/SIL/BasicBlockUtils.h"
#include "swift/SIL/DebugUtils.h"
#include "swift/SIL/DynamicCasts.h"
#include "swift/SIL/InstructionUtils.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILBridging.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SIL/SILDebugInfoExpression.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/SILUndef.h"
#include "swift/SIL/ScopedAddressUtils.h"
#include "swift/SIL/TypeLowering.h"
#include "swift/SILOptimizer/Analysis/ARCAnalysis.h"
#include "swift/SILOptimizer/Analysis/Analysis.h"
#include "swift/SILOptimizer/Analysis/ArraySemantic.h"
#include "swift/SILOptimizer/Analysis/BasicCalleeAnalysis.h"
#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h"
#include "swift/SILOptimizer/Analysis/DestructorAnalysis.h"
#include "swift/SILOptimizer/OptimizerBridging.h"
#include "swift/SILOptimizer/Utils/CFGOptUtils.h"
#include "swift/SILOptimizer/Utils/DebugOptUtils.h"
#include "swift/SILOptimizer/Utils/OwnershipOptUtils.h"
#include "swift/SILOptimizer/Utils/ValueLifetime.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include <optional>

using namespace swift;

static llvm::cl::opt<bool> KeepWillThrowCall(
    "keep-will-throw-call", llvm::cl::init(false),
    llvm::cl::desc(
        "Keep calls to swift_willThrow, even if the throw is optimized away"));

std::optional<SILBasicBlock::iterator>
swift::getInsertAfterPoint(SILValue val) {
  if (auto *inst = val->getDefiningInstruction()) {
    return std::next(inst->getIterator());
  }
  if (isa<SILArgument>(val)) {
    return cast<SILArgument>(val)->getParentBlock()->begin();
  }
  return std::nullopt;
}
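
// Illustrative sketch (not part of the original source): a typical use of
// getInsertAfterPoint is to position a builder right after a value's
// definition, falling back gracefully for values that have none:
//
//   if (auto insertPt = getInsertAfterPoint(val)) {
//     SILBuilderWithScope builder(*insertPt);
//     // ... emit instructions that may use `val` here ...
//   }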

/// Creates an increment on \p ptr before insertion point \p insertPt that
/// creates a strong_retain if \p ptr has reference semantics itself or a
/// retain_value if \p ptr is a non-trivial value without reference semantics.
NullablePtr<SILInstruction>
swift::createIncrementBefore(SILValue ptr, SILInstruction *insertPt) {
  // Set up the builder we use to insert at our insertion point.
  SILBuilder builder(insertPt);
  auto loc = RegularLocation::getAutoGeneratedLocation();

  // If we have a trivial type, just bail, there is no work to do.
  if (ptr->getType().isTrivial(builder.getFunction()))
    return nullptr;

  // If ptr is refcounted itself, create the strong_retain and
  // return.
  if (ptr->getType().isReferenceCounted(builder.getModule())) {
#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...)            \
  if (ptr->getType().is<Name##StorageType>())                                  \
    return builder.create##Name##Retain(loc, ptr,                              \
                                        builder.getDefaultAtomicity());
#include "swift/AST/ReferenceStorage.def"

    return builder.createStrongRetain(loc, ptr,
                                      builder.getDefaultAtomicity());
  }

  // Otherwise, create the retain_value.
  return builder.createRetainValue(loc, ptr, builder.getDefaultAtomicity());
}

/// Creates a decrement on \p ptr before insertion point \p insertPt that
/// creates a strong_release if \p ptr has reference semantics itself or
/// a release_value if \p ptr is a non-trivial value without
/// reference semantics.
NullablePtr<SILInstruction>
swift::createDecrementBefore(SILValue ptr, SILInstruction *insertPt) {
  // Set up the builder we will use to insert at our insertion point.
  SILBuilder builder(insertPt);
  auto loc = RegularLocation::getAutoGeneratedLocation();

  if (ptr->getType().isTrivial(builder.getFunction()))
    return nullptr;

  // If ptr has reference semantics itself, create a strong_release.
  if (ptr->getType().isReferenceCounted(builder.getModule())) {
#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...)            \
  if (ptr->getType().is<Name##StorageType>())                                  \
    return builder.create##Name##Release(loc, ptr,                             \
                                         builder.getDefaultAtomicity());
#include "swift/AST/ReferenceStorage.def"

    return builder.createStrongRelease(loc, ptr,
                                       builder.getDefaultAtomicity());
  }

  // Otherwise create a release_value.
  return builder.createReleaseValue(loc, ptr, builder.getDefaultAtomicity());
}

/// Returns true if the OSSA scope-ending instructions end_borrow/destroy_value
/// can be deleted trivially.
bool swift::canTriviallyDeleteOSSAEndScopeInst(SILInstruction *i) {
  if (!isa<EndBorrowInst>(i) && !isa<DestroyValueInst>(i))
    return false;
  if (isa<StoreBorrowInst>(i->getOperand(0)))
    return false;

  auto opValue = i->getOperand(0);
  // We can delete destroy_values with operands of None ownership, unless
  // they are move-only values, which can have a custom deinit.
  return opValue->getOwnershipKind() == OwnershipKind::None &&
         !opValue->getType().isMoveOnly();
}
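
// Illustrative SIL sketch (not from the original source): after CFG
// simplification, a non-payload case of a non-trivial enum can leave an
// end-of-scope instruction whose operand has None ownership, e.g.:
//
//   %none = enum $Optional<String>, #Optional.none!enumelt
//   destroy_value %none : $Optional<String>   // trivially deletable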

/// Perform a fast local check to see if the instruction is dead.
///
/// This routine only examines the state of the instruction at hand.
bool swift::isInstructionTriviallyDead(SILInstruction *inst) {
  // At Onone, consider all uses, including debug instructions.
  // This way, debug info is preserved at Onone.
  if (inst->hasUsesOfAnyResult()
      && inst->getFunction()->getEffectiveOptimizationMode()
             <= OptimizationMode::NoOptimization)
    return false;

  if (!onlyHaveDebugUsesOfAllResults(inst) || isa<TermInst>(inst))
    return false;

  if (auto *bi = dyn_cast<BuiltinInst>(inst)) {
    // Although the onFastPath builtin has no side effects, we don't want to
    // remove it.
    if (bi->getBuiltinInfo().ID == BuiltinValueKind::OnFastPath)
      return false;
    return !bi->mayHaveSideEffects();
  }

  // cond_fail instructions that obviously can't fail are dead.
  if (auto *cfi = dyn_cast<CondFailInst>(inst))
    if (auto *ili = dyn_cast<IntegerLiteralInst>(cfi->getOperand()))
      if (!ili->getValue())
        return true;

  // mark_uninitialized is never dead.
  if (isa<MarkUninitializedInst>(inst))
    return false;

  if (isa<DebugValueInst>(inst))
    return false;

  // A dead borrowed-from can only be removed if the argument (= operand) is
  // also removed.
  if (isa<BorrowedFromInst>(inst))
    return false;

  // A dead `destructure_struct` with an owned argument can appear for a
  // non-copyable or non-escapable struct which has only trivial elements. The
  // instruction is not trivially dead because it ends the lifetime of its
  // operand.
  if (isa<DestructureStructInst>(inst) &&
      inst->getOperand(0)->getOwnershipKind() == OwnershipKind::Owned) {
    return false;
  }

  // These invalidate enums so "write" memory, but that is not an essential
  // operation so we can remove these if they are trivially dead.
  if (isa<UncheckedTakeEnumDataAddrInst>(inst))
    return true;

  // An OSSA end-scope instruction is trivially dead if its operand has
  // OwnershipKind::None. This can occur after CFG simplification in the
  // presence of non-payloaded or trivial payload cases of non-trivial enums.
  //
  // Examples of OSSA end-scope instructions: end_borrow, destroy_value.
  if (inst->getFunction()->hasOwnership() &&
      canTriviallyDeleteOSSAEndScopeInst(inst))
    return true;

  if (!inst->mayHaveSideEffects())
    return true;

  return false;
}

/// Return true if this is a release instruction and the released value
/// is a part of a guaranteed parameter.
bool swift::isIntermediateRelease(SILInstruction *inst,
                                  EpilogueARCFunctionInfo *eafi) {
  // Check whether this is a release instruction.
  if (!isa<StrongReleaseInst>(inst) && !isa<ReleaseValueInst>(inst))
    return false;

  // OK, we have a release instruction.
  // Check whether this is a release on part of a guaranteed function argument.
  SILValue Op = stripValueProjections(inst->getOperand(0));
  auto *arg = dyn_cast<SILFunctionArgument>(Op);
  if (!arg)
    return false;

  // This is a release on a guaranteed parameter. It's not the final release.
  if (arg->hasConvention(SILArgumentConvention::Direct_Guaranteed))
    return true;

  // This is a release on an owned parameter, and it's not the epilogue
  // release. It's not the final release.
  auto rel = eafi->computeEpilogueARCInstructions(
      EpilogueARCContext::EpilogueARCKind::Release, arg);
  if (rel.size() && !rel.count(inst))
    return true;

  // Failed to prove anything.
  return false;
}
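
// Illustrative SIL sketch (not from the original source): for a function like
//
//   sil @callee : $@convention(thin) (@guaranteed Klass) -> () {
//   bb0(%0 : $Klass):
//     strong_retain %0 : $Klass
//     strong_release %0 : $Klass   // intermediate: %0 is a guaranteed param
//     ...
//   }
//
// the strong_release above is classified as an intermediate release, because
// the caller still owns (and will eventually release) the argument.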

bool swift::hasOnlyEndOfScopeOrEndOfLifetimeUses(SILInstruction *inst) {
  for (SILValue result : inst->getResults()) {
    for (Operand *use : result->getUses()) {
      SILInstruction *user = use->getUser();
      bool isDebugUser = user->isDebugInstruction();
      if (!isa<DestroyValueInst>(user) && !isa<EndLifetimeInst>(user)
          && !isa<DeallocStackInst>(user) && !isEndOfScopeMarker(user)
          && !isDebugUser) {
        return false;
      }
      // Include debug uses only in Onone mode.
      if (isDebugUser && inst->getFunction()->getEffectiveOptimizationMode() <=
                             OptimizationMode::NoOptimization)
        return false;
    }
  }
  return true;
}

unsigned swift::getNumInOutArguments(FullApplySite applySite) {
  assert(applySite);
  auto substConv = applySite.getSubstCalleeConv();
  unsigned numIndirectResults = substConv.getNumIndirectSILResults();
  unsigned numInOutArguments = 0;
  for (unsigned argIndex = 0; argIndex < applySite.getNumArguments();
       argIndex++) {
    // Skip indirect results.
    if (argIndex < numIndirectResults) {
      continue;
    }
    auto paramNumber = argIndex - numIndirectResults;
    auto ParamConvention =
        substConv.getParameters()[paramNumber].getConvention();
    switch (ParamConvention) {
    case ParameterConvention::Indirect_Inout:
    case ParameterConvention::Indirect_InoutAliasable: {
      ++numInOutArguments;
      break;
    }
    default:
      break;
    }
  }
  return numInOutArguments;
}
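
// Illustrative sketch (not from the original source): applied to a call of
//
//   func update(_ x: inout Int, _ y: Int, _ z: inout String)
//
// getNumInOutArguments returns 2, counting only the Indirect_Inout and
// Indirect_InoutAliasable parameters and skipping any indirect results that
// precede the parameters in the SIL argument list.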

/// If the given instruction is dead, delete it along with its dead
/// operands.
///
/// \param inst The instruction to be deleted.
/// \param force If force is set, don't check if the top-level instruction is
///              considered dead - delete it regardless.
void swift::recursivelyDeleteTriviallyDeadInstructions(
    SILInstruction *inst, bool force, InstModCallbacks callbacks) {
  ArrayRef<SILInstruction *> ai = ArrayRef<SILInstruction *>(inst);
  recursivelyDeleteTriviallyDeadInstructions(ai, force, callbacks);
}

void swift::collectUsesOfValue(SILValue v,
                               llvm::SmallPtrSetImpl<SILInstruction *> &insts) {
  for (auto ui = v->use_begin(), E = v->use_end(); ui != E; ++ui) {
    auto *user = ui->getUser();
    // Instruction has already been processed.
    if (!insts.insert(user).second)
      continue;

    // Collect the users of this instruction.
    for (auto result : user->getResults())
      collectUsesOfValue(result, insts);
  }
}

void swift::eraseUsesOfValue(SILValue v) {
  llvm::SmallPtrSet<SILInstruction *, 4> insts;
  // Collect the uses.
  collectUsesOfValue(v, insts);
  // Erase the uses. Other instructions may become dead as a result, but we
  // leave that cleanup to DCE. It's not safe to delete recursively here, as
  // some of the SILInstructions may be tracked by this set.
  for (auto inst : insts) {
    inst->replaceAllUsesOfAllResultsWithUndef();
    inst->eraseFromParent();
  }
}

bool swift::hasValueDeinit(SILType type) {
  // Do not look inside an aggregate type that has a user-defined deinit, for
  // which memberwise destruction is not equivalent to aggregate destruction.
  if (auto *nominal = type.getNominalOrBoundGenericNominal()) {
    return nominal->getValueTypeDestructor() != nullptr;
  }
  return false;
}
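
// Illustrative sketch (not from the original source): hasValueDeinit returns
// true for the lowered type of a non-copyable struct like
//
//   struct FileHandle: ~Copyable {
//     let fd: Int32
//     deinit { /* close(fd) */ }
//   }
//
// so the optimizer must not replace its aggregate destruction with
// memberwise destruction of the stored properties.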

SILValue swift::
getConcreteValueOfExistentialBox(AllocExistentialBoxInst *existentialBox,
                                 SILInstruction *ignoreUser) {
  StoreInst *singleStore = nullptr;
  SmallPtrSetVector<Operand *, 32> worklist;
  for (auto *use : getNonDebugUses(existentialBox)) {
    worklist.insert(use);
  }

  while (!worklist.empty()) {
    auto *use = worklist.pop_back_val();
    SILInstruction *user = use->getUser();
    switch (user->getKind()) {
    case SILInstructionKind::StrongRetainInst:
    case SILInstructionKind::StrongReleaseInst:
    case SILInstructionKind::DestroyValueInst:
    case SILInstructionKind::EndBorrowInst:
      break;
    case SILInstructionKind::CopyValueInst:
    case SILInstructionKind::BeginBorrowInst:
      // Look through copy_value and begin_borrow.
      for (SILValue result : user->getResults())
        for (auto *transitiveUse : result->getUses())
          worklist.insert(transitiveUse);
      break;
    case SILInstructionKind::ProjectExistentialBoxInst: {
      auto *projectedAddr = cast<ProjectExistentialBoxInst>(user);
      for (Operand *addrUse : getNonDebugUses(projectedAddr)) {
        if (auto *store = dyn_cast<StoreInst>(addrUse->getUser())) {
          assert(store->getSrc() != projectedAddr && "cannot store an address");
          // Bail if there are multiple stores.
          if (singleStore)
            return SILValue();
          singleStore = store;
          continue;
        }
        // If there are other users of the box value address, then bail out.
        return SILValue();
      }
      break;
    }
    case SILInstructionKind::BuiltinInst: {
      auto *builtin = cast<BuiltinInst>(user);
      if (KeepWillThrowCall ||
          builtin->getBuiltinInfo().ID != BuiltinValueKind::WillThrow) {
        return SILValue();
      }
      break;
    }
    default:
      if (user != ignoreUser)
        return SILValue();
      break;
    }
  }
  if (!singleStore)
    return SILValue();
  return singleStore->getSrc();
}

SILValue swift::
getConcreteValueOfExistentialBoxAddr(SILValue addr, SILInstruction *ignoreUser) {
  auto *stackLoc = dyn_cast<AllocStackInst>(addr);
  if (!stackLoc)
    return SILValue();

  StoreInst *singleStackStore = nullptr;
  for (Operand *stackUse : stackLoc->getUses()) {
    SILInstruction *stackUser = stackUse->getUser();
    switch (stackUser->getKind()) {
    case SILInstructionKind::DestroyAddrInst: {
      // Make sure the destroy_addr is the instruction before one of our
      // dealloc_stack insts and is directly on the stack location.
      auto next = std::next(stackUser->getIterator());
      if (auto *dsi = dyn_cast<DeallocStackInst>(next))
        if (dsi->getOperand() != stackLoc)
          return SILValue();
      break;
    }
    case SILInstructionKind::DeallocStackInst:
    case SILInstructionKind::LoadInst:
      break;
    case SILInstructionKind::DebugValueInst:
      if (!DebugValueInst::hasAddrVal(stackUser)) {
        if (stackUser != ignoreUser)
          return SILValue();
      }
      break;
    case SILInstructionKind::StoreInst: {
      auto *store = cast<StoreInst>(stackUser);
      assert(store->getSrc() != stackLoc && "cannot store an address");
      // Bail if there are multiple stores.
      if (singleStackStore)
        return SILValue();
      singleStackStore = store;
      break;
    }
    default:
      if (stackUser != ignoreUser)
        return SILValue();
      break;
    }
  }
  if (!singleStackStore)
    return SILValue();

  // Look through copy_value insts.
  SILValue val = singleStackStore->getSrc();
  while (auto *cvi = dyn_cast<CopyValueInst>(val))
    val = cvi->getOperand();
  auto *box = dyn_cast<AllocExistentialBoxInst>(val);
  if (!box)
    return SILValue();

  return getConcreteValueOfExistentialBox(box, singleStackStore);
}
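
// Illustrative SIL sketch (not from the original source): the pattern these
// two routines recognize is an existential box that is filled exactly once:
//
//   %box  = alloc_existential_box $any Error, $MyError
//   %addr = project_existential_box $MyError in %box : $any Error
//   store %err to %addr : $*MyError     // the single store
//
// in which case the concrete value %err can be propagated to users of the
// box, often allowing the box allocation itself to be removed.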

bool swift::mayBindDynamicSelf(SILFunction *F) {
  if (!F->hasDynamicSelfMetadata())
    return false;

  SILValue mdArg = F->getDynamicSelfMetadata();

  for (Operand *mdUse : mdArg->getUses()) {
    SILInstruction *mdUser = mdUse->getUser();
    for (Operand &typeDepOp : mdUser->getTypeDependentOperands()) {
      if (typeDepOp.get() == mdArg)
        return true;
    }
  }
  return false;
}

static SILValue skipAddrProjections(SILValue v) {
  for (;;) {
    switch (v->getKind()) {
    case ValueKind::IndexAddrInst:
    case ValueKind::IndexRawPointerInst:
    case ValueKind::StructElementAddrInst:
    case ValueKind::TupleElementAddrInst:
      v = cast<SingleValueInstruction>(v)->getOperand(0);
      break;
    default:
      return v;
    }
  }
  llvm_unreachable("there is no escape from an infinite loop");
}

/// Check whether \p addr is an address of a tail-allocated array element.
bool swift::isAddressOfArrayElement(SILValue addr) {
  addr = stripAddressProjections(addr);
  if (auto *md = dyn_cast<MarkDependenceInst>(addr))
    addr = stripAddressProjections(md->getValue());

  // High-level SIL: check for a get_element_address array-semantics call.
  if (auto *ptrToAddr = dyn_cast<PointerToAddressInst>(addr))
    if (auto *sei = dyn_cast<StructExtractInst>(ptrToAddr->getOperand())) {
      ArraySemanticsCall call(sei->getOperand());
      if (call && call.getKind() == ArrayCallKind::kGetElementAddress)
        return true;
    }

  // Check for a tail-address (of an array buffer object).
  if (isa<RefTailAddrInst>(skipAddrProjections(addr)))
    return true;

  return false;
}
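
// Illustrative SIL sketch (not from the original source): in lowered SIL, an
// array element address typically looks like
//
//   %buf = ref_tail_addr %storage : $_ContiguousArrayStorage<Int>, $Int
//   %elt = index_addr %buf : $*Int, %i : $Builtin.Word
//
// and isAddressOfArrayElement(%elt) returns true after skipping the
// index_addr projection down to the ref_tail_addr.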

/// Find a new position for an ApplyInst's FuncRef so that it dominates its
/// use. Note that FunctionRefInsts may be shared by multiple ApplyInsts.
void swift::placeFuncRef(ApplyInst *ai, DominanceInfo *domInfo) {
  FunctionRefInst *funcRef = cast<FunctionRefInst>(ai->getCallee());
  SILBasicBlock *domBB = domInfo->findNearestCommonDominator(
      ai->getParent(), funcRef->getParent());
  if (domBB == ai->getParent() && domBB != funcRef->getParent())
    // Prefer to place the FuncRef immediately before the call. Since we're
    // moving the FuncRef up, this must be the only call to it in the block.
    funcRef->moveBefore(ai);
  else
    // Otherwise, conservatively stick it at the beginning of the block.
    funcRef->moveBefore(&*domBB->begin());
}

/// Add arguments, \p vals, to the branch edge that is pointing into
/// block \p dest. Return a new instruction; do not erase the old
/// instruction.
TermInst *swift::addArgumentsToBranch(ArrayRef<SILValue> vals,
                                      SILBasicBlock *dest, TermInst *branch) {
  SILBuilderWithScope builder(branch);

  if (auto *cbi = dyn_cast<CondBranchInst>(branch)) {
    SmallVector<SILValue, 8> trueArgs;
    SmallVector<SILValue, 8> falseArgs;

    for (auto arg : cbi->getTrueArgs())
      trueArgs.push_back(arg);

    for (auto arg : cbi->getFalseArgs())
      falseArgs.push_back(arg);

    if (dest == cbi->getTrueBB()) {
      for (auto val : vals)
        trueArgs.push_back(val);
      assert(trueArgs.size() == dest->getNumArguments());
    } else {
      for (auto val : vals)
        falseArgs.push_back(val);
      assert(falseArgs.size() == dest->getNumArguments());
    }

    return builder.createCondBranch(
        cbi->getLoc(), cbi->getCondition(), cbi->getTrueBB(), trueArgs,
        cbi->getFalseBB(), falseArgs, cbi->getTrueBBCount(),
        cbi->getFalseBBCount());
  }

  if (auto *bi = dyn_cast<BranchInst>(branch)) {
    SmallVector<SILValue, 8> args;

    for (auto arg : bi->getArgs())
      args.push_back(arg);

    for (auto val : vals)
      args.push_back(val);
    assert(args.size() == dest->getNumArguments());
    return builder.createBranch(bi->getLoc(), bi->getDestBB(), args);
  }

  llvm_unreachable("unsupported terminator");
}

SILLinkage swift::getSpecializedLinkage(SILFunction *f, SILLinkage linkage) {
  if (hasPrivateVisibility(linkage) && !f->isAnySerialized()) {
    // Specializations of private symbols should remain so, unless
    // they were serialized, which can only happen when specializing
    // definitions from a standard library built with -sil-serialize-all.
    return SILLinkage::Private;
  }

  return SILLinkage::Shared;
}

/// Cast a value into the expected, ABI-compatible type if necessary.
/// This may happen e.g. when:
/// - the type of the return value is a subclass of the expected return type.
/// - actual return type and expected return type differ in optionality.
/// - both types are tuple types and some of the elements need to be cast.
/// Return the cast value, and true if a CFG modification was required.
/// NOTE: We intentionally combine the checking of the cast's handling
/// possibility and the transformation performing the cast in the same
/// function, to avoid any divergence between the check and the implementation
/// in the future.
///
/// \p usePoints are required when \p value has guaranteed ownership. They must
/// be the last users of the returned, casted value. A usePoint cannot be a
/// BranchInst (a phi is never the last guaranteed user). \p builder's current
/// insertion point must dominate all \p usePoints. \p usePoints must
/// collectively post-dominate \p builder's current insertion point.
///
/// NOTE: The implementation of this function is very closely related to the
/// rules checked by SILVerifier::requireABICompatibleFunctionTypes. It must
/// handle all cases recognized by SILFunctionType::isABICompatibleWith (see
/// areABICompatibleParamsOrReturns()).
std::pair<SILValue, bool /* changedCFG */>
swift::castValueToABICompatibleType(SILBuilder *builder, SILPassManager *pm,
                                    SILLocation loc,
                                    SILValue value, SILType srcTy,
                                    SILType destTy,
                                    ArrayRef<SILInstruction *> usePoints) {
  assert(value->getOwnershipKind() != OwnershipKind::Guaranteed ||
         !usePoints.empty() && "guaranteed value must have use points");

  // No cast is required if types are the same.
  if (srcTy == destTy)
    return {value, false};

  if (srcTy.isAddress() && destTy.isAddress()) {
    // Cast between two addresses and that's it.
    return {builder->createUncheckedAddrCast(loc, value, destTy), false};
  }

  // If both types are classes and dest is the superclass of src,
  // simply perform an upcast.
  if (destTy.isExactSuperclassOf(srcTy)) {
    return {builder->createUpcast(loc, value, destTy), false};
  }

  if (srcTy.isHeapObjectReferenceType() && destTy.isHeapObjectReferenceType()) {
    return {builder->createUncheckedRefCast(loc, value, destTy), false};
  }

  if (auto mt1 = srcTy.getAs<AnyMetatypeType>()) {
    if (auto mt2 = destTy.getAs<AnyMetatypeType>()) {
      if (mt1->getRepresentation() == mt2->getRepresentation()) {
        // If B.Type needs to be cast to A.Type and
        // A is a superclass of B, then it can be done by means
        // of a simple upcast.
        if (mt2.getInstanceType()->isExactSuperclassOf(mt1.getInstanceType())) {
          return {builder->createUpcast(loc, value, destTy), false};
        }

        // Cast between two metatypes and that's it.
        return {builder->createUncheckedReinterpretCast(loc, value, destTy),
                false};
      }
    }
  }

  // Check if src and dest types are optional.
  auto optionalSrcTy = srcTy.getOptionalObjectType();
  auto optionalDestTy = destTy.getOptionalObjectType();

  // Both types are optional.
  if (optionalDestTy && optionalSrcTy) {
    // If both wrapped types are classes and dest is the superclass of src,
    // simply perform an upcast.
    if (optionalDestTy.isExactSuperclassOf(optionalSrcTy)) {
      // Insert upcast.
      return {builder->createUpcast(loc, value, destTy), false};
    }

    // Unwrap the original optional value.
    auto *someDecl = builder->getASTContext().getOptionalSomeDecl();
    auto *curBB = builder->getInsertionPoint()->getParent();
    auto *contBB = curBB->split(builder->getInsertionPoint());
    auto *someBB = builder->getFunction().createBasicBlockAfter(curBB);
    auto *noneBB = builder->getFunction().createBasicBlockAfter(someBB);

    auto *phi = contBB->createPhiArgument(destTy, value->getOwnershipKind());

    SmallVector<std::pair<EnumElementDecl *, SILBasicBlock *>, 1> caseBBs;
    caseBBs.push_back(std::make_pair(someDecl, someBB));
    builder->setInsertionPoint(curBB);
    auto *switchEnum = builder->createSwitchEnum(loc, value, noneBB, caseBBs);
    // In OSSA, switch_enum destinations have terminator results.
    //
    // TODO: This should be in a switchEnum utility.
    SILValue unwrappedValue;
    if (builder->hasOwnership()) {
      unwrappedValue = switchEnum->createOptionalSomeResult();
      builder->setInsertionPoint(someBB);
    } else {
      builder->setInsertionPoint(someBB);
      unwrappedValue = builder->createUncheckedEnumData(loc, value, someDecl);
    }
    // Cast the unwrapped value.
    SILValue castedUnwrappedValue;
    std::tie(castedUnwrappedValue, std::ignore) = castValueToABICompatibleType(
        builder, pm, loc, unwrappedValue, optionalSrcTy, optionalDestTy,
        usePoints);
    // Wrap into optional. An owned value is forwarded through the cast and
    // into the Optional. A borrowed value will have a nested borrow for the
    // rewrapped Optional.
    SILValue someValue =
        builder->createOptionalSome(loc, castedUnwrappedValue, destTy);
    builder->createBranch(loc, contBB, {someValue});

    // Handle the None case.
    builder->setInsertionPoint(noneBB);
    SILValue noneValue = builder->createOptionalNone(loc, destTy);
    builder->createBranch(loc, contBB, {noneValue});
    builder->setInsertionPoint(contBB->begin());

    updateGuaranteedPhis(pm, { phi });

    return {lookThroughBorrowedFromUser(phi), true};
  }

  // Src is not optional, but dest is optional.
  if (!optionalSrcTy && optionalDestTy) {
    auto optionalSrcCanTy =
        OptionalType::get(srcTy.getASTType())->getCanonicalType();
    auto loweredOptionalSrcType =
        SILType::getPrimitiveObjectType(optionalSrcCanTy);

    // Wrap the source value into an optional first.
    SILValue wrappedValue =
        builder->createOptionalSome(loc, value, loweredOptionalSrcType);
    // Cast the wrapped value.
    return castValueToABICompatibleType(builder, pm, loc, wrappedValue,
                                        wrappedValue->getType(), destTy,
                                        usePoints);
  }

  // Handle tuple types.
  // Extract elements, cast each of them, create a new tuple.
  if (auto srcTupleTy = srcTy.getAs<TupleType>()) {
    SmallVector<SILValue, 8> expectedTuple;
    bool changedCFG = false;
    auto castElement = [&](unsigned idx, SILValue element) {
      // Cast the value if necessary.
      bool neededCFGChange;
      std::tie(element, neededCFGChange) = castValueToABICompatibleType(
          builder, pm, loc, element, srcTy.getTupleElementType(idx),
          destTy.getTupleElementType(idx), usePoints);
      changedCFG |= neededCFGChange;
      expectedTuple.push_back(element);
    };
    builder->emitDestructureValueOperation(loc, value, castElement);
    return {builder->createTuple(loc, destTy, expectedTuple), changedCFG};
  }

  // Function types are interchangeable if they're also ABI-compatible.
  if (srcTy.is<SILFunctionType>()) {
    if (destTy.is<SILFunctionType>()) {
      assert(srcTy.getAs<SILFunctionType>()->isNoEscape()
                 == destTy.getAs<SILFunctionType>()->isNoEscape()
             || srcTy.getAs<SILFunctionType>()->getRepresentation()
                    != SILFunctionType::Representation::Thick
                        && "Swift thick functions that differ in escapeness "
                           "are not ABI compatible");
      // Insert convert_function.
      return {builder->createConvertFunction(loc, value, destTy,
                                             /*WithoutActuallyEscaping=*/false),
              false};
    }
  }
  NominalTypeDecl *srcNominal = srcTy.getNominalOrBoundGenericNominal();
  NominalTypeDecl *destNominal = destTy.getNominalOrBoundGenericNominal();
  if (srcNominal && srcNominal == destNominal &&
      !layoutIsTypeDependent(srcNominal) &&
      srcTy.isObject() && destTy.isObject()) {

    // This can be a result of whole-module reasoning about protocol
    // conformances. If a protocol has only a single conformance where the
    // associated type (`ID`) is some concrete type (e.g. `Int`), then the
    // devirtualizer knows that `p.get()` can only return an `Int`:
    // ```
    // public struct X2<ID> {
    //   let p: any P2<ID>
    //   public func testit(i: ID, x: ID) -> S2<ID> {
    //     return p.get(x: x)
    //   }
    // }
    // ```
    // and after devirtualizing the `get` function, its result must be cast
    // from `Int` to `ID`.
    //
    // The `layoutIsTypeDependent` utility is basically only used here to
    // assert that this cast can only happen between layout-compatible types.
    return {builder->createUncheckedForwardingCast(loc, value, destTy), false};
  }

  llvm::errs() << "Source type: " << srcTy << "\n";
  llvm::errs() << "Destination type: " << destTy << "\n";
  llvm_unreachable("Unknown combination of types for casting");
}
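
// Illustrative SIL sketch (not from the original source): casting a value of
// type Optional<D> to Optional<B> where no direct upcast applies produces the
// diamond built above, roughly:
//
//   bb0:
//     switch_enum %v : $Optional<D>, case #Optional.some!enumelt: bbSome,
//                                    default bbNone
//   bbSome(%payload : @owned $D):
//     // ... recursively cast %payload to $B ...
//     %some = enum $Optional<B>, #Optional.some!enumelt, %cast : $B
//     br bbCont(%some : $Optional<B>)
//   bbNone:
//     %none = enum $Optional<B>, #Optional.none!enumelt
//     br bbCont(%none : $Optional<B>)
//   bbCont(%result : @owned $Optional<B>):
//
// which is why this path reports `changedCFG == true`.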

namespace {
class TypeDependentVisitor :
    public CanTypeVisitor_AnyNominal<TypeDependentVisitor, bool> {
public:
  // If the type isn't actually dependent, we're okay.
  bool visit(CanType type) {
    if (!type->hasArchetype() && !type->hasTypeParameter())
      return false;
    return CanTypeVisitor::visit(type);
  }

  bool visitAnyStructType(CanType type, StructDecl *decl) {
    return visitStructDecl(decl);
  }
  bool visitStructDecl(StructDecl *decl) {
    auto rawLayout = decl->getAttrs().getAttribute<RawLayoutAttr>();
    if (rawLayout) {
      if (auto likeType = rawLayout->getResolvedScalarLikeType(decl)) {
        return visit((*likeType)->getCanonicalType());
      } else if (auto likeArray =
                     rawLayout->getResolvedArrayLikeTypeAndCount(decl)) {
        return visit(likeArray->first->getCanonicalType());
      }
    }

    for (auto field : decl->getStoredProperties()) {
      if (visit(field->getInterfaceType()->getCanonicalType()))
        return true;
    }
    return false;
  }

  bool visitAnyEnumType(CanType type, EnumDecl *decl) {
    return visitEnumDecl(decl);
  }
  bool visitEnumDecl(EnumDecl *decl) {
    if (decl->isIndirect())
      return false;

    for (auto elt : decl->getAllElements()) {
      if (!elt->hasAssociatedValues() || elt->isIndirect())
        continue;

      if (visit(elt->getPayloadInterfaceType()->getCanonicalType()))
        return true;
    }
    return false;
  }

  bool visitTupleType(CanTupleType type) {
    for (auto eltTy : type.getElementTypes()) {
      if (visit(eltTy->getCanonicalType()))
        return true;
    }
    return false;
  }

  // A class reference does not depend on the layout of the class.
  bool visitAnyClassType(CanType type, ClassDecl *decl) {
    return false;
  }

  // The same for non-strong references.
  bool visitReferenceStorageType(CanReferenceStorageType type) {
    return false;
  }

  // All function types have the same layout.
  bool visitAnyFunctionType(CanAnyFunctionType type) {
    return false;
  }

  // The safe default for types we didn't handle above.
  bool visitType(CanType type) {
    return true;
  }
};
} // end anonymous namespace

bool swift::layoutIsTypeDependent(NominalTypeDecl *decl) {
  if (isa<ClassDecl>(decl)) {
    return false;
  } else if (auto *structDecl = dyn_cast<StructDecl>(decl)) {
    return TypeDependentVisitor().visitStructDecl(structDecl);
  } else {
    auto *enumDecl = cast<EnumDecl>(decl);
    return TypeDependentVisitor().visitEnumDecl(enumDecl);
  }
}

ProjectBoxInst *swift::getOrCreateProjectBox(AllocBoxInst *abi,
                                             unsigned index) {
  SILBasicBlock::iterator iter(abi);
  ++iter;
  assert(iter != abi->getParent()->end()
         && "alloc_box cannot be the last instruction of a block");
  SILInstruction *nextInst = &*iter;
  if (auto *pbi = dyn_cast<ProjectBoxInst>(nextInst)) {
    if (pbi->getOperand() == abi && pbi->getFieldIndex() == index)
      return pbi;
  }

  SILBuilder builder(nextInst);
  return builder.createProjectBox(abi->getLoc(), abi, index);
}
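
// Illustrative sketch (not from the original source): getOrCreateProjectBox
// reuses an existing projection when it immediately follows the alloc_box,
// so repeated calls do not pile up redundant project_box instructions:
//
//   %box  = alloc_box ${ var Int }
//   %addr = project_box %box : ${ var Int }, 0   // returned as-is on reuse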

//===----------------------------------------------------------------------===//
//                             Closure Deletion
//===----------------------------------------------------------------------===//

/// NOTE: Instructions with transitive ownership kind are assumed to not keep
/// the underlying value alive as well. This is meant only for instructions
/// with non-transitive users.
static bool useDoesNotKeepValueAlive(const SILInstruction *inst) {
  switch (inst->getKind()) {
  case SILInstructionKind::StrongRetainInst:
  case SILInstructionKind::StrongReleaseInst:
  case SILInstructionKind::DestroyValueInst:
  case SILInstructionKind::RetainValueInst:
  case SILInstructionKind::ReleaseValueInst:
  case SILInstructionKind::DebugValueInst:
  case SILInstructionKind::EndBorrowInst:
    return true;
  default:
    return false;
  }
}

static bool useHasTransitiveOwnership(const SILInstruction *inst) {
  // convert_escape_to_noescape is used to convert to a @noescape function
  // type. It does not change ownership of the function value.
  if (isa<ConvertEscapeToNoEscapeInst>(inst))
    return true;

  if (isa<MarkDependenceInst>(inst))
    return true;

  // Look through copy_value, begin_borrow, and move_value. They are inert for
  // our purposes, but we need to look through them.
  return isa<CopyValueInst>(inst) || isa<BeginBorrowInst>(inst) ||
         isa<MoveValueInst>(inst);
}

static bool shouldDestroyPartialApplyCapturedArg(SILValue arg,
                                                 SILParameterInfo paramInfo,
                                                 const SILFunction &F) {
  // If we have a non-trivial type and the argument is passed in @inout, we do
  // not need to destroy it here. This is something that is implicit in the
  // partial_apply design that will be revisited when partial_apply is
  // redesigned.
  if (paramInfo.isIndirectMutating())
    return false;

  // If we have a trivial type, we do not need to put in any extra releases.
  if (arg->getType().isTrivial(F))
    return false;

  // We handle all other cases.
  return true;
}

void swift::emitDestroyOperation(SILBuilder &builder, SILLocation loc,
                                 SILValue operand, InstModCallbacks callbacks) {
  // If we have an address, we insert a destroy_addr and return. Any live
  // range issues must have been dealt with by our caller.
  if (operand->getType().isAddress()) {
    // Then emit the destroy_addr for this operand. This function does not
    // delete any instructions.
    SILInstruction *newInst = builder.emitDestroyAddrAndFold(loc, operand);
    if (newInst != nullptr)
      callbacks.createdNewInst(newInst);
    return;
  }

  // Otherwise, we have an object. We emit the most optimized form of release
  // possible for that value.

  // If we have qualified ownership, we should just emit a destroy_value.
  if (builder.getFunction().hasOwnership()) {
    callbacks.createdNewInst(builder.createDestroyValue(loc, operand));
    return;
  }

  if (operand->getType().isUnownedStorageType()) {
    auto release = builder.createUnownedRelease(loc, operand,
                                                builder.getDefaultAtomicity());
    callbacks.createdNewInst(release);
    return;
  }

  if (operand->getType().isReferenceCounted(builder.getModule())) {
    auto u = builder.emitStrongRelease(loc, operand);
    if (u.isNull())
      return;

    if (auto *SRI = u.dyn_cast<StrongRetainInst *>()) {
      callbacks.deleteInst(SRI);
      return;
    }

    callbacks.createdNewInst(cast<StrongReleaseInst *>(u));
    return;
  }

  auto u = builder.emitReleaseValue(loc, operand);
  if (u.isNull())
    return;

  if (auto *rvi = u.dyn_cast<RetainValueInst *>()) {
    callbacks.deleteInst(rvi);
    return;
  }

  callbacks.createdNewInst(cast<ReleaseValueInst *>(u));
}

// NOTE: The ownership of the partial_apply argument does not match the
// convention of the closed-over function. All non-inout arguments to a
// partial_apply are passed at +1 for regular escaping closures and at +0 for
// closures that have been promoted to partial_apply [on_stack]. An escaping
// partial apply stores each capture in an owned box, even for guaranteed and
// in_guaranteed argument conventions. A non-escaping/on-stack closure either
// borrows its arguments or takes an inout_aliasable address. Non-escaping
// closures do not support owned arguments.
void swift::releasePartialApplyCapturedArg(SILBuilder &builder, SILLocation loc,
                                           SILValue arg,
                                           SILParameterInfo paramInfo,
                                           InstModCallbacks callbacks) {
  if (!shouldDestroyPartialApplyCapturedArg(arg, paramInfo,
                                            builder.getFunction()))
    return;

  emitDestroyOperation(builder, loc, arg, callbacks);
}

static bool
deadMarkDependenceUser(SILInstruction *inst,
                       SmallVectorImpl<SILInstruction *> &deleteInsts) {
  if (!isa<MarkDependenceInst>(inst))
    return false;
  deleteInsts.push_back(inst);
  for (auto *use : cast<SingleValueInstruction>(inst)->getUses()) {
    if (!deadMarkDependenceUser(use->getUser(), deleteInsts))
      return false;
  }
  return true;
}

void swift::getConsumedPartialApplyArgs(PartialApplyInst *pai,
                                        SmallVectorImpl<Operand *> &argOperands,
                                        bool includeTrivialAddrArgs) {
  ApplySite applySite(pai);
  SILFunctionConventions calleeConv = applySite.getSubstCalleeConv();
  unsigned firstCalleeArgIdx = applySite.getCalleeArgIndexOfFirstAppliedArg();
  auto argList = pai->getArgumentOperands();
  SILFunction *F = pai->getFunction();

  for (unsigned i : indices(argList)) {
    auto argConv = calleeConv.getSILArgumentConvention(firstCalleeArgIdx + i);
    if (argConv.isInoutConvention())
      continue;

    Operand &argOp = argList[i];
    SILType ty = argOp.get()->getType();
    if (!ty.isTrivial(*F) || (includeTrivialAddrArgs && ty.isAddress()))
      argOperands.push_back(&argOp);
  }
}

static bool collectDestroysRecursively(SingleValueInstruction *inst,
                                       SmallVectorImpl<Operand *> &destroys,
                                       InstructionSet &visited) {
  bool isDead = true;
  for (Operand *use : inst->getUses()) {
    SILInstruction *user = use->getUser();
    if (!visited.insert(user))
      continue;
    if (useHasTransitiveOwnership(user)) {
      if (!collectDestroysRecursively(cast<SingleValueInstruction>(user),
                                      destroys, visited))
        isDead = false;
      destroys.push_back(use);
    } else if (useDoesNotKeepValueAlive(user)) {
      destroys.push_back(use);
    } else {
      isDead = false;
    }
  }
  return isDead;
}

bool swift::collectDestroys(SingleValueInstruction *inst,
                            SmallVectorImpl<Operand *> &destroys) {
  InstructionSet visited(inst->getFunction());
  return collectDestroysRecursively(inst, destroys, visited);
}

/// Move the original arguments of the partial_apply into newly created
/// temporaries to extend the lifetime of the arguments until the
/// partial_apply is finally destroyed.
///
/// TODO: figure out why this is needed at all. Probably because of some
/// weirdness of the old retain/release ARC model. Most likely this will
/// not be needed anymore with OSSA.
static bool keepArgsOfPartialApplyAlive(PartialApplyInst *pai,
                                        ArrayRef<Operand *> paiUses,
                                        SILBuilderContext &builderCtxt,
                                        InstModCallbacks callbacks) {
  SmallVector<Operand *, 8> argsToHandle;
  getConsumedPartialApplyArgs(pai, argsToHandle,
                              /*includeTrivialAddrArgs*/ false);
  if (argsToHandle.empty())
    return true;

  // Compute the set of endpoints, which will be used to insert destroys of
  // temporaries. This may fail if the frontier is located on a critical edge
  // which we may not split.
  ValueLifetimeAnalysis vla(pai, paiUses);

  ValueLifetimeAnalysis::Frontier partialApplyFrontier;
  if (!vla.computeFrontier(partialApplyFrontier,
                           ValueLifetimeAnalysis::DontModifyCFG)) {
    return false;
  }

  // We must not introduce copies for move-only types.
  // TODO: in OSSA, instead of bailing, it's possible to destroy the arguments
  // without the need for copies.
  for (Operand *argOp : argsToHandle) {
    if (argOp->get()->getType().isMoveOnly())
      return false;
  }

  for (Operand *argOp : argsToHandle) {
    SILValue arg = argOp->get();

    SILValue tmp = arg;
    if (arg->getType().isAddress()) {
      // Move the value to a stack-allocated temporary.
      SILBuilderWithScope builder(pai, builderCtxt);
      tmp = builder.createAllocStack(pai->getLoc(), arg->getType());
      builder.createCopyAddr(pai->getLoc(), arg, tmp, IsTake_t::IsTake,
                             IsInitialization_t::IsInitialization);
    }

    // Delay the destroy of the value (either as SSA value or in the stack-
    // allocated temporary) to the end of the partial_apply's lifetime.
    endLifetimeAtFrontier(tmp, partialApplyFrontier, builderCtxt, callbacks);
  }
  return true;
}

bool swift::tryDeleteDeadClosure(SingleValueInstruction *closure,
                                 InstModCallbacks callbacks,
                                 bool needKeepArgsAlive) {
  auto *pa = dyn_cast<PartialApplyInst>(closure);

  // We currently only handle locally identified values that do not escape. We
  // also assume that the partial apply does not capture any addresses.
  if (!pa && !isa<ThinToThickFunctionInst>(closure))
    return false;

  // A stack-allocated partial apply does not have any release users. Delete
  // it if the only users are the dealloc_stack and mark_dependence
  // instructions.
  if (pa && pa->isOnStack()) {
    SmallVector<SILInstruction *, 8> deleteInsts;
    for (auto *use : pa->getUses()) {
      SILInstruction *user = use->getUser();
      if (isa<DeallocStackInst>(user)
          || isa<DebugValueInst>(user)
          || isa<DestroyValueInst>(user)) {
        deleteInsts.push_back(user);
      } else if (!deadMarkDependenceUser(user, deleteInsts)) {
        return false;
      }
    }
    for (auto *inst : reverse(deleteInsts))
      callbacks.deleteInst(inst);
    callbacks.deleteInst(pa);

    // Note: the lifetime of the captured arguments is managed outside of the
    // trivial closure value, i.e. there will already be releases for the
    // captured arguments. Releasing captured arguments is not necessary.
    return true;
  }

  // Collect all destroys of the closure (transitively including destroys of
  // copies) and check if those are the only uses of the closure.
  SmallVector<Operand *, 16> closureDestroys;
  if (!collectDestroys(closure, closureDestroys))
    return false;

  // If we have a partial_apply, release each captured argument at each one of
  // the final release locations of the partial apply.
  if (auto *pai = dyn_cast<PartialApplyInst>(closure)) {
    assert(!pa->isOnStack() &&
           "partial_apply [stack] should have been handled before");
    SILBuilderContext builderCtxt(pai->getModule());
    if (needKeepArgsAlive) {
      if (!keepArgsOfPartialApplyAlive(pai, closureDestroys, builderCtxt,
                                       callbacks))
        return false;
    } else {
      // A preceding partial_apply -> apply conversion (done in
      // tryOptimizeApplyOfPartialApply) already ensured that the arguments
      // are kept alive until the end of the partial_apply's lifetime.
      SmallVector<Operand *, 8> argsToHandle;
      getConsumedPartialApplyArgs(pai, argsToHandle,
                                  /*includeTrivialAddrArgs*/ false);

      // We can just destroy the arguments at the point of the partial_apply
      // (remember: partial_apply consumes all arguments).
      for (Operand *argOp : argsToHandle) {
        SILValue arg = argOp->get();
        SILBuilderWithScope builder(pai, builderCtxt);
        emitDestroyOperation(builder, pai->getLoc(), arg, callbacks);
      }
    }
  }

  // Delete all copy and destroy instructions in order so that leaf uses are
  // deleted first.
  for (auto *use : closureDestroys) {
    auto *user = use->getUser();
    assert(
        (useDoesNotKeepValueAlive(user) || useHasTransitiveOwnership(user)) &&
        "We expect only ARC operations without "
        "results or a cast from escape to noescape without users");
    callbacks.deleteInst(user);
  }

  callbacks.deleteInst(closure);
  return true;
}
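
// Illustrative SIL sketch (not from the original source): a closure that is
// only copied and destroyed is dead and can be removed by
// tryDeleteDeadClosure, e.g.:
//
//   %pa = partial_apply [callee_guaranteed] %f(%capture) : $...
//   %cp = copy_value %pa : $@callee_guaranteed () -> ()
//   destroy_value %cp : $@callee_guaranteed () -> ()
//   destroy_value %pa : $@callee_guaranteed () -> ()
//
// The utility deletes the copy and both destroys, then destroys the consumed
// capture %capture at the appropriate points.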

bool swift::simplifyUsers(SingleValueInstruction *inst) {
  bool changed = false;
  InstModCallbacks callbacks;

  for (auto ui = inst->use_begin(), ue = inst->use_end(); ui != ue;) {
    SILInstruction *user = ui->getUser();
    ++ui;

    auto svi = dyn_cast<SingleValueInstruction>(user);
    if (!svi)
      continue;

    callbacks.resetHadCallbackInvocation();
    simplifyAndReplaceAllSimplifiedUsesAndErase(svi, callbacks);
    changed |= callbacks.hadCallbackInvocation();
  }

  return changed;
}

/// Some support functions for the global-opt and let-properties-opts passes.

// Encapsulate the state used for recursive analysis of a static
// initializer. Discover all the instructions in a use-def graph and return
// them in topological order.
//
// TODO: We should have a DFS utility for this sort of thing so it isn't
// recursive.
class StaticInitializerAnalysis {
  SmallVectorImpl<SILInstruction *> &postOrderInstructions;
  llvm::SmallDenseSet<SILValue, 8> visited;
  int recursionLevel = 0;

public:
  StaticInitializerAnalysis(
      SmallVectorImpl<SILInstruction *> &postOrderInstructions)
      : postOrderInstructions(postOrderInstructions) {}

  // Perform a recursive DFS on the use-def graph rooted at `rootValue`.
  // Insert values in the `visited` set in preorder. Insert values in
  // `postOrderInstructions` in postorder so that the instructions are
  // topologically def-use ordered (in execution order).
  bool analyze(SILValue rootValue) {
    return recursivelyAnalyzeOperand(rootValue);
  }

protected:
  bool recursivelyAnalyzeOperand(SILValue v) {
    if (!visited.insert(v).second)
      return true;

    if (++recursionLevel > 50)
      return false;

    // TODO: For multi-result instructions, we could simply insert all result
    // values in the visited set here.
    auto *inst = dyn_cast<SingleValueInstruction>(v);
    if (!inst)
      return false;

    if (!recursivelyAnalyzeInstruction(inst))
      return false;

    postOrderInstructions.push_back(inst);
    --recursionLevel;
    return true;
  }

  bool recursivelyAnalyzeInstruction(SILInstruction *inst) {
    if (auto *si = dyn_cast<StructInst>(inst)) {
      // If it is not a struct which is a simple type, bail.
      if (!si->getType().isTrivial(*si->getFunction()))
        return false;

      return llvm::all_of(si->getAllOperands(), [&](Operand &operand) -> bool {
        return recursivelyAnalyzeOperand(operand.get());
      });
    }
    if (auto *ti = dyn_cast<TupleInst>(inst)) {
      // If it is not a tuple which is a simple type, bail.
      if (!ti->getType().isTrivial(*ti->getFunction()))
        return false;

      return llvm::all_of(ti->getAllOperands(), [&](Operand &operand) -> bool {
        return recursivelyAnalyzeOperand(operand.get());
      });
    }
    if (auto *bi = dyn_cast<BuiltinInst>(inst)) {
      switch (bi->getBuiltinInfo().ID) {
      case BuiltinValueKind::FPTrunc:
        if (auto *li = dyn_cast<LiteralInst>(bi->getArguments()[0])) {
          return recursivelyAnalyzeOperand(li);
        }
        return false;
      default:
        return false;
      }
    }
    return isa<IntegerLiteralInst>(inst) || isa<FloatLiteralInst>(inst)
           || isa<StringLiteralInst>(inst);
  }
};

/// Check if the value of \p v is computed by means of a simple initialization.
/// Populate `forwardInstructions` with references to all the instructions
/// that participate in the use-def graph required to compute \p v. The
/// instructions will be in def-use topological order.
bool swift::analyzeStaticInitializer(
    SILValue v, SmallVectorImpl<SILInstruction *> &forwardInstructions) {
  return StaticInitializerAnalysis(forwardInstructions).analyze(v);
}
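
// Illustrative sketch (not from the original source): a global initializer
// the analysis accepts is a tree of trivial literals and aggregates, e.g.:
//
//   %0 = integer_literal $Builtin.Int64, 1
//   %1 = struct $Int (%0 : $Builtin.Int64)
//   %2 = struct $Point (%1 : $Int, %1 : $Int)   // assuming Point is trivial
//
// analyzeStaticInitializer(%2, out) returns true and fills `out` with
// %0, %1, %2 in def-use (execution) order.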

/// FIXME: This must be kept in sync with replaceLoadSequence()
/// below. What a horrible design.
bool swift::canReplaceLoadSequence(SILInstruction *inst) {
  if (isa<CopyAddrInst>(inst))
    return true;

  if (isa<LoadInst>(inst))
    return true;

  if (auto *seai = dyn_cast<StructElementAddrInst>(inst)) {
    for (auto seaiUse : seai->getUses()) {
      if (!canReplaceLoadSequence(seaiUse->getUser()))
        return false;
    }
    return true;
  }

  if (auto *teai = dyn_cast<TupleElementAddrInst>(inst)) {
    for (auto teaiUse : teai->getUses()) {
      if (!canReplaceLoadSequence(teaiUse->getUser()))
        return false;
    }
    return true;
  }

  if (auto *ba = dyn_cast<BeginAccessInst>(inst)) {
    for (auto use : ba->getUses()) {
      if (!canReplaceLoadSequence(use->getUser()))
        return false;
    }
    return true;
  }

  // Incidental uses of an address are meaningless with regard to the loaded
  // value.
  if (isIncidentalUse(inst) || isa<BeginUnpairedAccessInst>(inst))
    return true;

  return false;
}

/// Replace a load sequence, which may contain a chain of struct_element_addr
/// instructions followed by a load. The sequence is traversed inside out,
/// i.e. starting with the innermost struct_element_addr.
///
/// FIXME: this utility does not make sense as an API. How can the caller
/// guarantee that the only uses of `inst` are struct_element_addr and
/// tuple_element_addr?
void swift::replaceLoadSequence(SILInstruction *inst, SILValue value) {
  if (auto *cai = dyn_cast<CopyAddrInst>(inst)) {
    SILBuilder builder(cai);
    builder.createStore(cai->getLoc(), value, cai->getDest(),
                        StoreOwnershipQualifier::Unqualified);
    return;
  }

  if (auto *li = dyn_cast<LoadInst>(inst)) {
    li->replaceAllUsesWith(value);
    return;
  }

  if (auto *seai = dyn_cast<StructElementAddrInst>(inst)) {
    SILBuilder builder(seai);
    auto *sei =
        builder.createStructExtract(seai->getLoc(), value, seai->getField());
    for (auto seaiUse : seai->getUses()) {
      replaceLoadSequence(seaiUse->getUser(), sei);
    }
    return;
  }

  if (auto *teai = dyn_cast<TupleElementAddrInst>(inst)) {
    SILBuilder builder(teai);
    auto *tei = builder.createTupleExtract(teai->getLoc(), value,
                                           teai->getFieldIndex());
    for (auto teaiUse : teai->getUses()) {
      replaceLoadSequence(teaiUse->getUser(), tei);
    }
    return;
  }

  if (auto *ba = dyn_cast<BeginAccessInst>(inst)) {
    for (auto use : ba->getUses()) {
      replaceLoadSequence(use->getUser(), value);
    }
    return;
  }

  // Incidental uses of an address are meaningless with regard to the loaded
  // value.
  if (isIncidentalUse(inst) || isa<BeginUnpairedAccessInst>(inst))
    return;

  llvm_unreachable("Unknown instruction sequence for reading from a global");
}
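
// Illustrative SIL sketch (not from the original source): given a known
// initializer value %v for a global, replaceLoadSequence rewrites a read like
//
//   %a = global_addr @g : $*Pair
//   %f = struct_element_addr %a : $*Pair, #Pair.first
//   %l = load %f : $*Int
//
// into a struct_extract of %v, replacing all uses of %l:
//
//   %e = struct_extract %v : $Pair, #Pair.first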

std::optional<FindLocalApplySitesResult>
swift::findLocalApplySites(FunctionRefBaseInst *fri) {
  SmallVector<Operand *, 32> worklist(fri->use_begin(), fri->use_end());

  std::optional<FindLocalApplySitesResult> f;
  f.emplace();

  // Optimistically state that we have no escapes before our def-use dataflow.
  f->escapes = false;

  while (!worklist.empty()) {
    auto *op = worklist.pop_back_val();
    auto *user = op->getUser();

    // If we have a full apply site as our user.
    if (auto apply = FullApplySite::isa(user)) {
      if (apply.getCallee() == op->get()) {
        f->fullApplySites.push_back(apply);
        continue;
      }
    }

    // If we have a partial apply as a user, start tracking it, but also look
    // at its users.
    if (auto *pai = dyn_cast<PartialApplyInst>(user)) {
      if (pai->getCallee() == op->get()) {
        // Track the partial apply that we saw so we can potentially eliminate
        // dead closure arguments.
        f->partialApplySites.push_back(pai);
        // Look to see if we can find a full application of this partial apply
        // as well.
        llvm::copy(pai->getUses(), std::back_inserter(worklist));
        continue;
      }
    }

    // Otherwise, see if we have any function casts to look through...
    switch (user->getKind()) {
    case SILInstructionKind::ThinToThickFunctionInst:
    case SILInstructionKind::ConvertFunctionInst:
    case SILInstructionKind::ConvertEscapeToNoEscapeInst:
      llvm::copy(cast<SingleValueInstruction>(user)->getUses(),
                 std::back_inserter(worklist));
      continue;

    // A partial_apply [stack] marks its captured arguments with
    // mark_dependence.
    case SILInstructionKind::MarkDependenceInst:
      llvm::copy(cast<SingleValueInstruction>(user)->getUses(),
                 std::back_inserter(worklist));
      continue;

    // Look through any reference count instructions since these are not
    // escapes:
    case SILInstructionKind::CopyValueInst:
      llvm::copy(cast<CopyValueInst>(user)->getUses(),
                 std::back_inserter(worklist));
      continue;
    case SILInstructionKind::StrongRetainInst:
    case SILInstructionKind::StrongReleaseInst:
    case SILInstructionKind::RetainValueInst:
    case SILInstructionKind::ReleaseValueInst:
    case SILInstructionKind::DestroyValueInst:
    // A partial_apply [stack] is deallocated with a dealloc_stack.
    case SILInstructionKind::DeallocStackInst:
      continue;
    default:
      break;
    }

    // But everything else is considered an escape.
    f->escapes = true;
  }

  // If we did escape and didn't find any apply sites, then we have no
  // information for our users that is interesting.
  if (f->escapes && f->partialApplySites.empty() && f->fullApplySites.empty())
    return std::nullopt;
  return f;
}

/// Insert destroys of captured arguments of partial_apply [stack].
void swift::insertDestroyOfCapturedArguments(
    PartialApplyInst *pai, SILBuilder &builder,
    llvm::function_ref<SILValue(SILValue)> getValueToDestroy,
    SILLocation origLoc) {
  assert(pai->isOnStack());

  ApplySite site(pai);
  SILFunctionConventions calleeConv(site.getSubstCalleeType(),
                                    pai->getModule());
  auto loc = CleanupLocation(origLoc);
  for (auto &arg : pai->getArgumentOperands()) {
    SILValue argValue = getValueToDestroy(arg.get());
    if (!argValue)
      continue;

    assert(!pai->getFunction()->hasOwnership()
           || (argValue->getOwnershipKind().isCompatibleWith(
                  OwnershipKind::Owned)));

    unsigned calleeArgumentIndex = site.getCalleeArgIndex(arg);
    assert(calleeArgumentIndex >= calleeConv.getSILArgIndexOfFirstParam());
    auto paramInfo = calleeConv.getParamInfoForSILArg(calleeArgumentIndex);
    releasePartialApplyCapturedArg(builder, loc, argValue, paramInfo);
  }
}

void swift::insertDeallocOfCapturedArguments(
    PartialApplyInst *pai,
    DominanceInfo *domInfo,
    llvm::function_ref<SILValue(SILValue)> getAddressToDealloc)
{
  assert(pai->isOnStack());

  ApplySite site(pai);
  SILFunctionConventions calleeConv(site.getSubstCalleeType(),
                                    pai->getModule());
  for (auto &arg : pai->getArgumentOperands()) {
    unsigned calleeArgumentIndex = site.getCalleeArgIndex(arg);
    assert(calleeArgumentIndex >= calleeConv.getSILArgIndexOfFirstParam());
    auto paramInfo = calleeConv.getParamInfoForSILArg(calleeArgumentIndex);
    if (!paramInfo.isIndirectInGuaranteed())
      continue;

    SILValue argValue = getAddressToDealloc(arg.get());
    if (!argValue) {
      continue;
    }

    SmallVector<SILBasicBlock *, 4> boundary;
    auto *asi = cast<AllocStackInst>(argValue);
    computeDominatedBoundaryBlocks(asi->getParent(), domInfo, boundary);

    SmallVector<Operand *, 2> uses;
    auto useFinding = findTransitiveUsesForAddress(asi, &uses);
    InstructionSet users(asi->getFunction());
    if (useFinding == AddressUseKind::NonEscaping) {
      for (auto use : uses) {
        users.insert(use->getUser());
      }
    }

    for (auto *block : boundary) {
      auto *terminator = block->getTerminator();
      if (isa<UnreachableInst>(terminator))
        continue;
      SILInstruction *insertionPoint = nullptr;
      if (useFinding == AddressUseKind::NonEscaping) {
        insertionPoint = &block->front();
        for (auto &instruction : llvm::reverse(*block)) {
          if (users.contains(&instruction)) {
            insertionPoint = instruction.getNextInstruction();
            break;
          }
        }
      } else {
        insertionPoint = terminator;
      }
      SILBuilderWithScope builder(insertionPoint);
      builder.createDeallocStack(CleanupLocation(insertionPoint->getLoc()),
                                 argValue);
    }
  }
}

AbstractFunctionDecl *swift::getBaseMethod(AbstractFunctionDecl *FD) {
  while (FD->getOverriddenDecl()) {
    FD = FD->getOverriddenDecl();
  }
  return FD;
}

FullApplySite
swift::cloneFullApplySiteReplacingCallee(FullApplySite applySite,
                                         SILValue newCallee,
                                         SILBuilderContext &builderCtx) {
  SmallVector<SILValue, 16> arguments;
  llvm::copy(applySite.getArguments(), std::back_inserter(arguments));

  SILBuilderWithScope builder(applySite.getInstruction(), builderCtx);

  switch (applySite.getKind()) {
  case FullApplySiteKind::TryApplyInst: {
    auto *tai = cast<TryApplyInst>(applySite.getInstruction());
    return builder.createTryApply(tai->getLoc(), newCallee,
                                  tai->getSubstitutionMap(), arguments,
                                  tai->getNormalBB(), tai->getErrorBB(),
                                  tai->getApplyOptions());
  }
  case FullApplySiteKind::ApplyInst: {
    auto *ai = cast<ApplyInst>(applySite);
    auto fTy = newCallee->getType().getAs<SILFunctionType>();

    auto options = ai->getApplyOptions();
    // The optimizer can generate a thin_to_thick_function from a throwing thin
    // to a non-throwing thick function (in case it can prove that the function
    // is not throwing).
    // Therefore we have to check if the new callee (= the argument of the
    // thin_to_thick_function) is a throwing function and set the not-throwing
    // flag in this case.
    if (fTy->hasErrorResult())
      options |= ApplyFlags::DoesNotThrow;
    return builder.createApply(applySite.getLoc(), newCallee,
                               applySite.getSubstitutionMap(), arguments,
                               options);
  }
  case FullApplySiteKind::BeginApplyInst: {
    llvm_unreachable("begin_apply support not implemented?!");
  }
  }
  llvm_unreachable("Unhandled case?!");
}
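
// A sketch of typical use (illustrative; `fas` and `thunk` are assumed to
// exist and to have compatible function types):
//
//   SILBuilderContext builderCtx(fas.getModule());
//   FullApplySite newSite =
//       cloneFullApplySiteReplacingCallee(fas, thunk, builderCtx);
//   // The original apply site is left in place; deleting it (and rewriting
//   // uses of its results) is the caller's responsibility.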

// FIXME: For any situation where this may be called on an unbounded number of
// uses, it should perform a single callback invocation to notify the client
// that newValue has new uses rather than a callback for every new use.
//
// FIXME: This should almost certainly replace end_lifetime uses rather than
// deleting them.
SILBasicBlock::iterator swift::replaceAllUses(SILValue oldValue,
                                              SILValue newValue,
                                              SILBasicBlock::iterator nextii,
                                              InstModCallbacks &callbacks) {
  assert(oldValue != newValue && "Cannot RAUW a value with itself");
  while (!oldValue->use_empty()) {
    Operand *use = *oldValue->use_begin();
    SILInstruction *user = use->getUser();
    // Erase the end of scope marker.
    if (isEndOfScopeMarker(user)) {
      if (&*nextii == user)
        ++nextii;
      callbacks.deleteInst(user);
      continue;
    }
    callbacks.setUseValue(use, newValue);
  }
  return nextii;
}
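
// A sketch of typical use (illustrative): passes commonly pair this with an
// InstModCallbacks that keeps their worklists in sync, e.g.
//
//   InstModCallbacks callbacks = InstModCallbacks().onDelete(
//       [&](SILInstruction *instToDelete) {
//         worklist.erase(instToDelete);
//         instToDelete->eraseFromParent();
//       });
//   replaceAllUses(oldValue, newValue, nextii, callbacks);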

SILBasicBlock::iterator
swift::replaceAllUsesAndErase(SingleValueInstruction *svi, SILValue newValue,
                              InstModCallbacks &callbacks) {
  SILBasicBlock::iterator nextii = replaceAllUses(
      svi, newValue, std::next(svi->getIterator()), callbacks);

  callbacks.deleteInst(svi);

  return nextii;
}

SILBasicBlock::iterator
swift::replaceAllUsesAndErase(SILValue oldValue, SILValue newValue,
                              InstModCallbacks &callbacks) {
  auto *blockArg = dyn_cast<SILPhiArgument>(oldValue);
  if (!blockArg) {
    // SingleValueInstruction SSA replacement.
    return replaceAllUsesAndErase(cast<SingleValueInstruction>(oldValue),
                                  newValue, callbacks);
  }
  llvm_unreachable("Untested");
#if 0 // FIXME: to be enabled in a following commit
  TermInst *oldTerm = blockArg->getTerminatorForResult();
  assert(oldTerm && "can only replace and erase terminators, not phis");

  // Before:
  //   oldTerm bb1, bb2
  // bb1(%oldValue):
  //   use %oldValue
  // bb2:
  //
  // After:
  //   br bb1
  // bb1:
  //   use %newValue
  // bb2:

  auto nextii = replaceAllUses(blockArg, newValue,
                               oldTerm->getParent()->end(), callbacks);
  // Now that oldValue is replaced, the terminator should have no uses
  // left. The caller should have removed uses from other results.
  for (auto *succBB : oldTerm->getSuccessorBlocks()) {
    assert(succBB->getNumArguments() == 1 && "expected terminator result");
    succBB->eraseArgument(0);
  }
  auto *newBr = SILBuilderWithScope(oldTerm).createBranch(
      oldTerm->getLoc(), blockArg->getParent());
  callbacks.createdNewInst(newBr);
  callbacks.deleteInst(oldTerm);
  return nextii;
#endif
}

/// Given that we are going to replace use's underlying value, if the use is a
/// lifetime ending use, insert an end scope use for the underlying value
/// before we RAUW.
static void cleanupUseOldValueBeforeRAUW(Operand *use, SILBuilder &builder,
                                         SILLocation loc,
                                         InstModCallbacks &callbacks) {
  if (!use->isLifetimeEnding()) {
    return;
  }

  switch (use->get()->getOwnershipKind()) {
  case OwnershipKind::Any:
    llvm_unreachable("Invalid ownership for value");
  case OwnershipKind::Owned: {
    auto *dvi = builder.createDestroyValue(loc, use->get());
    callbacks.createdNewInst(dvi);
    return;
  }
  case OwnershipKind::Guaranteed: {
    // Should only happen once we model destructures as true reborrows.
    auto *ebi = builder.createEndBorrow(loc, use->get());
    callbacks.createdNewInst(ebi);
    return;
  }
  case OwnershipKind::None:
    return;
  case OwnershipKind::Unowned:
    llvm_unreachable("Unowned object can never be consumed?!");
  }
  llvm_unreachable("Covered switch isn't covered");
}
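
// For example (illustrative SIL): if the use being rewritten consumes an
// @owned value %old, the helper above first emits `destroy_value %old` at
// the use, so that %old's lifetime still ends there once the use starts
// consuming the replacement value instead.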

SILBasicBlock::iterator swift::replaceSingleUse(Operand *use, SILValue newValue,
                                                InstModCallbacks &callbacks) {
  auto oldValue = use->get();
  assert(oldValue != newValue && "Cannot RAUW a value with itself");

  auto *user = use->getUser();
  auto nextII = std::next(user->getIterator());

  // If we have an end of scope marker, just return next. We are done.
  if (isEndOfScopeMarker(user)) {
    return nextII;
  }

  // Otherwise, first clean up the use's old value if we need to and then set
  // the use to have the new value.
  SILBuilderWithScope builder(user);
  cleanupUseOldValueBeforeRAUW(use, builder, user->getLoc(), callbacks);
  callbacks.setUseValue(use, newValue);

  return nextII;
}

SILValue swift::makeCopiedValueAvailable(SILValue value, SILBasicBlock *inBlock) {
  if (isa<SILUndef>(value))
    return value;

  if (!value->getFunction()->hasOwnership())
    return value;

  if (value->getOwnershipKind() == OwnershipKind::None)
    return value;

  auto insertPt = getInsertAfterPoint(value).value();
  SILBuilderWithScope builder(insertPt);
  auto *copy = builder.createCopyValue(
      RegularLocation::getAutoGeneratedLocation(), value);

  return makeValueAvailable(copy, inBlock);
}

SILValue swift::makeValueAvailable(SILValue value, SILBasicBlock *inBlock) {
  if (isa<SILUndef>(value))
    return value;

  if (!value->getFunction()->hasOwnership())
    return value;

  if (value->getOwnershipKind() == OwnershipKind::None)
    return value;

  assert(value->getOwnershipKind() == OwnershipKind::Owned);

  SmallVector<SILBasicBlock *, 4> userBBs;
  for (auto use : value->getUses()) {
    userBBs.push_back(use->getParentBlock());
  }
  userBBs.push_back(inBlock);

  // Use \p jointPostDomComputer to:
  // 1. Create a control equivalent copy at \p inBlock if needed
  // 2. Insert destroy_value at leaking blocks
  SILValue controlEqCopy;
  findJointPostDominatingSet(
      value->getParentBlock(), userBBs,
      [&](SILBasicBlock *loopBlock) {
        assert(loopBlock == inBlock);
        auto front = loopBlock->begin();
        SILBuilderWithScope newBuilder(front);
        controlEqCopy = newBuilder.createCopyValue(
            RegularLocation::getAutoGeneratedLocation(), value);
      },
      [&](SILBasicBlock *postDomBlock) {
        // Insert a destroy_value in the leaking block
        auto front = postDomBlock->begin();
        SILBuilderWithScope newBuilder(front);
        newBuilder.createDestroyValue(
            RegularLocation::getAutoGeneratedLocation(), value);
      });

  return controlEqCopy ? controlEqCopy : value;
}
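
// A rough sketch (illustrative CFG): for an @owned value %v defined in bb0,
// with bb0 branching to bb1 and bb2 and %v used only in bb1, making %v
// available in bb1 leaves %v live into bb1 (via a control-equivalent
// copy_value when bb1 is reached around a loop) and inserts a destroy_value
// at the start of bb2, the "leaking" block, so no path leaks %v.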

bool swift::tryEliminateOnlyOwnershipUsedForwardingInst(
    SingleValueInstruction *forwardingInst, InstModCallbacks &callbacks) {
  auto fwdOp = ForwardingOperation(forwardingInst);
  if (!fwdOp) {
    return false;
  }
  auto *singleFwdOp = fwdOp.getSingleForwardingOperand();
  if (!singleFwdOp) {
    return false;
  }

  SmallVector<Operand *, 32> worklist(getNonDebugUses(forwardingInst));
  while (!worklist.empty()) {
    auto *use = worklist.pop_back_val();
    auto *user = use->getUser();

    if (isa<EndBorrowInst>(user) || isa<DestroyValueInst>(user) ||
        isa<RefCountingInst>(user))
      continue;

    if (isa<CopyValueInst>(user) || isa<BeginBorrowInst>(user)) {
      for (auto result : user->getResults())
        for (auto *resultUse : getNonDebugUses(result))
          worklist.push_back(resultUse);
      continue;
    }

    return false;
  }

  // Now that we know we can perform our transform, set all uses of
  // forwardingInst to be uses of its operand and then delete \p forwardingInst.
  auto newValue = singleFwdOp->get();
  while (!forwardingInst->use_empty()) {
    auto *use = *(forwardingInst->use_begin());
    use->set(newValue);
  }

  callbacks.deleteInst(forwardingInst);
  return true;
}
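
// In other words, a forwarding instruction whose result is only copied,
// borrowed, destroyed, or reference counted carries no information beyond
// ownership, so its uses can be retargeted directly at the forwarded operand
// and the instruction deleted.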

// The consuming use blocks are assumed either not to be inside a loop relative
// to \p value, or they must have their own copies.
void swift::endLifetimeAtLeakingBlocks(SILValue value,
                                       ArrayRef<SILBasicBlock *> uses,
                                       DeadEndBlocks *deadEndBlocks) {
  if (!value->getFunction()->hasOwnership())
    return;

  if (value->getOwnershipKind() != OwnershipKind::Owned)
    return;

  findJointPostDominatingSet(
      value->getParentBlock(), uses, [&](SILBasicBlock *loopBlock) {},
      [&](SILBasicBlock *postDomBlock) {
        // Insert a destroy_value in the leaking block
        auto front = postDomBlock->begin();
        SILBuilderWithScope newBuilder(front);
        swift::IsDeadEnd_t isDeadEnd = swift::IsntDeadEnd;
        if (deadEndBlocks) {
          isDeadEnd = IsDeadEnd_t(deadEndBlocks->isDeadEnd(
              newBuilder.getInsertionPoint()->getParent()));
        }
        newBuilder.createDestroyValue(
            RegularLocation::getAutoGeneratedLocation(), value, DontPoisonRefs,
            isDeadEnd);
      });
}

/// Create a new debug value from a store and a debug variable.
static void transferStoreDebugValue(DebugVarCarryingInst DefiningInst,
                                    SILInstruction *SI,
                                    SILValue original) {
  auto VarInfo = DefiningInst.getVarInfo();
  if (!VarInfo)
    return;
  // Fix the op_deref.
  if (!isa<CopyAddrInst>(SI) && VarInfo->DIExpr.startsWithDeref())
    VarInfo->DIExpr.eraseElement(VarInfo->DIExpr.element_begin());
  else if (isa<CopyAddrInst>(SI) && !VarInfo->DIExpr.startsWithDeref())
    VarInfo->DIExpr.prependElements({
      SILDIExprElement::createOperator(SILDIExprOperator::Dereference)});
  // Note: The instruction should logically be in the SI's scope.
  // However, LLVM does not support variables and stores in different scopes,
  // so we use the variable's scope.
  SILBuilder(SI, DefiningInst->getDebugScope())
      .createDebugValue(SI->getLoc(), original, *VarInfo);
}

void swift::salvageStoreDebugInfo(SILInstruction *SI,
                                  SILValue SrcVal, SILValue DestVal) {
  if (auto *ASI = dyn_cast_or_null<AllocStackInst>(
          DestVal.getDefiningInstruction())) {
    transferStoreDebugValue(ASI, SI, SrcVal);
    for (Operand *U : getDebugUses(ASI))
      transferStoreDebugValue(U->getUser(), SI, SrcVal);
  }
}
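
// Illustrative SIL (hypothetical): when an optimization deletes
//
//   store %value to %slot : $*T     // %slot is an alloc_stack carrying var info
//
// the helpers above emit a `debug_value %value` for the same variable (with
// the op_deref adjusted), so the debugger still sees the variable after the
// store is gone.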

// TODO: this currently fails to notify the pass with notifyNewInstruction.
//
// TODO: whenever a debug_value is inserted at a new location, check that no
// other debug_value instructions exist between the old and new location for
// the same variable.
void swift::salvageDebugInfo(SILInstruction *I) {
  if (!I)
    return;

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (SILValue DestVal = SI->getDest())
      salvageStoreDebugInfo(SI, SI->getSrc(), DestVal);
  }
  if (auto *SI = dyn_cast<StoreBorrowInst>(I)) {
    if (SILValue DestVal = SI->getDest())
      salvageStoreDebugInfo(SI, SI->getSrc(), DestVal);
    for (Operand *U : getDebugUses(SI))
      transferStoreDebugValue(U->getUser(), SI, SI->getSrc());
  }
  // If a `struct` SIL instruction is "unwrapped" and removed,
  // for instance, in favor of using its enclosed value directly,
  // we need to make sure any of its related `debug_value` instructions
  // are preserved.
  if (auto *STI = dyn_cast<StructInst>(I)) {
    auto STVal = STI->getResult(0);
    llvm::ArrayRef<VarDecl *> FieldDecls =
        STI->getStructDecl()->getStoredProperties();
    for (Operand *U : getDebugUses(STVal)) {
      auto *DbgInst = cast<DebugValueInst>(U->getUser());
      auto VarInfo = DbgInst->getVarInfo();
      if (!VarInfo)
        continue;
      for (VarDecl *FD : FieldDecls) {
        SILDebugVariable NewVarInfo = *VarInfo;
        auto FieldVal = STI->getFieldValue(FD);
        // Build the corresponding fragment DIExpression
        auto FragDIExpr = SILDebugInfoExpression::createFragment(FD);
        NewVarInfo.DIExpr.append(FragDIExpr);

        if (!NewVarInfo.Type)
          NewVarInfo.Type = STI->getType();

        // Create a new debug_value
        SILBuilder(STI, DbgInst->getDebugScope())
            .createDebugValue(DbgInst->getLoc(), FieldVal, NewVarInfo);
      }
    }
  }
  // Similarly, if a `tuple` SIL instruction is "unwrapped" and removed,
  // we need to make sure any of its related `debug_value` instructions
  // are preserved.
  if (auto *TTI = dyn_cast<TupleInst>(I)) {
    auto TTVal = TTI->getResult(0);
    for (Operand *U : getDebugUses(TTVal)) {
      auto *DbgInst = cast<DebugValueInst>(U->getUser());
      auto VarInfo = DbgInst->getVarInfo();
      if (!VarInfo)
        continue;
      TupleType *TT = TTI->getTupleType();
      for (auto i : indices(TT->getElements())) {
        SILDebugVariable NewVarInfo = *VarInfo;
        auto FragDIExpr = SILDebugInfoExpression::createTupleFragment(TT, i);
        NewVarInfo.DIExpr.append(FragDIExpr);

        if (!NewVarInfo.Type)
          NewVarInfo.Type = TTI->getType();

        // Create a new debug_value
        SILBuilder(TTI, DbgInst->getDebugScope())
            .createDebugValue(DbgInst->getLoc(), TTI->getElement(i), NewVarInfo);
      }
    }
  }

  if (auto *IA = dyn_cast<IndexAddrInst>(I)) {
    if (IA->getBase() && IA->getIndex())
      // Only handle cases where the offset is constant.
      if (const auto *LiteralInst =
              dyn_cast<IntegerLiteralInst>(IA->getIndex())) {
        SILValue Base = IA->getBase();
        SILValue ResultAddr = IA->getResult(0);
        APInt OffsetVal = LiteralInst->getValue();
        const SILDIExprElement ExprElements[3] = {
            SILDIExprElement::createOperator(OffsetVal.isNegative() ?
                SILDIExprOperator::ConstSInt : SILDIExprOperator::ConstUInt),
            SILDIExprElement::createConstInt(OffsetVal.getLimitedValue()),
            SILDIExprElement::createOperator(SILDIExprOperator::Plus)
        };
        for (Operand *U : getDebugUses(ResultAddr)) {
          auto *DbgInst = cast<DebugValueInst>(U->getUser());
          auto VarInfo = DbgInst->getVarInfo();
          if (!VarInfo)
            continue;
          VarInfo->DIExpr.prependElements(ExprElements);
          // Create a new debug_value
          SILBuilder(IA, DbgInst->getDebugScope())
              .createDebugValue(DbgInst->getLoc(), Base, *VarInfo);
        }
      }
  }

  if (auto *IL = dyn_cast<IntegerLiteralInst>(I)) {
    APInt value = IL->getValue();
    const SILDIExprElement ExprElements[2] = {
        SILDIExprElement::createOperator(value.isNegative() ?
            SILDIExprOperator::ConstSInt : SILDIExprOperator::ConstUInt),
        SILDIExprElement::createConstInt(value.getLimitedValue()),
    };
    for (Operand *U : getDebugUses(IL)) {
      auto *DbgInst = cast<DebugValueInst>(U->getUser());
      auto VarInfo = DbgInst->getVarInfo();
      if (!VarInfo)
        continue;
      VarInfo->DIExpr.prependElements(ExprElements);
      // Create a new debug_value, with undef, and the correct const int
      SILBuilder(DbgInst, DbgInst->getDebugScope())
          .createDebugValue(DbgInst->getLoc(), SILUndef::get(IL), *VarInfo);
    }
  }
}
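
// For example (illustrative): deleting `%p = struct $Point (%x, %y)` that
// feeds `debug_value %p` for a variable "pt" emits per-field debug values
// roughly of the form `debug_value %x, ..., expr op_fragment:#Point.x`, so
// each stored property of "pt" remains visible in the debugger.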

void swift::salvageLoadDebugInfo(LoadOperation load) {
  for (Operand *debugUse : getDebugUses(load.getLoadInst())) {
    // Create a new debug_value rather than reusing the old one so the
    // SILBuilder adds 'expr(deref)' to account for the indirection.
    auto *debugInst = cast<DebugValueInst>(debugUse->getUser());
    auto varInfo = debugInst->getVarInfo();
    if (!varInfo)
      continue;

    // The new debug_value must be "hoisted" to the load to ensure that the
    // address is still valid.
    SILBuilder(load.getLoadInst(), debugInst->getDebugScope())
        .createDebugValueAddr(debugInst->getLoc(), load.getOperand(),
                              varInfo.value());
  }
}

// TODO: this currently fails to notify the pass with notifyNewInstruction.
void swift::createDebugFragments(SILValue oldValue, Projection proj,
                                 SILValue newValue) {
  if (proj.getKind() != ProjectionKind::Struct)
    return;

  for (auto *use : getDebugUses(oldValue)) {
    auto debugVal = dyn_cast<DebugValueInst>(use->getUser());
    if (!debugVal)
      continue;

    auto varInfo = debugVal->getVarInfo();

    SILType baseType = oldValue->getType();

    // Copy VarInfo and add the corresponding fragment DIExpression.
    SILDebugVariable newVarInfo = *varInfo;
    newVarInfo.DIExpr.append(
        SILDebugInfoExpression::createFragment(proj.getVarDecl(baseType)));

    if (!newVarInfo.Type)
      newVarInfo.Type = baseType;

    // Create a new debug_value
    SILBuilder(debugVal, debugVal->getDebugScope())
        .createDebugValue(debugVal->getLoc(), newValue, newVarInfo);
  }
}

IntegerLiteralInst *swift::optimizeBuiltinCanBeObjCClass(BuiltinInst *bi,
                                                         SILBuilder &builder) {
  assert(bi->getBuiltinInfo().ID == BuiltinValueKind::CanBeObjCClass);
  assert(bi->hasSubstitutions() && "Expected substitutions for canBeClass");

  auto const &subs = bi->getSubstitutions();
  assert((subs.getReplacementTypes().size() == 1) &&
         "Expected one substitution in call to canBeClass");

  auto ty = subs.getReplacementTypes()[0]->getCanonicalType();
  switch (ty->canBeClass()) {
  case TypeTraitResult::IsNot:
    return builder.createIntegerLiteral(bi->getLoc(), bi->getType(),
                                        APInt(8, 0));
  case TypeTraitResult::Is:
    return builder.createIntegerLiteral(bi->getLoc(), bi->getType(),
                                        APInt(8, 1));
  case TypeTraitResult::CanBe:
    return nullptr;
  }
  llvm_unreachable("Unhandled TypeTraitResult in switch.");
}

SILValue swift::createEmptyAndUndefValue(SILType ty,
                                         SILInstruction *insertionPoint,
                                         SILBuilderContext &ctx,
                                         bool noUndef) {
  auto *function = insertionPoint->getFunction();
  if (auto tupleTy = ty.getAs<TupleType>()) {
    SmallVector<SILValue, 4> elements;
    for (unsigned idx : range(tupleTy->getNumElements())) {
      SILType elementTy = ty.getTupleElementType(idx);
      auto element = createEmptyAndUndefValue(elementTy, insertionPoint, ctx);
      elements.push_back(element);
    }
    SILBuilderWithScope builder(insertionPoint, ctx);
    return builder.createTuple(insertionPoint->getLoc(), ty, elements);
  }
  if (auto *decl = ty.getStructOrBoundGenericStruct()) {
    TypeExpansionContext tec = *function;
    auto &module = function->getModule();
    if (decl->isResilient(tec.getContext()->getParentModule(),
                          tec.getResilienceExpansion())) {
      llvm::errs() << "Attempting to create value for illegal empty type:\n";
      ty.print(llvm::errs());
      llvm::report_fatal_error("illegal empty type: resilient struct");
    }
    SmallVector<SILValue, 4> elements;
    for (auto *field : decl->getStoredProperties()) {
      auto elementTy = ty.getFieldType(field, module, tec);
      auto element = createEmptyAndUndefValue(elementTy, insertionPoint, ctx);
      elements.push_back(element);
    }
    SILBuilderWithScope builder(insertionPoint, ctx);
    return builder.createStruct(insertionPoint->getLoc(), ty, elements);
  }
  assert(!noUndef);
  return SILUndef::get(insertionPoint->getFunction(), ty);
}
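
// For example (illustrative): given `struct Empty {}`, a request for the type
// $(Empty, Empty) builds two `struct $Empty ()` values and a `tuple` of them
// rather than a single `undef`; only leaf types with actual storage fall
// through to SILUndef, and passing `noUndef` asserts that this never happens.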

bool swift::findUnreferenceableStorage(StructDecl *decl, SILType structType,
                                       SILFunction *func) {
  if (decl->hasUnreferenceableStorage()) {
    return true;
  }
  // Check if any fields have unreferenceable storage.
  for (auto *field : decl->getStoredProperties()) {
    TypeExpansionContext tec = *func;
    auto fieldTy = structType.getFieldType(field, func->getModule(), tec);
    if (auto *fieldStructDecl = fieldTy.getStructOrBoundGenericStruct()) {
      if (findUnreferenceableStorage(fieldStructDecl, fieldTy, func)) {
        return true;
      }
    }
  }
  return false;
}

//===----------------------------------------------------------------------===//
// MARK: Find Initialization Value Of Temporary Alloc Stack
//===----------------------------------------------------------------------===//

namespace {
struct AddressWalkerState {
  bool foundError = false;
  InstructionSet writes;
  AddressWalkerState(SILFunction *fn) : writes(fn) {}
};
} // namespace

static SILValue
findRootValueForNonTupleTempAllocation(AllocationInst *allocInst,
                                       AddressWalkerState &state) {
  // These are instructions which we are ok with looking through when
  // identifying our allocation. It must always refer to the entire allocation.
  auto isAlloc = [&](SILValue value) -> bool {
    if (auto *ieai = dyn_cast<InitExistentialAddrInst>(value))
      value = ieai->getOperand();
    return value == SILValue(allocInst);
  };

  // Walk from our allocation to one of our writes. Then make sure that the
  // write writes to our entire value.
  for (auto &inst : allocInst->getParent()->getRangeStartingAtInst(allocInst)) {
    if (!state.writes.contains(&inst))
      continue;

    if (auto *copyAddr = dyn_cast<CopyAddrInst>(&inst)) {
      if (isAlloc(copyAddr->getDest()) && copyAddr->isInitializationOfDest()) {
        return copyAddr->getSrc();
      }
    }

    if (auto *si = dyn_cast<StoreInst>(&inst)) {
      if (isAlloc(si->getDest()) &&
          si->getOwnershipQualifier() != StoreOwnershipQualifier::Assign) {
        return si->getSrc();
      }
    }

    if (auto *sbi = dyn_cast<StoreBorrowInst>(&inst)) {
      if (isAlloc(sbi->getDest()))
        return sbi->getSrc();
    }

    // If we do not identify the write... return SILValue(). We weren't able
    // to understand the write.
    break;
  }

  return SILValue();
}
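
// For example (illustrative SIL): for a temporary initialized by
//
//   %tmp = alloc_stack $T
//   copy_addr %src to [init] %tmp : $*T
//
// the root value returned is %src, provided no earlier unrecognized write to
// %tmp was seen.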

static SILValue findRootValueForTupleTempAllocation(AllocationInst *allocInst,
                                                    AddressWalkerState &state) {
  SmallVector<SILValue, 8> tupleValues;

  for (unsigned i : range(allocInst->getType().getNumTupleElements())) {
    (void)i;
    tupleValues.push_back(nullptr);
  }

  unsigned numEltsLeft = tupleValues.size();

  // If we have an empty tuple, just return SILValue() for now.
  //
  // TODO: What does this pattern look like out of SILGen?
  if (!numEltsLeft)
    return SILValue();

  // Walk from our allocation to one of our writes. Then make sure that the
  // write writes to our entire value.
  DestructureTupleInst *foundDestructure = nullptr;
  SILValue foundRootAddress;
  for (auto &inst : allocInst->getParent()->getRangeStartingAtInst(allocInst)) {
    if (!state.writes.contains(&inst))
      continue;

    if (auto *copyAddr = dyn_cast<CopyAddrInst>(&inst)) {
      if (copyAddr->isInitializationOfDest()) {
        if (auto *tei = dyn_cast<TupleElementAddrInst>(copyAddr->getDest())) {
          if (tei->getOperand() == allocInst) {
            unsigned i = tei->getFieldIndex();
            if (auto *otherTei = dyn_cast_or_null<TupleElementAddrInst>(
                    copyAddr->getSrc()->getDefiningInstruction())) {
              // If we already were processing destructures, then we have a mix
              // of struct/destructures... we do not support that, so bail.
              if (foundDestructure)
                return SILValue();

              // Otherwise, update our root address. If we already had a root
              // address and it doesn't match our tuple_element_addr's operand,
              // bail. There is some sort of mix/match of tuple addresses that
              // we do not support. We are looking for a specific SILGen
              // pattern.
              if (!foundRootAddress) {
                foundRootAddress = otherTei->getOperand();
              } else if (foundRootAddress != otherTei->getOperand()) {
                return SILValue();
              }

              if (i != otherTei->getFieldIndex())
                return SILValue();
              if (tupleValues[i])
                return SILValue();
              tupleValues[i] = otherTei;

              // If we have completely covered the tuple, break.
              --numEltsLeft;
              if (!numEltsLeft)
                break;

              // Otherwise, continue so we keep processing.
              continue;
            }
          }
        }
      }
    }

    if (auto *si = dyn_cast<StoreInst>(&inst)) {
      if (si->getOwnershipQualifier() != StoreOwnershipQualifier::Assign) {
        // Check if we are updating the entire tuple value.
        if (si->getDest() == allocInst) {
          // If we already found a root address (meaning we were processing
          // tuple_elt_addr), bail. We have some sort of unhandled mix of
          // copy_addr and store.
          if (foundRootAddress)
            return SILValue();

          // If we already found a destructure, return SILValue(). We are
          // initializing twice.
          if (foundDestructure)
            return SILValue();

          // We are looking for a pattern where we construct a tuple from
          // destructured parts.
          if (auto *ti = dyn_cast<TupleInst>(si->getSrc())) {
            for (auto p : llvm::enumerate(ti->getOperandValues())) {
              SILValue value = lookThroughOwnershipInsts(p.value());
              if (auto *dti = dyn_cast_or_null<DestructureTupleInst>(
                      value->getDefiningInstruction())) {
                // We should always go through the same dti.
                if (foundDestructure && foundDestructure != dti)
                  return SILValue();
                if (!foundDestructure)
                  foundDestructure = dti;

                // If we have a mismatch of indices, we cannot look through.
                if (p.index() != dti->getIndexOfResult(value))
                  return SILValue();
                if (tupleValues[p.index()])
                  return SILValue();
                tupleValues[p.index()] = value;

                // If we have completely covered the tuple, break.
                --numEltsLeft;
                if (!numEltsLeft)
                  break;
              }
            }

            // If we haven't completely covered the tuple, return SILValue().
            // We should completely cover the tuple.
            if (numEltsLeft)
              return SILValue();

            // Otherwise, break since we are done.
            break;
          }
        }

        // If we store to a tuple_element_addr, update for a single value.
        if (auto *tei = dyn_cast<TupleElementAddrInst>(si->getDest())) {
          if (tei->getOperand() == allocInst) {
            unsigned i = tei->getFieldIndex();
            if (auto *dti = dyn_cast_or_null<DestructureTupleInst>(
                    si->getSrc()->getDefiningInstruction())) {
              // If we already found a root address (meaning we were processing
              // tuple_elt_addr), bail. We have some sort of unhandled mix of
              // copy_addr and store [init].
              if (foundRootAddress)
                return SILValue();
              if (!foundDestructure) {
                foundDestructure = dti;
              } else if (foundDestructure != dti) {
                return SILValue();
              }

              if (i != dti->getIndexOfResult(si->getSrc()))
                return SILValue();
              if (tupleValues[i])
                return SILValue();
              tupleValues[i] = si->getSrc();

              // If we have completely covered the tuple, break.
              --numEltsLeft;
              if (!numEltsLeft)
                break;

              // Otherwise, continue so we keep processing.
              continue;
            }
          }
        }
      }
    }

    // Found a write that we did not understand... bail.
    break;
  }

  // Now check if we have a complete tuple with all elements coming from the
  // same destructure_tuple. In such a case, we can look through the
  // destructure_tuple.
  if (numEltsLeft)
    return SILValue();

  if (foundDestructure)
    return foundDestructure->getOperand();
  if (foundRootAddress)
    return foundRootAddress;

  return SILValue();
}
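
// Illustrative SILGen-style pattern (hypothetical) that the tuple case
// handles:
//
//   (%x, %y) = destructure_tuple %t : $(A, B)
//   %tmp = alloc_stack $(A, B)
//   %a0 = tuple_element_addr %tmp : $*(A, B), 0
//   store %x to [init] %a0 : $*A
//   %a1 = tuple_element_addr %tmp : $*(A, B), 1
//   store %y to [init] %a1 : $*B
//
// Every element comes from the same destructure_tuple, so the root value is
// its operand %t.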

SILValue swift::getInitOfTemporaryAllocStack(AllocStackInst *asi) {
  // If we are from a VarDecl, bail.
  if (asi->isFromVarDecl())
    return SILValue();

  struct AddressWalker final : public TransitiveAddressWalker<AddressWalker> {
    AddressWalkerState &state;

    AddressWalker(AddressWalkerState &state) : state(state) {}

    bool visitUse(Operand *use) {
      if (use->getUser()->mayWriteToMemory())
        state.writes.insert(use->getUser());
      return true;
    }

    TransitiveUseVisitation visitTransitiveUseAsEndPointUse(Operand *use) {
      if (isa<StoreBorrowInst>(use->getUser()))
        return TransitiveUseVisitation::OnlyUser;
      return TransitiveUseVisitation::OnlyUses;
    }

    void onError(Operand *use) { state.foundError = true; }
  };

  AddressWalkerState state(asi->getFunction());
  AddressWalker walker(state);
  // Note: ignore pointer escapes for the purpose of finding initializers.
  if (std::move(walker).walk(asi) == AddressUseKind::Unknown ||
      state.foundError)
    return SILValue();

  if (asi->getType().is<TupleType>())
    return findRootValueForTupleTempAllocation(asi, state);
  return findRootValueForNonTupleTempAllocation(asi, state);
}

SILType getTypeOfLoadOfArrayOperandStorage(SILValue val) {
  // The projection should look something like this:
  // %29 = struct_element_addr %28 : $*Array<UInt8>, #Array._buffer
  // %30 = struct_element_addr %29 : $*_ArrayBuffer<UInt8>, #_ArrayBuffer._storage
  // %31 = struct_element_addr %30 : $*_BridgeStorage<__ContiguousArrayStorageBase>, #_BridgeStorage.rawValue
  // %32 = load %31 : $*Builtin.BridgeObject

  // We can strip casts and init_existential_ref leading to a load.
  if (auto initExistRef = dyn_cast<InitExistentialRefInst>(val))
    val = initExistRef->getOperand();
  auto ld = dyn_cast<LoadInst>(stripCasts(val));
  if (!ld)
    return SILType();

  auto opd = ld->getOperand();
  auto opdTy = opd->getType();
  if (opdTy.getObjectType() !=
      SILType::getBridgeObjectType(opdTy.getASTContext()))
    return SILType();

  auto bridgedStoragePrj = dyn_cast<StructElementAddrInst>(opd);
  if (!bridgedStoragePrj)
    return SILType();

  auto arrayBufferStoragePrj =
      dyn_cast<StructElementAddrInst>(bridgedStoragePrj->getOperand());
  if (!arrayBufferStoragePrj)
    return SILType();

  // If successful, return _ArrayBuffer<UInt8>.
  return arrayBufferStoragePrj->getOperand()->getType().getObjectType();
}

static bool isBoxTypeWithoutSideEffectsOnRelease(SILFunction *f,
                                                 DestructorAnalysis *DA,
                                                 SILType ty) {
  auto silBoxedTy = ty.getSILBoxFieldType(f);
  if (silBoxedTy && !DA->mayStoreToMemoryOnDestruction(silBoxedTy))
    return true;
  return false;
}

static bool isReleaseOfClosureWithoutSideffects(SILFunction *f,
                                                DestructorAnalysis *DA,
                                                SILValue opd) {
  auto fnTy = dyn_cast<SILFunctionType>(opd->getType().getASTType());
  if (!fnTy)
    return false;

  if (fnTy->isNoEscape() &&
      fnTy->getRepresentation() == SILFunctionType::Representation::Thick)
    return true;

  auto pa = dyn_cast<PartialApplyInst>(lookThroughOwnershipInsts(opd));
  if (!pa)
    return false;

  // Check that all captured argument types are "trivial".
  for (auto &opd : pa->getArgumentOperands()) {
    auto OpdTy = opd.get()->getType().getObjectType();
    if (!DA->mayStoreToMemoryOnDestruction(OpdTy))
      continue;
    if (isBoxTypeWithoutSideEffectsOnRelease(f, DA, OpdTy))
      continue;
    return false;
  }

  return true;
}

bool swift::isDestructorSideEffectFree(SILInstruction *mayRelease,
                                       DestructorAnalysis *DA) {
  switch (mayRelease->getKind()) {
  case SILInstructionKind::DestroyValueInst:
  case SILInstructionKind::StrongReleaseInst:
  case SILInstructionKind::ReleaseValueInst: {
    auto opd = mayRelease->getOperand(0);
    auto opdTy = opd->getType();
    if (!DA->mayStoreToMemoryOnDestruction(opdTy))
      return true;

    auto arrayTy = getTypeOfLoadOfArrayOperandStorage(opd);
    if (arrayTy && !DA->mayStoreToMemoryOnDestruction(arrayTy))
      return true;

    if (isReleaseOfClosureWithoutSideffects(mayRelease->getFunction(), DA, opd))
      return true;

    if (isBoxTypeWithoutSideEffectsOnRelease(mayRelease->getFunction(), DA,
                                             opdTy))
      return true;

    return false;
  }
  case SILInstructionKind::BuiltinInst: {
    auto *builtin = cast<BuiltinInst>(mayRelease);
    switch (builtin->getBuiltinInfo().ID) {
    case BuiltinValueKind::CopyArray:
    case BuiltinValueKind::TakeArrayNoAlias:
    case BuiltinValueKind::TakeArrayFrontToBack:
    case BuiltinValueKind::TakeArrayBackToFront:
      return true; // nothing is released, harmless regardless of type
    case BuiltinValueKind::AssignCopyArrayNoAlias:
    case BuiltinValueKind::AssignCopyArrayFrontToBack:
    case BuiltinValueKind::AssignCopyArrayBackToFront:
    case BuiltinValueKind::AssignTakeArray:
    case BuiltinValueKind::DestroyArray: {
      // Only harmless if the array element type destruction is harmless.
      SubstitutionMap substitutions = builtin->getSubstitutions();
      auto eltTy = substitutions.getReplacementTypes()[0];
      return !DA->mayStoreToMemoryOnDestruction(
          builtin->getFunction()->getLoweredType(eltTy));
    }
    default:
      break;
    }
    return false;
  }
  // Unhandled instruction.
  default:
    return false;
  }

  return false;
}