//===--- MemAccessUtils.cpp - Utilities for SIL memory access. ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sil-access-utils"
#include "swift/SIL/MemAccessUtils.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/SILUndef.h"
#include "llvm/Support/Debug.h"
using namespace swift;
//===----------------------------------------------------------------------===//
// MARK: General Helpers
//===----------------------------------------------------------------------===//
namespace {
enum StorageCastTy { StopAtStorageCast, IgnoreStorageCast };
// Handle a single phi-web within an access use-def chain.
//
// Recursively calls the useDefVisitor on any operations that aren't recognized
// as storage casts or projections. If the useDefVisitor finds a consistent
// result for all operands, then its result will remain valid. If the
// useDefVisitor has an invalid result after processing the phi web, then its
// original result is restored, and the phi is reported to the useDefVisitor as a
// NonAccess.
//
// Phi webs are only allowed to contain casts and projections that do not
// affect the access path. If AccessPhiVisitor reaches an unhandled projection,
// it remembers that as the commonDefinition. If after processing the entire
// web, the commonDefinition is unique, then it calls the original useDefVisitor
// to update its result. Note that visitAccessProjection and setDefinition are
// only used by visitors that process access projections; once the accessed
// address is reached, they are no longer relevant.
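//
// For illustration, a minimal sketch of the kind of phi web this resolves
// (hypothetical SIL; the global @globalVar and types are assumptions):
//
//   bb1:
//     %g1 = global_addr @globalVar : $*Int
//     %p1 = address_to_pointer %g1 : $*Int to $Builtin.RawPointer
//     br bb3(%p1 : $Builtin.RawPointer)
//   bb2:
//     %g2 = global_addr @globalVar : $*Int
//     %p2 = address_to_pointer %g2 : $*Int to $Builtin.RawPointer
//     br bb3(%p2 : $Builtin.RawPointer)
//   bb3(%phi : $Builtin.RawPointer):
//     %adr = pointer_to_address %phi : $Builtin.RawPointer to $*Int
//
// Both phi inputs reach bases with identical Global storage even though the
// global_addr instructions are distinct, so the useDefVisitor's result
// remains valid across the phi.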
template <typename UseDefVisitor>
class AccessPhiVisitor
: public AccessUseDefChainVisitor<AccessPhiVisitor<UseDefVisitor>> {
UseDefVisitor &useDefVisitor;
StorageCastTy storageCastTy;
Optional<SILValue> commonDefinition;
SmallVector<SILValue, 8> pointerWorklist;
SmallPtrSet<SILPhiArgument *, 4> nestedPhis;
public:
AccessPhiVisitor(UseDefVisitor &useDefVisitor, StorageCastTy storageCastTy)
: useDefVisitor(useDefVisitor), storageCastTy(storageCastTy) {}
// Main entry point.
void findPhiAccess(SILPhiArgument *phiArg) && {
auto savedResult = useDefVisitor.saveResult();
visitPhi(phiArg);
while (!pointerWorklist.empty()) {
this->visit(pointerWorklist.pop_back_val());
}
// If a common path component was found, recursively look for the result.
if (commonDefinition) {
if (commonDefinition.getValue()) {
useDefVisitor.reenterUseDef(commonDefinition.getValue());
} else {
// Divergent paths were found; invalidate any previously discovered
// storage.
useDefVisitor.invalidateResult();
}
}
// If the result is now invalid, reset it and process the current phi as an
// unrecognized access instead.
if (!useDefVisitor.isResultValid()) {
useDefVisitor.restoreResult(savedResult);
visitNonAccess(phiArg);
}
}
// Visitor helper.
void setDefinition(SILValue def) {
if (!commonDefinition) {
commonDefinition = def;
return;
}
if (commonDefinition.getValue() != def)
commonDefinition = SILValue();
}
void checkVisitorResult(SILValue result) {
assert(!result && "must override any visitor that returns a result");
}
// MARK: Visitor implementation.
// Recursively call the original storageVisitor for each base. We can't simply
// look for a common definition on all phi inputs, because the base may be
// cloned on each path. For example, two global_addr instructions may refer to
// the same global storage. Those global_addr instructions may each be
// converted to a RawPointer before being passed into the non-address phi.
void visitBase(SILValue base, AccessedStorage::Kind kind) {
checkVisitorResult(useDefVisitor.visitBase(base, kind));
}
void visitNonAccess(SILValue value) {
checkVisitorResult(useDefVisitor.visitNonAccess(value));
}
void visitNestedAccess(BeginAccessInst *access) {
checkVisitorResult(useDefVisitor.visitNestedAccess(access));
}
void visitPhi(SILPhiArgument *phiArg) {
if (nestedPhis.insert(phiArg).second)
phiArg->getIncomingPhiValues(pointerWorklist);
}
void visitStorageCast(SingleValueInstruction *projectedAddr,
Operand *sourceOper) {
// Allow conversions to/from pointers and addresses on disjoint phi paths
// only if the underlying useDefVisitor allows it.
if (storageCastTy == IgnoreStorageCast)
pointerWorklist.push_back(sourceOper->get());
else
visitNonAccess(projectedAddr);
}
void visitAccessProjection(SingleValueInstruction *projectedAddr,
Operand *sourceOper) {
// An offset index on a phi path is always conservatively considered an
// unknown offset.
if (isa<IndexAddrInst>(projectedAddr) || isa<TailAddrInst>(projectedAddr)) {
useDefVisitor.addUnknownOffset();
pointerWorklist.push_back(sourceOper->get());
return;
}
// No other access projections are expected to occur on disjoint phi
// paths. Stop searching at this projection.
setDefinition(projectedAddr);
}
};
enum NestedAccessTy { StopAtAccessBegin, IgnoreAccessBegin };
// Find the origin of an access while skipping projections and casts and
// handling phis.
template <typename Impl>
class FindAccessVisitorImpl : public AccessUseDefChainVisitor<Impl, SILValue> {
using SuperTy = AccessUseDefChainVisitor<Impl, SILValue>;
protected:
NestedAccessTy nestedAccessTy;
StorageCastTy storageCastTy;
SmallPtrSet<SILPhiArgument *, 4> visitedPhis;
bool hasUnknownOffset = false;
public:
FindAccessVisitorImpl(NestedAccessTy nestedAccessTy,
StorageCastTy storageCastTy)
: nestedAccessTy(nestedAccessTy), storageCastTy(storageCastTy) {}
// MARK: AccessPhiVisitor::UseDefVisitor implementation.
//
// Subclasses must implement:
// isResultValid()
// invalidateResult()
// saveResult()
// restoreResult(Result)
// addUnknownOffset()
void reenterUseDef(SILValue sourceAddr) {
SILValue nextAddr = this->visit(sourceAddr);
while (nextAddr) {
checkNextAddressType(nextAddr, sourceAddr);
nextAddr = this->visit(nextAddr);
}
}
// MARK: visitor implementation.
// Override AccessUseDefChainVisitor to ignore access markers and find the
// outer access base.
SILValue visitNestedAccess(BeginAccessInst *access) {
if (nestedAccessTy == IgnoreAccessBegin)
return access->getSource();
return SuperTy::visitNestedAccess(access);
}
SILValue visitPhi(SILPhiArgument *phiArg) {
// Cycles involving phis are only handled within AccessPhiVisitor.
// Path components are not allowed in phi cycles.
if (visitedPhis.insert(phiArg).second) {
AccessPhiVisitor<Impl>(this->asImpl(), storageCastTy)
.findPhiAccess(phiArg);
// Each phi operand has now been reentrantly processed. Stop visiting.
return SILValue();
}
// Cannot treat unresolved phis as "unidentified" because they may alias
// with global or class access.
return this->asImpl().visitNonAccess(phiArg);
}
SILValue visitStorageCast(SingleValueInstruction *projectedAddr,
Operand *sourceAddr) {
assert(storageCastTy == IgnoreStorageCast);
return sourceAddr->get();
}
SILValue visitAccessProjection(SingleValueInstruction *projectedAddr,
Operand *sourceAddr) {
if (auto *indexAddr = dyn_cast<IndexAddrInst>(projectedAddr)) {
if (!Projection(indexAddr).isValid())
this->asImpl().addUnknownOffset();
} else if (isa<TailAddrInst>(projectedAddr)) {
this->asImpl().addUnknownOffset();
}
return sourceAddr->get();
}
protected:
// Helper for reenterUseDef
void checkNextAddressType(SILValue nextAddr, SILValue sourceAddr) {
#ifdef NDEBUG
return;
#endif
SILType type = nextAddr->getType();
// FIXME: This relatively expensive pointer getAnyPointerElementType check
// is only needed because keypath generation incorrectly produces
// pointer_to_address directly from stdlib Pointer types without a
// struct_extract (as is correctly done in emitAddressorAccessor), and
// the PointerToAddressInst operand type is never verified.
if (type.getASTType()->getAnyPointerElementType())
return;
if (type.isAddress() || isa<SILBoxType>(type.getASTType())
|| isa<BuiltinRawPointerType>(type.getASTType())) {
return;
}
llvm::errs() << "Visiting ";
sourceAddr->dump();
llvm::errs() << " not an address ";
nextAddr->dump();
nextAddr->getFunction()->dump();
assert(false);
}
};
// Implement getAccessAddress, getAccessBegin, and getAccessBase.
class FindAccessBaseVisitor
: public FindAccessVisitorImpl<FindAccessBaseVisitor> {
using SuperTy = FindAccessVisitorImpl<FindAccessBaseVisitor>;
protected:
Optional<SILValue> base;
public:
FindAccessBaseVisitor(NestedAccessTy nestedAccessTy,
StorageCastTy storageCastTy)
: FindAccessVisitorImpl(nestedAccessTy, storageCastTy) {}
// Returns the accessed address or an invalid SILValue.
SILValue findBase(SILValue sourceAddr) && {
reenterUseDef(sourceAddr);
return base.getValueOr(SILValue());
}
void setResult(SILValue foundBase) {
if (!base)
base = foundBase;
else if (base.getValue() != foundBase)
base = SILValue();
}
// MARK: AccessPhiVisitor::UseDefVisitor implementation.
bool isResultValid() const { return base && bool(base.getValue()); }
void invalidateResult() { base = SILValue(); }
Optional<SILValue> saveResult() const { return base; }
void restoreResult(Optional<SILValue> result) { base = result; }
void addUnknownOffset() { return; }
// MARK: visitor implementation.
SILValue visitBase(SILValue base, AccessedStorage::Kind kind) {
setResult(base);
return SILValue();
}
SILValue visitNonAccess(SILValue value) {
setResult(value);
return SILValue();
}
// Override visitStorageCast to avoid seeing through arbitrary address casts.
SILValue visitStorageCast(SingleValueInstruction *projectedAddr,
Operand *sourceAddr) {
if (storageCastTy == StopAtStorageCast)
return visitNonAccess(projectedAddr);
return SuperTy::visitStorageCast(projectedAddr, sourceAddr);
}
};
} // end anonymous namespace
SILValue swift::getAccessAddress(SILValue address) {
assert(address->getType().isAddress());
SILValue accessAddress =
FindAccessBaseVisitor(StopAtAccessBegin, StopAtStorageCast)
.findBase(address);
assert(accessAddress->getType().isAddress());
return accessAddress;
}
// TODO: When the optimizer stops stripping begin_access markers and SILGen
// protects all memory operations with at least an "unsafe" access scope, then
// we should be able to assert that this returns a BeginAccessInst.
SILValue swift::getAccessBegin(SILValue address) {
assert(address->getType().isAddress());
return FindAccessBaseVisitor(StopAtAccessBegin, IgnoreStorageCast)
.findBase(address);
}
// This is allowed to be called on a non-address pointer type.
SILValue swift::getAccessBase(SILValue address) {
return FindAccessBaseVisitor(IgnoreAccessBegin, IgnoreStorageCast)
.findBase(address);
}
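// For illustration, a hedged sketch of how the three entry points above
// differ (hypothetical SIL; the class C, struct S, and fields are
// assumptions):
//
//   %base = ref_element_addr %obj : $C, #C.prop
//   %scope = begin_access [modify] [dynamic] %base : $*S
//   %elt = struct_element_addr %scope : $*S, #S.x
//
// For %elt, getAccessAddress and getAccessBegin both stop at %scope (the
// begin_access), while getAccessBase looks through the access marker and
// returns %base. The first two differ only in how they treat storage casts:
// getAccessAddress stops at casts such as pointer_to_address, whereas
// getAccessBegin looks through them.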
bool swift::isLetAddress(SILValue address) {
SILValue base = getAccessBase(address);
if (!base)
return false;
// Is this an address of a "let" class member?
if (auto *rea = dyn_cast<RefElementAddrInst>(base))
return rea->getField()->isLet();
// Is this an address of a global "let"?
if (auto *gai = dyn_cast<GlobalAddrInst>(base)) {
auto *globalDecl = gai->getReferencedGlobal()->getDecl();
return globalDecl && globalDecl->isLet();
}
return false;
}
//===----------------------------------------------------------------------===//
// MARK: FindReferenceRoot
//===----------------------------------------------------------------------===//
namespace {
// Essentially RC identity where the starting point is already a reference.
//
// FIXME: We cannot currently see through type casts for true RC identity,
// because property indices are not unique within a class hierarchy. Either fix
// RefElementAddr::getFieldNo so it returns a unique index, or figure out a
// different way to encode the property's VarDecl. (Note that the lack of a
// unique property index could be the source of bugs elsewhere).
class FindReferenceRoot {
SmallPtrSet<SILPhiArgument *, 4> visitedPhis;
public:
SILValue findRoot(SILValue ref) && {
SILValue root = recursiveFindRoot(ref);
assert(root && "all phi inputs must be reachable");
return root;
}
protected:
// Return an invalid value for a phi with no resolved inputs.
//
// FIXME: We should be able to see through RC identity like this:
// nextRoot = stripRCIdentityCasts(nextRoot);
SILValue recursiveFindRoot(SILValue ref) {
while (true) {
SILValue nextRoot = ref;
nextRoot = stripOwnershipInsts(nextRoot);
if (nextRoot == ref)
break;
ref = nextRoot;
}
auto *phi = dyn_cast<SILPhiArgument>(ref);
if (!phi || !phi->isPhiArgument()) {
return ref;
}
// Handle phis...
if (!visitedPhis.insert(phi).second) {
return SILValue();
}
SILValue commonInput;
phi->visitIncomingPhiOperands([&](Operand *operand) {
SILValue input = recursiveFindRoot(operand->get());
// Ignore "back/cross edges" to previously visited phis.
if (!input)
return true;
if (!commonInput) {
commonInput = input;
return true;
}
if (commonInput == input)
return true;
commonInput = phi;
return false;
});
return commonInput;
}
};
} // end anonymous namespace
static SILValue findReferenceRoot(SILValue ref) {
return FindReferenceRoot().findRoot(ref);
}
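// For illustration (hypothetical SIL): ownership instructions (copy_value,
// begin_borrow) and phis with a common input are looked through, so given
//
//   %root = ...                     // reference root
//   %c = copy_value %root : $C
//   br bb3(%c : $C)
//   ...
//   bb3(%phi : $C):
//
// findReferenceRoot(%phi) resolves to %root as long as every phi input strips
// back to the same value; if the inputs diverge, the phi itself is the root.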
//===----------------------------------------------------------------------===//
// MARK: AccessedStorage
//===----------------------------------------------------------------------===//
constexpr unsigned AccessedStorage::TailIndex;
AccessedStorage::AccessedStorage(SILValue base, Kind kind) {
assert(base && "invalid storage base");
initKind(kind);
switch (kind) {
case Box:
assert(isa<AllocBoxInst>(base));
value = base;
break;
case Stack:
assert(isa<AllocStackInst>(base));
value = base;
break;
case Nested:
assert(isa<BeginAccessInst>(base));
value = base;
break;
case Yield:
assert(isa<BeginApplyResult>(base));
value = base;
break;
case Unidentified:
value = base;
break;
case Argument:
value = base;
setElementIndex(cast<SILFunctionArgument>(base)->getIndex());
break;
case Global:
if (auto *GAI = dyn_cast<GlobalAddrInst>(base))
global = GAI->getReferencedGlobal();
else {
FullApplySite apply(cast<ApplyInst>(base));
auto *funcRef = apply.getReferencedFunctionOrNull();
assert(funcRef);
global = getVariableOfGlobalInit(funcRef);
assert(global);
// Require a decl for all formally accessed globals defined in this
// module. (Access of globals defined elsewhere has Unidentified storage).
// AccessEnforcementWMO requires this.
assert(global->getDecl());
}
break;
case Class: {
// Make a best effort to find the identity of the object being projected
// from. It is OK to be unsound here (i.e. miss when two ref_element_addrs
// actually refer to the same address) because, when the effort fails, static
// analysis will be sufficiently conservative given that classes are not
// "uniquely identified", and these addresses will be dynamically checked.
auto *REA = cast<RefElementAddrInst>(base);
value = findReferenceRoot(REA->getOperand());
setElementIndex(REA->getFieldIndex());
break;
}
case Tail: {
auto *RTA = cast<RefTailAddrInst>(base);
value = findReferenceRoot(RTA->getOperand());
break;
}
}
}
// Return true if the given access is on a 'let' lvalue.
bool AccessedStorage::isLetAccess(SILFunction *F) const {
if (auto *decl = dyn_cast_or_null<VarDecl>(getDecl()))
return decl->isLet();
// It's unclear whether a global will ever be missing its VarDecl, but
// technically we only preserve it for debug info. So if we don't have a decl,
// check the flag on SILGlobalVariable, which is guaranteed valid.
if (getKind() == AccessedStorage::Global)
return getGlobal()->isLet();
return false;
}
const ValueDecl *AccessedStorage::getDecl() const {
switch (getKind()) {
case Box:
return cast<AllocBoxInst>(value)->getLoc().getAsASTNode<VarDecl>();
case Stack:
return cast<AllocStackInst>(value)->getDecl();
case Global:
return global->getDecl();
case Class: {
auto *decl = getObject()->getType().getNominalOrBoundGenericNominal();
return getIndexedField(decl, getPropertyIndex());
}
case Tail:
return nullptr;
case Argument:
return getArgument()->getDecl();
case Yield:
return nullptr;
case Nested:
return nullptr;
case Unidentified:
return nullptr;
}
llvm_unreachable("unhandled kind");
}
const char *AccessedStorage::getKindName(AccessedStorage::Kind k) {
switch (k) {
case Box:
return "Box";
case Stack:
return "Stack";
case Nested:
return "Nested";
case Unidentified:
return "Unidentified";
case Argument:
return "Argument";
case Yield:
return "Yield";
case Global:
return "Global";
case Class:
return "Class";
case Tail:
return "Tail";
}
llvm_unreachable("unhandled kind");
}
void AccessedStorage::print(raw_ostream &os) const {
if (!*this) {
os << "INVALID\n";
return;
}
os << getKindName(getKind()) << " ";
switch (getKind()) {
case Box:
case Stack:
case Nested:
case Yield:
case Unidentified:
os << value;
break;
case Argument:
os << value;
break;
case Global:
os << *global;
break;
case Class:
os << getObject();
os << " Field: ";
getDecl()->print(os);
os << " Index: " << getPropertyIndex() << "\n";
break;
case Tail:
os << getObject();
}
}
LLVM_ATTRIBUTE_USED void AccessedStorage::dump() const { print(llvm::dbgs()); }
namespace {
// Implementation of AccessUseDefChainVisitor that looks for a single common
// AccessedStorage object for all projection paths.
class FindAccessedStorageVisitor
: public FindAccessVisitorImpl<FindAccessedStorageVisitor> {
public:
struct Result {
Optional<AccessedStorage> storage;
SILValue base;
};
private:
Result result;
void setResult(AccessedStorage foundStorage, SILValue foundBase) {
if (!result.storage) {
result.storage = foundStorage;
assert(!result.base);
result.base = foundBase;
} else {
// `storage` may still be invalid. If both `storage` and `foundStorage`
// are invalid, this check passes, but we return an invalid storage
// below.
if (!result.storage->hasIdenticalBase(foundStorage))
result.storage = AccessedStorage();
if (result.base != foundBase)
result.base = SILValue();
}
}
public:
FindAccessedStorageVisitor(NestedAccessTy nestedAccessTy)
: FindAccessVisitorImpl(nestedAccessTy, IgnoreStorageCast) {}
// Main entry point
void findStorage(SILValue sourceAddr) { this->reenterUseDef(sourceAddr); }
AccessedStorage getStorage() const {
return result.storage.getValueOr(AccessedStorage());
}
// getBase may return an invalid value for valid Global storage because there
// may be multiple global_addr bases for identical storage.
SILValue getBase() const { return result.base; }
// MARK: AccessPhiVisitor::UseDefVisitor implementation.
// A valid result requires valid storage, but not a valid base.
bool isResultValid() const {
return result.storage && bool(result.storage.getValue());
}
void invalidateResult() { setResult(AccessedStorage(), SILValue()); }
Result saveResult() const { return result; }
void restoreResult(Result savedResult) { result = savedResult; }
void addUnknownOffset() { return; }
// MARK: visitor implementation.
SILValue visitBase(SILValue base, AccessedStorage::Kind kind) {
setResult(AccessedStorage(base, kind), base);
return SILValue();
}
SILValue visitNonAccess(SILValue value) {
invalidateResult();
return SILValue();
}
};
} // end anonymous namespace
AccessedStorage swift::findAccessedStorage(SILValue sourceAddr) {
FindAccessedStorageVisitor visitor(IgnoreAccessBegin);
visitor.findStorage(sourceAddr);
return visitor.getStorage();
}
AccessedStorage swift::identifyAccessedStorageImpl(SILValue sourceAddr) {
FindAccessedStorageVisitor visitor(StopAtAccessBegin);
visitor.findStorage(sourceAddr);
return visitor.getStorage();
}
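// A hedged note on the difference between the two entry points above:
// findAccessedStorage looks through begin_access markers to the underlying
// storage, while identifyAccessedStorageImpl stops at the first begin_access
// it reaches and reports it as Nested storage. For example (hypothetical SIL):
//
//   %scope = begin_access [read] [static] %base : $*T
//   %elt = struct_element_addr %scope : $*T, #T.x
//
// findAccessedStorage(%elt) describes the storage of %base, whereas
// identifyAccessedStorageImpl(%elt) yields Nested storage for %scope.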
//===----------------------------------------------------------------------===//
// AccessPath
//===----------------------------------------------------------------------===//
bool AccessPath::contains(AccessPath subPath) const {
assert(isValid() && subPath.isValid());
if (!storage.hasIdenticalBase(subPath.storage))
return false;
// Does the offset index match?
if (offset != subPath.offset || offset == UnknownOffset)
return false;
return pathNode.node->isPrefixOf(subPath.pathNode.node);
}
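// For example (hypothetical paths over the same storage with zero offsets):
// the path for s.x, written (#0) if x is field zero, contains the path for
// s.x.y, written (#0,#1), because the former's trie node is a prefix of the
// latter's. A path with an unknown offset never contains anything.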
bool AccessPath::mayOverlap(AccessPath otherPath) const {
assert(isValid() && otherPath.isValid());
if (storage.isDistinctFrom(otherPath.storage))
return false;
// If subpaths are disjoint, they do not overlap regardless of offset.
if (!pathNode.node->isPrefixOf(otherPath.pathNode.node)
&& !otherPath.pathNode.node->isPrefixOf(pathNode.node)) {
return true;
}
return offset == otherPath.offset || offset == UnknownOffset
|| otherPath.offset == UnknownOffset;
}
namespace {
// Implementation of AccessUseDefChainVisitor that builds an AccessPath.
class AccessPathVisitor : public FindAccessVisitorImpl<AccessPathVisitor> {
using SuperTy = FindAccessVisitorImpl<AccessPathVisitor>;
SILModule *module;
// This nested visitor holds the AccessedStorage and base results.
FindAccessedStorageVisitor storageVisitor;
// Save just enough information to checkpoint before processing phis. Phis
// can add path components and an unknown offset.
struct Result {
FindAccessedStorageVisitor::Result storageResult;
int savedOffset;
unsigned pathLength;
Result(FindAccessedStorageVisitor::Result storageResult, int offset,
unsigned pathLength)
: storageResult(storageResult), savedOffset(offset),
pathLength(pathLength) {}
};
// Only access projections affect this path. Since they are not allowed
// beyond phis, this path is not part of AccessPathVisitor::Result.
llvm::SmallVector<AccessPath::Index, 8> reversePath;
// Holds a non-zero value if an index_addr has been processed without yet
// creating a path index for it.
int pendingOffset = 0;
public:
AccessPathVisitor(SILModule *module)
: FindAccessVisitorImpl(IgnoreAccessBegin, IgnoreStorageCast),
module(module), storageVisitor(IgnoreAccessBegin) {}
// Main entry point.
AccessPathWithBase findAccessPath(SILValue sourceAddr) && {
this->reenterUseDef(sourceAddr);
if (auto storage = storageVisitor.getStorage()) {
return AccessPathWithBase(
AccessPath(storage, computeForwardPath(), pendingOffset),
storageVisitor.getBase());
}
return AccessPathWithBase(AccessPath(), SILValue());
}
protected:
void addPathOffset(int offset) {
if (pendingOffset == AccessPath::UnknownOffset)
return;
if (offset == AccessPath::UnknownOffset) {
pendingOffset = offset;
return;
}
// Accumulate static offsets
pendingOffset = pendingOffset + offset;
}
// Return the trie node corresponding to the current state of reversePath.
AccessPath::PathNode computeForwardPath() {
IndexTrieNode *forwardPath = module->getIndexTrieRoot();
for (AccessPath::Index nextIndex : llvm::reverse(reversePath)) {
forwardPath = forwardPath->getChild(nextIndex.getEncoding());
}
return AccessPath::PathNode(forwardPath);
}
public:
// MARK: AccessPhiVisitor::UseDefVisitor implementation.
bool isResultValid() const { return storageVisitor.isResultValid(); }
void invalidateResult() {
storageVisitor.invalidateResult();
// Don't clear reversePath. We may call restoreResult later.
pendingOffset = 0;
}
Result saveResult() const {
return Result(storageVisitor.saveResult(), pendingOffset,
reversePath.size());
}
void restoreResult(Result result) {
storageVisitor.restoreResult(result.storageResult);
pendingOffset = result.savedOffset;
assert(result.pathLength <= reversePath.size()
&& "a phi should only add to the path");
reversePath.erase(reversePath.begin() + result.pathLength,
reversePath.end());
}
void addUnknownOffset() { pendingOffset = AccessPath::UnknownOffset; }
// MARK: visitor implementation. Return the address source as the next use-def
// value to process. An invalid SILValue stops use-def traversal.
SILValue visitBase(SILValue base, AccessedStorage::Kind kind) {
return storageVisitor.visitBase(base, kind);
}
SILValue visitNonAccess(SILValue value) {
invalidateResult();
return SILValue();
}
// Override FindAccessVisitorImpl to record path components.
SILValue visitAccessProjection(SingleValueInstruction *projectedAddr,
Operand *sourceAddr) {
auto projIdx = ProjectionIndex(projectedAddr);
if (auto *indexAddr = dyn_cast<IndexAddrInst>(projectedAddr)) {
addPathOffset(projIdx.isValid() ? projIdx.Index
: AccessPath::UnknownOffset);
} else if (isa<TailAddrInst>(projectedAddr)) {
addPathOffset(AccessPath::UnknownOffset);
} else if (projIdx.isValid()) {
if (pendingOffset) {
LLVM_DEBUG(llvm::dbgs() << "Subobject projection with offset index: "
<< *projectedAddr);
// Return an invalid result even though findAccessedStorage() may be
// able to find valid storage, because an offset from a subobject is an
// invalid access path.
return visitNonAccess(projectedAddr);
}
reversePath.push_back(
AccessPath::Index::forSubObjectProjection(projIdx.Index));
} else {
// Ignore everything in getAccessProjectionOperand that is an access
// projection with no effect on the access path.
assert(isa<OpenExistentialAddrInst>(projectedAddr)
|| isa<UncheckedTakeEnumDataAddrInst>(projectedAddr)
|| isa<ProjectBoxInst>(projectedAddr));
}
return sourceAddr->get();
}
};
} // end anonymous namespace
AccessPathWithBase AccessPathWithBase::compute(SILValue address) {
return AccessPathVisitor(address->getModule()).findAccessPath(address);
}
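// For illustration (hypothetical SIL), computing the path of a nested
// projection:
//
//   %a = alloc_stack $S
//   %f = struct_element_addr %a : $*S, #S.x        // assume x is field 0
//   %t = tuple_element_addr %f : $*(Int, Int), 1
//
// AccessPath::compute(%t) yields Stack storage rooted at %a with path (#0,#1)
// and a zero offset. An index_addr with a non-constant index anywhere along
// the way would instead record an unknown offset.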
namespace {
// Perform def-use DFS traversal along a given AccessPath. DFS terminates at
// each discovered use.
//
// If \p collectOverlappingUses is false, then the collected uses all have the
// same AccessPath. Uses that exactly match the AccessPath may either be exact
// uses, or may be subobject projections within that access path, including
// struct_element_addr and tuple_element_addr. The transitive uses of those
// subobject projections are not included.
//
// If \p collectOverlappingUses is true, then the collected uses also include
// uses that access an object that contains the given AccessPath. As before,
// overlapping uses do not include transitive uses of subobject projections
// contained by the current path; the def-use traversal stops at those
// projections regardless of collectOverlappingUses. However, overlapping uses
// may be at an unknown offset relative to the current path, so they don't
// necessarily contain the current path.
//
// Example: path = "(#2)"
// %base = ... // access base
// load %base // containing use
// %elt1 = struct_element_addr %base, #1 // non-use (ignored)
// load %elt1 // non-use (unseen)
// %elt2 = struct_element_addr %base, #2 // chained use (ignored)
// load %elt2 // exact use
// %sub = struct_element_addr %elt2, #i // projection use
// load %sub // interior use (ignored)
//
// A use may be a BranchInst if the corresponding phi does not have common
// AccessedStorage.
//
// For class storage, the def-use traversal starts at the reference
// root. Eventually, the traversal reaches the base address of the formal access:
//
// %ref = ... // reference root
// %base = ref_element_addr %ref // formal access address
// load %base // use
class CollectAccessPathUses {
// Origin of the def-use traversal
AccessedStorage storage;
// Result: Exact uses, projection uses, and containing uses.
SmallVectorImpl<Operand *> &uses;
bool collectOverlappingUses;
unsigned useLimit;
// Access path indices from storage to exact uses
SmallVector<AccessPath::Index, 4> pathIndices; // in use-def order
// A point in the def-use traversal. isRef() is true only for object access
// prior to reaching the base address.
struct DFSEntry {
// Next potential use to visit and flag indicating whether traversal has
// reached the access base yet.
llvm::PointerIntPair<Operand *, 1, bool> useAndIsRef;
unsigned pathCursor; // position within pathIndices
int offset; // index_addr offsets seen prior to this use
DFSEntry(Operand *use, bool isRef, unsigned pathCursor, int offset)
: useAndIsRef(use, isRef), pathCursor(pathCursor), offset(offset) {}
Operand *getUse() const { return useAndIsRef.getPointer(); }
// Is this pointer a reference?
bool isRef() const { return useAndIsRef.getInt(); }
};
SmallVector<DFSEntry, 16> dfsStack;
SmallPtrSet<const SILPhiArgument *, 4> visitedPhis;
public:
CollectAccessPathUses(AccessPath accessPath, SmallVectorImpl<Operand *> &uses,
bool collectOverlappingUses, unsigned useLimit)
: storage(accessPath.getStorage()), uses(uses),
collectOverlappingUses(collectOverlappingUses), useLimit(useLimit) {
assert(accessPath.isValid());
for (AccessPath::PathNode currentNode = accessPath.getPathNode();
!currentNode.isRoot(); currentNode = currentNode.getParent()) {
assert(currentNode.getIndex().isSubObjectProjection() &&
"a valid AccessPath does not contain any intermediate offsets");
pathIndices.push_back(currentNode.getIndex());
}
if (int offset = accessPath.getOffset())
pathIndices.push_back(AccessPath::Index::forOffset(offset));
// The search will start from the object root, not the formal access base,
// so add the class index to the front.
auto storage = accessPath.getStorage();
if (storage.getKind() == AccessedStorage::Class) {
pathIndices.push_back(AccessPath::Index::forSubObjectProjection(
storage.getPropertyIndex()));
}
if (storage.getKind() == AccessedStorage::Tail) {
pathIndices.push_back(AccessPath::Index::forSubObjectProjection(
ProjectionIndex::TailIndex));
}
}
// Return true if all uses were collected. This is always true as long as the
// access has a single root, or globalBase is provided, and there is no
// useLimit.
//
// For Global storage \p globalBase must be provided as the head of the
// def-use search.
bool collectUses(SILValue globalBase = SILValue()) && {
SILValue root = storage.getRoot();
if (!root) {
assert(storage.getKind() == AccessedStorage::Global);
if (!globalBase)
return false;
root = globalBase;
}
// If the expected path has an unknown offset, then none of the uses are
// exact.
if (!collectOverlappingUses && !pathIndices.empty()
&& pathIndices.back().isUnknownOffset()) {
return true;
}
pushUsers(root,
DFSEntry(nullptr, storage.isReference(), pathIndices.size(), 0));
while (!dfsStack.empty()) {
if (!visitUser(dfsStack.pop_back_val()))
return false;
}
return true;
}
protected:
void pushUsers(SILValue def, const DFSEntry &dfs) {
for (auto *use : def->getUses())
pushUser(DFSEntry(use, dfs.isRef(), dfs.pathCursor, dfs.offset));
}
void pushUser(DFSEntry dfs) {
Operand *use = dfs.getUse();
if (auto *bi = dyn_cast<BranchInst>(use->getUser())) {
if (pushPhiUses(bi->getArgForOperand(use), dfs))
return;
}
// If we didn't find and process a phi, continue DFS.
dfsStack.emplace_back(dfs);
}
// Return true if this phi has been processed and does not need to be
// considered as a separate use.
bool pushPhiUses(const SILPhiArgument *phi, DFSEntry dfs) {
if (!visitedPhis.insert(phi).second)
return true;
// If this phi has a common base, continue to follow the access path. This
// check is different for reference types vs pointer types.
if (dfs.isRef()) {
assert(!dfs.offset && "index_addr not allowed on reference roots");
// When isRef is true, the address access hasn't been seen yet and
// we're still following the reference root's users. Check if all phi
// inputs have the same reference root before looking through it.
if (findReferenceRoot(phi) == storage.getObject()) {
pushUsers(phi, dfs);
return true;
}
// The branch will be pushed onto the normal user list.
return false;
}
// Check if all phi inputs have the same accessed storage before
// looking through it. If the phi inputs differ, then its storage is invalid.
auto phiPath = AccessPath::compute(phi);
if (phiPath.isValid()) {
assert(phiPath.getStorage().hasIdenticalBase(storage)
&& "inconsistent phi storage");
// If the phi paths have different offsets, then its path has an unknown offset.
if (phiPath.getOffset() == AccessPath::UnknownOffset) {
if (!collectOverlappingUses)
return true;
dfs.offset = AccessPath::UnknownOffset;
}
pushUsers(phi, dfs);
return true;
}
// The branch will be pushed onto the normal user list.
return false;
}
// Return the offset at the current DFS path cursor, or zero.
int getPathOffset(const DFSEntry &dfs) const {
if (dfs.pathCursor == 0
|| pathIndices[dfs.pathCursor - 1].isSubObjectProjection()) {
return 0;
}
return pathIndices[dfs.pathCursor - 1].getOffset();
}
// Returns true as long as the useLimit is not reached.
bool visitUser(DFSEntry dfs) {
Operand *use = dfs.getUse();
assert(!(dfs.isRef() && use->get()->getType().isAddress()));
if (auto *svi = dyn_cast<SingleValueInstruction>(use->getUser())) {
if (use->getOperandNumber() == 0
&& visitSingleValueUser(svi, dfs) == IgnoredUse) {
return true;
}
}
// We weren't able to "see through" any more address conversions, so
// record this as a use.
// Do the path offsets match?
if (!checkAndUpdateOffset(dfs))
return true;
// Is this a full or partial path match?
if (!collectOverlappingUses && dfs.pathCursor > 0)
return true;
// Record the use if we haven't reached the limit.
if (uses.size() == useLimit)
return false;
uses.push_back(use);
return true;
}
// Return true if the accumulated offset matches the current path index.
// Update the DFSEntry and pathCursor to skip remaining offsets.
bool checkAndUpdateOffset(DFSEntry &dfs) {
int pathOffset = getPathOffset(dfs);
if (pathOffset == 0) {
// No offset is on the expected path.
if (collectOverlappingUses && dfs.offset == AccessPath::UnknownOffset) {
dfs.offset = 0;
}
return dfs.offset == 0;
}
// pop the offset from the expected path; there should only be one.
--dfs.pathCursor;
assert(getPathOffset(dfs) == 0 && "only one offset index allowed");
int useOffset = dfs.offset;
dfs.offset = 0;
// Ignore all uses on this path unless we're collecting containing uses.
// UnknownOffset appears to overlap with all offsets and subobject uses.
if (pathOffset == AccessPath::UnknownOffset
|| useOffset == AccessPath::UnknownOffset) {
return collectOverlappingUses;
}
// A known offset must match regardless of collectOverlappingUses.
return pathOffset == useOffset;
}
enum UseKind { LeafUse, IgnoredUse };
UseKind visitSingleValueUser(SingleValueInstruction *svi, DFSEntry dfs);
// Handle non-index_addr projections.
UseKind followProjection(SingleValueInstruction *svi, DFSEntry dfs) {
if (!checkAndUpdateOffset(dfs))
return IgnoredUse;
if (dfs.pathCursor == 0)
return LeafUse;
AccessPath::Index pathIndex = pathIndices[dfs.pathCursor - 1];
auto projIdx = ProjectionIndex(svi);
assert(projIdx.isValid());
// Only subobject indices are expected because offsets are handled above.
if (projIdx.Index == pathIndex.getSubObjectIndex()) {
--dfs.pathCursor;
pushUsers(svi, dfs);
}
return IgnoredUse;
}
};
} // end anonymous namespace
// During the def-use traversal, visit a single-value instruction in which the
// used address is at operand zero.
//
// This must handle the def-use side of all operations that
// AccessUseDefChainVisitor::visit can handle.
//
// Return IgnoredUse if the def-use traversal either continues past \p
// svi or ignores this use.
//
// FIXME: Reuse getAccessProjectionOperand() instead of using special cases once
// the unchecked_take_enum_data_addr -> load -> project_box pattern is fixed.
CollectAccessPathUses::UseKind
CollectAccessPathUses::visitSingleValueUser(SingleValueInstruction *svi,
DFSEntry dfs) {
if (isAccessedStorageCast(svi)) {
pushUsers(svi, dfs);
return IgnoredUse;
}
switch (svi->getKind()) {
default:
return LeafUse;
case SILInstructionKind::BeginAccessInst:
pushUsers(svi, dfs);
return IgnoredUse;
// Handle ref_element_addr since we start at the object root instead of
// the access base.
case SILInstructionKind::RefElementAddrInst:
assert(dfs.isRef());
assert(dfs.pathCursor > 0 && "ref_element_addr cannot occur within access");
dfs.useAndIsRef.setInt(false);
return followProjection(svi, dfs);
case SILInstructionKind::RefTailAddrInst: {
assert(dfs.isRef());
assert(dfs.pathCursor > 0 && "ref_tail_addr cannot occur within an access");
dfs.useAndIsRef.setInt(false);
--dfs.pathCursor;
AccessPath::Index pathIndex = pathIndices[dfs.pathCursor];
assert(pathIndex.isSubObjectProjection());
if (pathIndex.getSubObjectIndex() == AccessedStorage::TailIndex)
pushUsers(svi, dfs);
return IgnoredUse;
}
// MARK: Access projections
case SILInstructionKind::StructElementAddrInst:
case SILInstructionKind::TupleElementAddrInst:
return followProjection(svi, dfs);
case SILInstructionKind::IndexAddrInst:
case SILInstructionKind::TailAddrInst: {
auto projIdx = ProjectionIndex(svi);
if (projIdx.isValid()) {
if (dfs.offset != AccessPath::UnknownOffset)
dfs.offset += projIdx.Index;
else
assert(collectOverlappingUses);
} else if (collectOverlappingUses) {
dfs.offset = AccessPath::UnknownOffset;
} else {
return IgnoredUse;
}
pushUsers(svi, dfs);
return IgnoredUse;
}
// open_existential_addr and unchecked_take_enum_data_addr are classified as
// access projections, but they also modify memory. We both see through them
// and report them as uses.
case SILInstructionKind::OpenExistentialAddrInst:
case SILInstructionKind::UncheckedTakeEnumDataAddrInst:
pushUsers(svi, dfs);
return LeafUse;
case SILInstructionKind::StructExtractInst:
// Handle nested access to a KeyPath projection. The projection itself
// uses a Builtin. However, the returned UnsafeMutablePointer may be
// converted to an address and accessed via an inout argument.
if (isUnsafePointerExtraction(cast<StructExtractInst>(svi))) {
pushUsers(svi, dfs);
return IgnoredUse;
}
return LeafUse;
case SILInstructionKind::LoadInst:
// Load a box from an indirect payload of an opaque enum. See comments
// in AccessUseDefChainVisitor::visit. Record this load as a leaf-use even
// when we look through its project_box because anyone inspecting the load
// itself will see the same AccessPath.
// FIXME: if this doesn't go away with opaque values, add a new instruction
// for load+project_box.
if (svi->getType().is<SILBoxType>()) {
Operand *addrOper = &cast<LoadInst>(svi)->getOperandRef();
assert(isa<UncheckedTakeEnumDataAddrInst>(addrOper->get()));
// Push the project_box uses
for (auto *use : svi->getUses()) {
if (isa<ProjectBoxInst>(use->getUser()))
pushUser(DFSEntry(use, dfs.isRef(), dfs.pathCursor, dfs.offset));
}
}
return LeafUse;
}
}
bool AccessPath::collectUses(SmallVectorImpl<Operand *> &uses,
bool collectOverlappingUses,
unsigned useLimit) const {
return CollectAccessPathUses(*this, uses, collectOverlappingUses, useLimit)
.collectUses();
}
bool AccessPathWithBase::collectUses(SmallVectorImpl<Operand *> &uses,
bool collectOverlappingUses,
unsigned useLimit) const {
CollectAccessPathUses collector(accessPath, uses, collectOverlappingUses,
useLimit);
if (accessPath.getRoot())
return std::move(collector).collectUses();
if (!base)
return false;
return std::move(collector).collectUses(base);
}
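// A minimal usage sketch (hypothetical caller code; the chosen limit is an
// assumption, not a library default):
//
//   SmallVector<Operand *, 8> uses;
//   auto pathAndBase = AccessPathWithBase::compute(addr);
//   if (pathAndBase.accessPath.isValid()
//       && pathAndBase.collectUses(uses, /*collectOverlappingUses=*/true,
//                                  std::numeric_limits<unsigned>::max())) {
//     // 'uses' now holds the exact and overlapping uses reachable from the
//     // storage root (or from 'base' for Global storage).
//   }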
void AccessPath::Index::print(raw_ostream &os) const {
if (isSubObjectProjection())
os << '#' << getSubObjectIndex();
else {
os << '@';
if (isUnknownOffset())
os << "Unknown";
else
os << getOffset();
}
}
LLVM_ATTRIBUTE_USED void AccessPath::Index::dump() const {
print(llvm::dbgs());
}
static void recursivelyPrintPath(AccessPath::PathNode node, raw_ostream &os) {
AccessPath::PathNode parent = node.getParent();
if (!parent.isRoot()) {
recursivelyPrintPath(parent, os);
os << ",";
}
node.getIndex().print(os);
}
void AccessPath::printPath(raw_ostream &os) const {
os << "Path: ";
if (!isValid()) {
os << "INVALID\n";
return;
}
os << "(";
PathNode node = getPathNode();
if (offset != 0) {
Index::forOffset(offset).print(os);
if (!node.isRoot())
os << ",";
}
if (!node.isRoot())
recursivelyPrintPath(node, os);
os << ")\n";
}
void AccessPath::print(raw_ostream &os) const {
if (!isValid()) {
os << "INVALID\n";
return;
}
os << "Storage: ";
getStorage().print(os);
printPath(os);
}
LLVM_ATTRIBUTE_USED void AccessPath::dump() const { print(llvm::dbgs()); }
void AccessPathWithBase::print(raw_ostream &os) const {
if (base)
os << "Base: " << base;
accessPath.print(os);
}
LLVM_ATTRIBUTE_USED void AccessPathWithBase::dump() const {
print(llvm::dbgs());
}
//===----------------------------------------------------------------------===//
// MARK: Helper API for specific formal access patterns
//===----------------------------------------------------------------------===//
static bool isScratchBuffer(SILValue value) {
// Special case unsafe value buffer access.
return value->getType().is<BuiltinUnsafeValueBufferType>();
}
bool swift::memInstMustInitialize(Operand *memOper) {
SILValue address = memOper->get();
SILInstruction *memInst = memOper->getUser();
switch (memInst->getKind()) {
default:
return false;
case SILInstructionKind::CopyAddrInst: {
auto *CAI = cast<CopyAddrInst>(memInst);
return CAI->getDest() == address && CAI->isInitializationOfDest();
}
case SILInstructionKind::InitExistentialAddrInst:
case SILInstructionKind::InitEnumDataAddrInst:
case SILInstructionKind::InjectEnumAddrInst:
return true;
case SILInstructionKind::StoreInst:
return cast<StoreInst>(memInst)->getOwnershipQualifier()
== StoreOwnershipQualifier::Init;
#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
case SILInstructionKind::Store##Name##Inst: \
return cast<Store##Name##Inst>(memInst)->isInitializationOfDest();
#include "swift/AST/ReferenceStorage.def"
}
}
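// For illustration (hypothetical SIL): memInstMustInitialize returns true for
// the destination operands of
//
//   store %v to [init] %addr : $*T
//   copy_addr %src to [initialization] %dst : $*T
//
// but false for a store with the [assign] qualifier or for a copy_addr
// without [initialization].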
bool swift::isSingleInitAllocStack(AllocStackInst *asi,
SmallVectorImpl<Operand *> &destroyingUses) {
// For now, we just look through projections and rely on memInstMustInitialize
// to classify all other uses as init or not.
SmallVector<Operand *, 32> worklist(asi->getUses());
bool foundInit = false;
while (!worklist.empty()) {
auto *use = worklist.pop_back_val();
auto *user = use->getUser();
if (Projection::isAddressProjection(user)
|| isa<OpenExistentialAddrInst>(user)) {
// Look through address projections.
for (SILValue r : user->getResults()) {
llvm::copy(r->getUses(), std::back_inserter(worklist));
}
continue;
}
if (auto *li = dyn_cast<LoadInst>(user)) {
// If we are not taking,
if (li->getOwnershipQualifier() != LoadOwnershipQualifier::Take) {
continue;
}
// Treat load [take] as a write.
return false;
}
switch (user->getKind()) {
default:
break;
case SILInstructionKind::DestroyAddrInst:
destroyingUses.push_back(use);
continue;
case SILInstructionKind::DeallocStackInst:
case SILInstructionKind::LoadBorrowInst:
case SILInstructionKind::DebugValueAddrInst:
continue;
}
// See if we have an initializer and that the initializer is in the same
// block as the alloc_stack.
if (memInstMustInitialize(use)) {
if (user->getParent() != asi->getParent() || foundInit) {
return false;
}
foundInit = true;
continue;
}
// Otherwise, if we have found something not in our allowlist, return false.
return false;
}
// We did not find any users that we did not understand. So we can
// conservatively return true here.
return true;
}
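// For illustration (hypothetical SIL), an alloc_stack that the function above
// accepts, collecting the destroy_addr into destroyingUses:
//
//   %stack = alloc_stack $T
//   store %v to [init] %stack : $*T     // the single initialization
//   %val = load [copy] %stack : $*T     // reads are fine
//   destroy_addr %stack : $*T
//   dealloc_stack %stack : $*T
//
// A second initialization, a load [take], or any user outside the small
// allowlist above causes it to return false.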
/// Return true if the given address value is produced by a special address
/// producer that is only used for local initialization, not formal access.
bool swift::isAddressForLocalInitOnly(SILValue sourceAddr) {
switch (sourceAddr->getKind()) {
default:
return false;
// Value to address conversions: the operand is the non-address source
// value. These allow local mutation of the value but should never be used
// for formal access of an lvalue.
case ValueKind::OpenExistentialBoxInst:
case ValueKind::ProjectExistentialBoxInst:
return true;
// Self-evident local initialization.
case ValueKind::InitEnumDataAddrInst:
case ValueKind::InitExistentialAddrInst:
case ValueKind::AllocExistentialBoxInst:
case ValueKind::AllocValueBufferInst:
case ValueKind::ProjectValueBufferInst:
return true;
}
}
// Return true if the given apply invokes a global addressor defined in another
// module.
bool swift::isExternalGlobalAddressor(ApplyInst *AI) {
FullApplySite apply(AI);
auto *funcRef = apply.getReferencedFunctionOrNull();
if (!funcRef)
return false;
return funcRef->isGlobalInit() && funcRef->isExternalDeclaration();
}
// Return true if the given StructExtractInst extracts the RawPointer from
// Unsafe[Mutable]Pointer.
bool swift::isUnsafePointerExtraction(StructExtractInst *SEI) {
if (!isa<BuiltinRawPointerType>(SEI->getType().getASTType()))
return false;
auto &C = SEI->getModule().getASTContext();
auto *decl = SEI->getStructDecl();
return decl == C.getUnsafeMutablePointerDecl()
|| decl == C.getUnsafePointerDecl();
}
// Given a block argument address base, check if it is actually a box projected
// from a switch_enum. This is a valid pattern at any SIL stage resulting in a
// block-type phi. In later SIL stages, the optimizer may form address-type
// phis, which would trigger this assert if called on those cases.
void swift::checkSwitchEnumBlockArg(SILPhiArgument *arg) {
assert(!arg->getType().isAddress());
SILBasicBlock *Pred = arg->getParent()->getSinglePredecessorBlock();
if (!Pred || !isa<SwitchEnumInst>(Pred->getTerminator())) {
arg->dump();
llvm_unreachable("unexpected box source.");
}
}
bool swift::isPossibleFormalAccessBase(const AccessedStorage &storage,
SILFunction *F) {
switch (storage.getKind()) {
case AccessedStorage::Nested:
assert(false && "don't pass nested storage to this helper");
return false;
case AccessedStorage::Box:
case AccessedStorage::Stack:
if (isScratchBuffer(storage.getValue()))
return false;
break;
case AccessedStorage::Global:
break;
case AccessedStorage::Class:
break;
case AccessedStorage::Tail:
return false;
case AccessedStorage::Yield:
// Yields are accessed by the caller.
return false;
case AccessedStorage::Argument:
// Function arguments are accessed by the caller.
return false;
case AccessedStorage::Unidentified:
if (isAddressForLocalInitOnly(storage.getValue()))
return false;
if (isa<SILPhiArgument>(storage.getValue())) {
checkSwitchEnumBlockArg(cast<SILPhiArgument>(storage.getValue()));
return false;
}
// Pointer-to-address exclusivity cannot be enforced. `baseAddress` may be
// pointing anywhere within an object.
if (isa<PointerToAddressInst>(storage.getValue()))
return false;
if (isa<SILUndef>(storage.getValue()))
return false;
if (isScratchBuffer(storage.getValue()))
return false;
break;
}
// Additional checks that apply to anything that may fall through.
// Immutable values are only accessed for initialization.
if (storage.isLetAccess(F))
return false;
return true;
}
SILBasicBlock::iterator swift::removeBeginAccess(BeginAccessInst *beginAccess) {
while (!beginAccess->use_empty()) {
Operand *op = *beginAccess->use_begin();
// Delete any associated end_access instructions.
if (auto endAccess = dyn_cast<EndAccessInst>(op->getUser())) {
endAccess->eraseFromParent();
// Forward all other uses to the original address.
} else {
op->set(beginAccess->getSource());
}
}
return beginAccess->getParent()->erase(beginAccess);
}
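// For illustration (hypothetical SIL), removeBeginAccess rewrites
//
//   %scope = begin_access [read] [static] %addr : $*T
//   %v = load %scope : $*T
//   end_access %scope : $*T
//
// into
//
//   %v = load %addr : $*T
//
// by erasing the end_access, forwarding the remaining uses to %addr, and then
// erasing the begin_access itself.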
//===----------------------------------------------------------------------===//
// Verification
//===----------------------------------------------------------------------===//
// Helper for visitApplyAccesses that visits address-type call arguments,
// including arguments to @noescape functions that are passed as closures to
// the current call.
static void visitApplyAccesses(ApplySite apply,
llvm::function_ref<void(Operand *)> visitor) {
for (Operand &oper : apply.getArgumentOperands()) {
// Consider any address-type operand an access. Whether it is a read or a
// modify depends on the argument convention.
if (oper.get()->getType().isAddress()) {
visitor(&oper);
continue;
}
auto fnType = oper.get()->getType().getAs<SILFunctionType>();
if (!fnType || !fnType->isNoEscape())
continue;
// When @noescape function closures are passed as arguments, their
// arguments are considered accessed at the call site.
TinyPtrVector<PartialApplyInst *> partialApplies;
findClosuresForFunctionValue(oper.get(), partialApplies);
// Recursively visit @noescape function closure arguments.
for (auto *PAI : partialApplies)
visitApplyAccesses(PAI, visitor);
}
}
static void visitBuiltinAddress(BuiltinInst *builtin,
llvm::function_ref<void(Operand *)> visitor) {
if (auto kind = builtin->getBuiltinKind()) {
switch (kind.getValue()) {
default:
builtin->dump();
llvm_unreachable("unexpected builtin memory access.");
// Handle builtin "generic_add"<V>($*V, $*V, $*V) and the like.
#define BUILTIN(Id, Name, Attrs)
#define BUILTIN_BINARY_OPERATION_POLYMORPHIC(Id, Name) \
case BuiltinValueKind::Id:
#include "swift/AST/Builtins.def"
visitor(&builtin->getAllOperands()[1]);
visitor(&builtin->getAllOperands()[2]);
return;
// WillThrow exists for the debugger; it does nothing.
case BuiltinValueKind::WillThrow:
return;
// Builtins that affect memory but can't be formal accesses.
case BuiltinValueKind::AssumeTrue:
case BuiltinValueKind::UnexpectedError:
case BuiltinValueKind::ErrorInMain:
case BuiltinValueKind::IsOptionalType:
case BuiltinValueKind::CondFailMessage:
case BuiltinValueKind::AllocRaw:
case BuiltinValueKind::DeallocRaw:
case BuiltinValueKind::Fence:
case BuiltinValueKind::StaticReport:
case BuiltinValueKind::Once:
case BuiltinValueKind::OnceWithContext:
case BuiltinValueKind::Unreachable:
case BuiltinValueKind::CondUnreachable:
case BuiltinValueKind::DestroyArray:
case BuiltinValueKind::UnsafeGuaranteed:
case BuiltinValueKind::UnsafeGuaranteedEnd:
case BuiltinValueKind::Swift3ImplicitObjCEntrypoint:
case BuiltinValueKind::PoundAssert:
case BuiltinValueKind::IntInstrprofIncrement:
case BuiltinValueKind::TSanInoutAccess:
return;
// General memory access to a pointer in first operand position.
case BuiltinValueKind::CmpXChg:
case BuiltinValueKind::AtomicLoad:
case BuiltinValueKind::AtomicStore:
case BuiltinValueKind::AtomicRMW:
// Currently ignored because the access is on a RawPointer, not a
// SIL address.
// visitor(&builtin->getAllOperands()[0]);
return;
// Arrays: (T.Type, Builtin.RawPointer, Builtin.RawPointer,
// Builtin.Word)
case BuiltinValueKind::CopyArray:
case BuiltinValueKind::TakeArrayNoAlias:
case BuiltinValueKind::TakeArrayFrontToBack:
case BuiltinValueKind::TakeArrayBackToFront:
case BuiltinValueKind::AssignCopyArrayNoAlias:
case BuiltinValueKind::AssignCopyArrayFrontToBack:
case BuiltinValueKind::AssignCopyArrayBackToFront:
case BuiltinValueKind::AssignTakeArray:
// Currently ignored because the access is on a RawPointer.
// visitor(&builtin->getAllOperands()[1]);
// visitor(&builtin->getAllOperands()[2]);
return;
}
}
if (auto ID = builtin->getIntrinsicID()) {
switch (ID.getValue()) {
// Exhaustively verifying all LLVM intrinsics that access memory is
// impractical. Instead, we call out the few common cases and return in
// the default case.
default:
return;
case llvm::Intrinsic::memcpy:
case llvm::Intrinsic::memmove:
// Currently ignored because the access is on a RawPointer.
// visitor(&builtin->getAllOperands()[0]);
// visitor(&builtin->getAllOperands()[1]);
return;
case llvm::Intrinsic::memset:
// Currently ignored because the access is on a RawPointer.
// visitor(&builtin->getAllOperands()[0]);
return;
}
}
llvm_unreachable("Must be either a builtin or intrinsic.");
}
void swift::visitAccessedAddress(SILInstruction *I,
llvm::function_ref<void(Operand *)> visitor) {
assert(I->mayReadOrWriteMemory());
// Reference counting instructions do not access user visible memory.
if (isa<RefCountingInst>(I))
return;
if (isa<DeallocationInst>(I))
return;
if (auto apply = FullApplySite::isa(I)) {
visitApplyAccesses(apply, visitor);
return;
}
if (auto builtin = dyn_cast<BuiltinInst>(I)) {
visitBuiltinAddress(builtin, visitor);
return;
}
switch (I->getKind()) {
default:
I->dump();
llvm_unreachable("unexpected memory access.");
case SILInstructionKind::AssignInst:
case SILInstructionKind::AssignByWrapperInst:
visitor(&I->getAllOperands()[AssignInst::Dest]);
return;
case SILInstructionKind::CheckedCastAddrBranchInst:
visitor(&I->getAllOperands()[CheckedCastAddrBranchInst::Src]);
visitor(&I->getAllOperands()[CheckedCastAddrBranchInst::Dest]);
return;
case SILInstructionKind::CopyAddrInst:
visitor(&I->getAllOperands()[CopyAddrInst::Src]);
visitor(&I->getAllOperands()[CopyAddrInst::Dest]);
return;
#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
case SILInstructionKind::Store##Name##Inst:
#include "swift/AST/ReferenceStorage.def"
case SILInstructionKind::StoreInst:
case SILInstructionKind::StoreBorrowInst:
visitor(&I->getAllOperands()[StoreInst::Dest]);
return;
case SILInstructionKind::SelectEnumAddrInst:
visitor(&I->getAllOperands()[0]);
return;
case SILInstructionKind::InitExistentialAddrInst:
case SILInstructionKind::InjectEnumAddrInst:
#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
case SILInstructionKind::Load##Name##Inst:
#include "swift/AST/ReferenceStorage.def"
case SILInstructionKind::LoadInst:
case SILInstructionKind::LoadBorrowInst:
case SILInstructionKind::OpenExistentialAddrInst:
case SILInstructionKind::SwitchEnumAddrInst:
case SILInstructionKind::UncheckedTakeEnumDataAddrInst:
case SILInstructionKind::UnconditionalCheckedCastInst: {
// Assuming all the above have only a single address operand.
assert(I->getNumOperands() - I->getNumTypeDependentOperands() == 1);
Operand *singleOperand = &I->getAllOperands()[0];
// Check the operand type because UnconditionalCheckedCastInst may operate
// on a non-address.
if (singleOperand->get()->getType().isAddress())
visitor(singleOperand);
return;
}
// Non-access cases: these are marked with memory side effects, but, by
// themselves, do not access formal memory.
#define UNCHECKED_REF_STORAGE(Name, ...) \
case SILInstructionKind::StrongCopy##Name##ValueInst:
#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
case SILInstructionKind::StrongCopy##Name##ValueInst:
#include "swift/AST/ReferenceStorage.def"
case SILInstructionKind::AbortApplyInst:
case SILInstructionKind::AllocBoxInst:
case SILInstructionKind::AllocExistentialBoxInst:
case SILInstructionKind::AllocGlobalInst:
case SILInstructionKind::BeginAccessInst:
case SILInstructionKind::BeginApplyInst:
case SILInstructionKind::BeginBorrowInst:
case SILInstructionKind::BeginCOWMutationInst:
case SILInstructionKind::EndCOWMutationInst:
case SILInstructionKind::BeginUnpairedAccessInst:
case SILInstructionKind::BindMemoryInst:
case SILInstructionKind::CheckedCastValueBranchInst:
case SILInstructionKind::CondFailInst:
case SILInstructionKind::CopyBlockInst:
case SILInstructionKind::CopyBlockWithoutEscapingInst:
case SILInstructionKind::CopyValueInst:
case SILInstructionKind::DeinitExistentialAddrInst:
case SILInstructionKind::DeinitExistentialValueInst:
case SILInstructionKind::DestroyAddrInst:
case SILInstructionKind::DestroyValueInst:
case SILInstructionKind::EndAccessInst:
case SILInstructionKind::EndApplyInst:
case SILInstructionKind::EndBorrowInst:
case SILInstructionKind::EndUnpairedAccessInst:
case SILInstructionKind::EndLifetimeInst:
case SILInstructionKind::ExistentialMetatypeInst:
case SILInstructionKind::FixLifetimeInst:
case SILInstructionKind::GlobalAddrInst:
case SILInstructionKind::InitExistentialValueInst:
case SILInstructionKind::IsUniqueInst:
case SILInstructionKind::IsEscapingClosureInst:
case SILInstructionKind::KeyPathInst:
case SILInstructionKind::OpenExistentialBoxInst:
case SILInstructionKind::OpenExistentialBoxValueInst:
case SILInstructionKind::OpenExistentialValueInst:
case SILInstructionKind::PartialApplyInst:
case SILInstructionKind::ProjectValueBufferInst:
case SILInstructionKind::YieldInst:
case SILInstructionKind::UnwindInst:
case SILInstructionKind::UncheckedOwnershipConversionInst:
case SILInstructionKind::UncheckedRefCastAddrInst:
case SILInstructionKind::UnconditionalCheckedCastAddrInst:
case SILInstructionKind::UnconditionalCheckedCastValueInst:
case SILInstructionKind::ValueMetatypeInst:
return;
}
}