Add new loop invariant code motion.

This commit is contained in:
Jakub Florek
2025-08-28 21:00:33 +01:00
parent 3514b2b9df
commit 07ac8b3478
17 changed files with 1403 additions and 106 deletions

View File

@@ -25,6 +25,7 @@ swift_compiler_sources(Optimizer
LifetimeDependenceDiagnostics.swift
LifetimeDependenceInsertion.swift
LifetimeDependenceScopeFixup.swift
LoopInvariantCodeMotion.swift
ObjectOutliner.swift
ObjCBridgingOptimization.swift
MergeCondFails.swift

View File

@@ -106,6 +106,7 @@ private func registerSwiftPasses() {
registerPass(tempLValueElimination, { tempLValueElimination.run($0) })
registerPass(generalClosureSpecialization, { generalClosureSpecialization.run($0) })
registerPass(autodiffClosureSpecialization, { autodiffClosureSpecialization.run($0) })
registerPass(loopInvariantCodeMotionPass, { loopInvariantCodeMotionPass.run($0) })
// Instruction passes
registerForSILCombine(BeginBorrowInst.self, { run(BeginBorrowInst.self, $0) })

View File

@@ -484,6 +484,22 @@ extension Instruction {
}
return false
}
/// Returns true if `otherInst` is in the same block and is strictly dominated by this instruction or
/// the parent block of the instruction dominates parent block of `otherInst`.
func dominates(
  _ otherInst: Instruction,
  _ domTree: DominatorTree
) -> Bool {
  guard parentBlock != otherInst.parentBlock else {
    // Same block: decide by intra-block instruction order.
    return dominatesInSameBlock(otherInst)
  }
  // Different blocks: defer to block-level dominance in the dominator tree.
  return parentBlock.dominates(otherInst.parentBlock, domTree)
}
/// If this instruction uses a (single) existential archetype, i.e. it has a type-dependent operand,
/// returns the concrete type if it is known.
@@ -583,36 +599,22 @@ extension StoreInst {
extension LoadInst {
@discardableResult
func trySplit(_ context: FunctionPassContext) -> Bool {
var elements = [Value]()
let builder = Builder(before: self, context)
if type.isStruct {
if (type.nominal as! StructDecl).hasUnreferenceableStorage {
guard !(type.nominal as! StructDecl).hasUnreferenceableStorage,
let fields = type.getNominalFields(in: parentFunction) else {
return false
}
guard let fields = type.getNominalFields(in: parentFunction) else {
return false
}
for idx in 0..<fields.count {
let fieldAddr = builder.createStructElementAddr(structAddress: address, fieldIndex: idx)
let splitLoad = builder.createLoad(fromAddress: fieldAddr, ownership: self.splitOwnership(for: fieldAddr))
elements.append(splitLoad)
}
let newStruct = builder.createStruct(type: self.type, elements: elements)
self.replace(with: newStruct, context)
_ = splitStruct(fields: fields, context)
return true
} else if type.isTuple {
var elements = [Value]()
let builder = Builder(before: self, context)
for idx in 0..<type.tupleElements.count {
let fieldAddr = builder.createTupleElementAddr(tupleAddress: address, elementIndex: idx)
let splitLoad = builder.createLoad(fromAddress: fieldAddr, ownership: self.splitOwnership(for: fieldAddr))
elements.append(splitLoad)
}
let newTuple = builder.createTuple(type: self.type, elements: elements)
self.replace(with: newTuple, context)
_ = splitTuple(context)
return true
} else {
return false
}
return false
}
private func splitOwnership(for fieldValue: Value) -> LoadOwnership {
@@ -623,6 +625,70 @@ extension LoadInst {
return fieldValue.type.isTrivial(in: parentFunction) ? .trivial : self.loadOwnership
}
}
/// Splits this load along `projectionPath`, recursively splitting the load of the
/// projected-to field as well.
///
/// Returns the resulting element loads (with the recursively split field's loads
/// replacing that field's single load), or nil if the path is empty or the type
/// cannot be split at the path's head.
func trySplit(
  alongPath projectionPath: SmallProjectionPath,
  _ context: FunctionPassContext
) -> [LoadInst]? {
  guard !projectionPath.isEmpty else {
    return nil
  }
  let (fieldKind, fieldIndex, remainingPath) = projectionPath.pop()
  var elementLoads: [LoadInst]
  switch fieldKind {
  case .structField where type.isStruct:
    guard !(type.nominal as! StructDecl).hasUnreferenceableStorage,
          let fields = type.getNominalFields(in: parentFunction) else {
      return nil
    }
    elementLoads = splitStruct(fields: fields, context)
  case .tupleField where type.isTuple:
    elementLoads = splitTuple(context)
  default:
    return nil
  }
  // If the projected-to field could be split further, replace its single load
  // with the loads produced by the recursive split.
  if let nestedLoads = elementLoads[fieldIndex].trySplit(alongPath: remainingPath, context) {
    elementLoads.remove(at: fieldIndex)
    elementLoads.append(contentsOf: nestedLoads)
  }
  return elementLoads
}
/// Splits this struct load into one load per stored field and replaces the
/// original load with a `struct` aggregating the field loads.
/// Returns the newly created field loads.
private func splitStruct(fields: NominalFieldsArray, _ context: FunctionPassContext) -> [LoadInst] {
  let builder = Builder(before: self, context)
  var fieldLoads: [LoadInst] = []
  for fieldIndex in 0..<fields.count {
    let fieldAddress = builder.createStructElementAddr(structAddress: address, fieldIndex: fieldIndex)
    let fieldLoad = builder.createLoad(fromAddress: fieldAddress, ownership: splitOwnership(for: fieldAddress))
    fieldLoads.append(fieldLoad)
  }
  let aggregate = builder.createStruct(type: type, elements: fieldLoads)
  replace(with: aggregate, context)
  return fieldLoads
}
/// Splits this tuple load into one load per tuple element and replaces the
/// original load with a `tuple` aggregating the element loads.
/// Returns the newly created element loads.
private func splitTuple(_ context: FunctionPassContext) -> [LoadInst] {
  let builder = Builder(before: self, context)
  var elementLoads: [LoadInst] = []
  for elementIndex in 0..<type.tupleElements.count {
    let elementAddress = builder.createTupleElementAddr(tupleAddress: address, elementIndex: elementIndex)
    let elementLoad = builder.createLoad(fromAddress: elementAddress, ownership: splitOwnership(for: elementAddress))
    elementLoads.append(elementLoad)
  }
  let aggregate = builder.createTuple(type: type, elements: elementLoads)
  replace(with: aggregate, context)
  return elementLoads
}
}
extension FunctionPassContext {

View File

@@ -140,7 +140,7 @@ public struct Stack<Element> : CollectionLikeSequence {
public mutating func deinitialize() { removeAll() }
}
extension Stack {
public extension Stack {
/// Mark a stack location for future iteration.
///
/// TODO: Marker should be ~Escapable.
@@ -155,7 +155,7 @@ extension Stack {
let low: Marker
let high: Marker
init(in stack: Stack, low: Marker, high: Marker) {
public init(in stack: Stack, low: Marker, high: Marker) {
if low.slab.data == nil {
assert(low.index == 0, "invalid empty stack marker")
// `low == nil` and `high == nil` is a valid empty segment,
@@ -173,7 +173,7 @@ extension Stack {
self.high = high
}
func makeIterator() -> Stack.Iterator {
public func makeIterator() -> Stack.Iterator {
return Iterator(slab: low.slab, index: low.index,
lastSlab: high.slab, endIndex: high.index)
}
@@ -219,3 +219,87 @@ extension Stack {
}
}
}
/// A `Stack` wrapper that additionally maintains an element count, since
/// `Stack` itself does not track one.
public struct StackWithCount<Element> : CollectionLikeSequence {
  /// The number of elements currently on the stack.
  /// Updated eagerly on every mutation.
  public private(set) var count = 0
  private var underlyingStack: Stack<Element>

  public typealias Iterator = Stack<Element>.Iterator

  public init(_ context: some Context) {
    self.underlyingStack = Stack<Element>(context)
  }

  public func makeIterator() -> Stack<Element>.Iterator {
    underlyingStack.makeIterator()
  }

  public var first: Element? { underlyingStack.first }
  public var last: Element? { underlyingStack.last }

  public mutating func push(_ element: Element) {
    count += 1
    underlyingStack.push(element)
  }

  /// The same as `push` to provide an Array-like append API.
  public mutating func append(_ element: Element) { push(element) }

  public mutating func append<S: Sequence>(contentsOf other: S) where S.Element == Element {
    for elem in other {
      append(elem)
    }
  }

  public var isEmpty: Bool { underlyingStack.isEmpty }

  /// Pops and returns the top element, or nil if the stack is empty.
  public mutating func pop() -> Element? {
    if underlyingStack.isEmpty {
      return nil
    }
    count -= 1
    return underlyingStack.pop()
  }

  public mutating func removeAll() {
    // Bug fix: reset the count; previously it kept its stale value after the
    // underlying stack was emptied, desynchronizing `count` from the contents.
    count = 0
    underlyingStack.removeAll()
  }

  /// TODO: once we have move-only types, make this a real deinit.
  public mutating func deinitialize() { removeAll() }
}
/// Marker/segment support, forwarding to the underlying `Stack`'s marker API.
public extension StackWithCount {
typealias Marker = Stack<Element>.Marker
/// A view over a contiguous range of stack elements between two markers.
struct Segment : CollectionLikeSequence {
var underlyingSegment: Stack<Element>.Segment
public init(in stack: StackWithCount, low: Marker, high: Marker) {
underlyingSegment = Stack<Element>.Segment(in: stack.underlyingStack, low: low, high: high)
}
public func makeIterator() -> StackWithCount.Iterator {
return underlyingSegment.makeIterator()
}
}
/// Marker for the current top of the stack.
var top: Marker { underlyingStack.top }
func assertValid(marker: Marker) { underlyingStack.assertValid(marker: marker) }
mutating func withMarker<R>(
_ body: (inout Stack<Element>, Marker) throws -> R) rethrows -> R {
return try underlyingStack.withMarker(body)
}
/// Runs `body`, then hands the elements `body` pushed to `handleNewElements`
/// as a `Segment`.
mutating func withMarker<R>(
pushElements body: (inout Stack<Element>) throws -> R,
withNewElements handleNewElements: ((Segment) -> ())
) rethrows -> R {
// NOTE(review): `[self]` captures this wrapper by value before `body`
// mutates `underlyingStack`; presumably safe because `Stack` stores its
// elements in context-allocated slabs — confirm against `Stack`'s
// implementation. Also note `count` is not updated by this forwarding path.
return try underlyingStack.withMarker(pushElements: body) { [self] segment in
handleNewElements(Segment(in: self, low: segment.low, high: segment.high))
}
}
}

View File

@@ -505,6 +505,14 @@ public struct SideEffects : CustomStringConvertible, NoReflectionChildren {
/// This is true when the function (or a callee, transitively) contains a
/// deinit barrier instruction.
public var isDeinitBarrier: Bool
/// A `GlobalEffects` value with no memory, ownership, allocation or
/// deinit-barrier effects.
public static var noEffects: GlobalEffects {
return GlobalEffects(memory: .noEffects, ownership: .noEffects, allocates: false, isDeinitBarrier: false)
}
/// True if the only effect (if any) is reading memory: no writes, no
/// ownership effects, no allocation and no deinit barrier. Note that
/// `memory.read` may still be true.
public var isOnlyReading: Bool {
return !memory.write && ownership == .noEffects && !allocates && !isDeinitBarrier
}
/// When called with default arguments, it creates an "effect-free" GlobalEffects.
public init(memory: Memory = Memory(read: false, write: false),
@@ -643,6 +651,10 @@ public struct SideEffects : CustomStringConvertible, NoReflectionChildren {
copy = copy || other.copy
destroy = destroy || other.destroy
}
/// An `Ownership` value with neither copy nor destroy effects
/// (the counterpart of `worstEffects`).
public static var noEffects: Ownership {
  Ownership(copy: false, destroy: false)
}
public static var worstEffects: Ownership {
Ownership(copy: true, destroy: true)

View File

@@ -96,6 +96,11 @@ public class Instruction : CustomStringConvertible, Hashable {
BridgedContext.moveInstructionBefore(bridged, otherInstruction.bridged)
context.notifyInstructionsChanged()
}
/// Copies this instruction and inserts the copy before `otherInstruction`
/// (delegating to the bridged `copyInstructionBefore`), then notifies the
/// context that instructions changed.
/// NOTE(review): the new instruction is not returned — callers presumably
/// locate it via `otherInstruction`'s predecessor; confirm at call sites.
public final func copy(before otherInstruction: Instruction, _ context: some MutatingContext) {
BridgedContext.copyInstructionBefore(bridged, otherInstruction.bridged)
context.notifyInstructionsChanged()
}
public var mayTrap: Bool { false }
@@ -177,6 +182,10 @@ public class Instruction : CustomStringConvertible, Hashable {
public static func ==(lhs: Instruction, rhs: Instruction) -> Bool {
lhs === rhs
}
/// Whether this instruction is identical to `otherInst`, as determined by the
/// bridged SIL `isIdenticalTo` check (distinct from `==`, which is identity).
public func isIdenticalTo(_ otherInst: Instruction) -> Bool {
  bridged.isIdenticalTo(otherInst.bridged)
}
public func hash(into hasher: inout Hasher) {
hasher.combine(ObjectIdentifier(self))

View File

@@ -392,6 +392,11 @@ public struct AccessPath : CustomStringConvertible, Hashable {
public func isEqualOrContains(_ other: AccessPath) -> Bool {
return getProjection(to: other) != nil
}
/// Returns true if this access contains `other` access and is not equal,
/// i.e. there is a non-empty projection from this path to `other`.
public func contains(_ other: AccessPath) -> Bool {
  guard let projection = getProjection(to: other) else {
    return false
  }
  return !projection.isEmpty
}
public var materializableProjectionPath: SmallProjectionPath? {
if projectionPath.isMaterializable {

View File

@@ -534,6 +534,18 @@ public struct SmallProjectionPath : Hashable, CustomStringConvertible, NoReflect
return false
}
}
/// True if the path is built only from concrete projection components
/// (struct/tuple/class fields, enum cases, existentials, indexed elements)
/// down to the root.
/// NOTE(review): wildcard components (anything outside the listed cases)
/// fall into `default` and make the path non-constant — presumably these are
/// the `any*` matching kinds; confirm against `FieldKind`'s full case list.
public var isConstant: Bool {
let (kind, _, subPath) = pop()
switch kind {
case .root:
return true
case .structField, .tupleField, .enumCase, .classField, .existential, .indexedElement:
return subPath.isConstant
default:
return false
}
}
}
//===----------------------------------------------------------------------===//

View File

@@ -51,7 +51,7 @@
/// This macro follows the same conventions as PASS(Id, Tag, Description),
/// but is used for IRGen passes which are built outside of the
/// SILOptimizer library.
///
///
/// An IRGen pass is created by IRGen and needs to be registered with the pass
/// manager dynamically.
#ifndef IRGEN_PASS
@@ -148,6 +148,8 @@ PASS(TempRValueElimination, "temp-rvalue-elimination",
"Remove short-lived immutable temporary copies")
PASS(TempLValueElimination, "temp-lvalue-elimination",
"Remove short-lived immutable temporary l-values")
PASS(LoopInvariantCodeMotion, "loop-invariant-code-motion",
"New Loop Invariant Code Motion")
// NOTE - ExperimentalSwiftBasedClosureSpecialization and AutodiffClosureSpecialization are a WIP
PASS(ExperimentalSwiftBasedClosureSpecialization, "experimental-swift-based-closure-specialization",

View File

@@ -392,7 +392,7 @@ void addHighLevelLoopOptPasses(SILPassPipelinePlan &P) {
// before CanonicalOSSA re-hoists destroys.
P.addAccessEnforcementReleaseSinking();
P.addAccessEnforcementOpts();
P.addHighLevelLICM();
P.addLoopInvariantCodeMotion();
// Simplify CFG after LICM that creates new exit blocks
P.addSimplifyCFG();
// LICM might have added new merging potential by hoisting
@@ -481,7 +481,7 @@ void addFunctionPasses(SILPassPipelinePlan &P,
// late as possible before inlining because it must run between runs of the
// inliner when the pipeline restarts.
if (OpLevel == OptimizationLevelKind::MidLevel) {
P.addHighLevelLICM();
P.addLoopInvariantCodeMotion();
P.addArrayCountPropagation();
P.addBoundsCheckOpts();
P.addDCE();
@@ -739,10 +739,10 @@ static void addMidLevelFunctionPipeline(SILPassPipelinePlan &P) {
// A LICM pass at mid-level is mainly needed to hoist addressors of globals.
// It needs to be before global_init functions are inlined.
P.addLICM();
P.addLoopInvariantCodeMotion();
// Run loop unrolling after inlining and constant propagation, because loop
// trip counts may have became constant.
P.addLICM();
P.addLoopInvariantCodeMotion();
P.addLoopUnroll();
}
@@ -848,7 +848,7 @@ static void addLateLoopOptPassPipeline(SILPassPipelinePlan &P) {
// It will also set the no_nested_conflict for dynamic accesses
P.addAccessEnforcementReleaseSinking();
P.addAccessEnforcementOpts();
P.addLICM();
P.addLoopInvariantCodeMotion();
P.addCOWOpts();
// Simplify CFG after LICM that creates new exit blocks
P.addSimplifyCFG();
@@ -892,7 +892,7 @@ static void addLastChanceOptPassPipeline(SILPassPipelinePlan &P) {
P.addAccessEnforcementDom();
// addAccessEnforcementDom might provide potential for LICM:
// A loop might have only one dynamic access now, i.e. hoistable
P.addLICM();
P.addLoopInvariantCodeMotion();
// Verify AccessStorage once again after optimizing and lowering OSSA.
#ifndef NDEBUG

View File

@@ -1,8 +1,8 @@
// RUN: %target-swift-frontend %s -emit-sil -O \
// RUN: -disable-availability-checking | %FileCheck %s --check-prefix=CHECK-SIL
// RUN: -disable-availability-checking | %FileCheck %s --check-prefix=CHECK-SIL
// RUN: %target-swift-frontend %s -emit-ir -O \
// RUN: -disable-availability-checking | %FileCheck %s --check-prefix=CHECK-IR
// RUN: -disable-availability-checking | %FileCheck %s --check-prefix=CHECK-IR
// REQUIRES: swift_in_compiler
// REQUIRES: swift_stdlib_no_asserts, optimized_stdlib
@@ -131,7 +131,7 @@ public func inlinearray_sum_iterate_to_deducible_count2_wo_trap<let N: Int>(_ v:
// Bounds check should be eliminated
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A42_sum_iterate_to_deducible_count2_with_trapySis11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A42_sum_iterate_to_deducible_count2_with_trapySis11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL: bb3
// CHECK-SIL-NOT: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
@@ -147,7 +147,7 @@ public func inlinearray_sum_iterate_to_deducible_count2_with_trap<let N: Int>(_
// Bounds check should be eliminated
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A29_iterate_over_indices_wo_trapySis11InlineArrayVyxSiGSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A29_iterate_over_indices_wo_trapySis11InlineArrayVyxSiGSiRVzlF :
// CHECK-SIL: bb3
// CHECK-SIL-NOT: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
@@ -166,7 +166,7 @@ public func inlinearray_iterate_over_indices_wo_trap<let N: Int>(_ v: InlineArra
// Bounds check should be eliminated
// Induction variable optimization eliminates the bounds check in SIL
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A31_iterate_over_indices_with_trapySis11InlineArrayVyxSiGSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A31_iterate_over_indices_with_trapySis11InlineArrayVyxSiGSiRVzlF :
// CHECK-SIL: bb3
// CHECK-SIL-NOT: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
@@ -181,7 +181,7 @@ public func inlinearray_iterate_over_indices_with_trap<let N: Int>(_ v: InlineAr
// Eliminate duplicate bounds check
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A17_element_equalityySbs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A17_element_equalityySbs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL-NOT: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL-LABEL: } // end sil function '$s30inlinearray_bounds_check_tests0A17_element_equalityySbs11InlineArrayVyxSiG_SitSiRVzlF'
@@ -201,7 +201,7 @@ public func inlinearray_element_sum<let N: Int>(_ v: InlineArray<N, Int>, _ i: I
// Bounds check should be eliminated
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A7_searchySiSgs11InlineArrayVyq_xG_xtSiRV_SQRzr0_lF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A7_searchySiSgs11InlineArrayVyq_xG_xtSiRV_SQRzr0_lF :
// CHECK-SIL: bb3:
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
@@ -218,7 +218,7 @@ public func inlinearray_search<T : Equatable, let N: Int>(_ v: InlineArray<N, T>
// Bounds check should be eliminated
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A11_search_splySiSgs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A11_search_splySiSgs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL: bb3:
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
@@ -235,7 +235,7 @@ public func inlinearray_search_spl<let N: Int>(_ v: InlineArray<N, Int>, _ elem:
// Bounds check should be eliminated
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A18_binary_search_splySiSgs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A18_binary_search_splySiSgs11InlineArrayVyxSiG_SitSiRVzlF :
// CHECK-SIL: bb2
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
@@ -262,7 +262,7 @@ public func inlinearray_binary_search_spl<let N: Int>(_ v: InlineArray<N, Int>,
// This prevents LoopRotate, which in turn prevents bounds-check optimizations, since they depend on induction variable analysis, which doesn't work on unrotated loops.
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A35_sum_iterate_to_count_with_trap_splySis11InlineArrayVy$63_SiGF :
// CHECK-SIL: bb2
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-NOT-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
// CHECK-SIL-LABEL: } // end sil function '$s30inlinearray_bounds_check_tests0A35_sum_iterate_to_count_with_trap_splySis11InlineArrayVy$63_SiGF'
public func inlinearray_sum_iterate_to_count_with_trap_spl(_ v: InlineArray<64, Int>) -> Int {
@@ -274,7 +274,7 @@ public func inlinearray_sum_iterate_to_count_with_trap_spl(_ v: InlineArray<64,
}
// InlineArray is copied into a temporary within the loop in the "specialized" version
// This prevents LoopRotate which prevent bounds checks opts since it depends on induction variable analysis which doesn't work on unrotated loops.
// This prevents LoopRotate which prevent bounds checks opts since it depends on induction variable analysis which doesn't work on unrotated loops.
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A37_sum_iterate_to_unknown_with_trap_splySis11InlineArrayVy$63_SiG_SitF :
// CHECK-SIL: bb2
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
@@ -290,7 +290,7 @@ public func inlinearray_sum_iterate_to_unknown_with_trap_spl(_ v: InlineArray<64
// Current codegen for this in SIL is very poor
// First a temp is created and the elements are stored to it, then they get loaded from the temp to be stored in the let which is then loaded again to get the sum
// However, LLVM can constant fold everything
// However, LLVM can constant fold everything
public func local_inlinearray_sum_iterate_to_count_with_trap_spl() -> Int {
var sum = 0
let v : InlineArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16]
@@ -302,7 +302,7 @@ public func local_inlinearray_sum_iterate_to_count_with_trap_spl() -> Int {
// Current codegen for this in SIL is very poor
// First a temp is created and the elements are stored to it, then they get loaded from the temp to be stored in the let which is then loaded again to get the sum
// LLVM cannot constant fold, it memsets, memcopies and then loops over to sum
// LLVM cannot constant fold, it memsets, memcopies and then loops over to sum
public func local_inlinearray_repeating_init_sum_iterate_to_count_trap_spl() -> Int {
var sum = 0
let v = InlineArray<64, Int>(repeating: 64)
@@ -324,9 +324,9 @@ public func inlinearray_inc_by_one<let N: Int>(_ v: inout InlineArray<N, Int>, _
}
}
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A15_inc_by_one_splyys11InlineArrayVy$63_SiGz_SitF :
// CHECK-SIL-LABEL: sil @$s30inlinearray_bounds_check_tests0A15_inc_by_one_splyys11InlineArrayVy$63_SiGz_SitF :
// CHECK-SIL: bb2
// CHECK-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-NOT-SIL: cond_fail {{.*}}, "Index out of bounds"
// CHECK-SIL: cond_br
// CHECK-SIL-LABEL: } // end sil function '$s30inlinearray_bounds_check_tests0A15_inc_by_one_splyys11InlineArrayVy$63_SiGz_SitF'
public func inlinearray_inc_by_one_spl(_ v: inout InlineArray<64, Int>, _ n: Int) {

View File

@@ -1,4 +1,4 @@
// RUN: %target-sil-opt -sil-print-types -enforce-exclusivity=none -enable-sil-verify-all %s -licm | %FileCheck %s
// RUN: %target-sil-opt -sil-print-types -enforce-exclusivity=none -enable-sil-verify-all %s -loop-invariant-code-motion | %FileCheck %s
// REQUIRES: swift_in_compiler
@@ -182,13 +182,14 @@ bb4:
sil [_semantics "array.get_count"] @getCount : $@convention(method) (@guaranteed Array<Int>) -> Int
sil @user : $@convention(thin) (Int) -> ()
// CHECK-LABEL: sil @dont_hoist_get_count_on_low_level_sil
// CHECK: {{^}}bb1:
// CHECK-LABEL: sil @hoist_get_count_on_low_level_sil
// CHECK: {{^}}bb0(%0 : $Array<Int>):
// CHECK: apply
// CHECK: {{^}}bb1:
// CHECK: apply
// CHECK: {{^}}bb2:
// CHECK: return
sil @dont_hoist_get_count_on_low_level_sil : $@convention(thin) (@guaranteed Array<Int>) -> () {
sil @hoist_get_count_on_low_level_sil : $@convention(thin) (@guaranteed Array<Int>) -> () {
bb0(%0 : $Array<Int>):
br bb1
@@ -943,12 +944,9 @@ bb5:
return %99 : $()
}
// Test load splitting with a loop-invariant stored value. The loop
// Test load splitting with a loop-invariant value. The loop
// will be empty after combined load/store hoisting/sinking.
//
// TODO: sink a struct_extract (or other non-side-effect instructions)
// with no uses in the loop.
//
// CHECK-LABEL: sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: [[STOREDVAL:%.*]] = struct_extract %0 : $Int64, #Int64._value
@@ -984,12 +982,12 @@ bb2:
bb3:
%result = tuple (%val1 : $Index, %val2 : $Int64, %val3 : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
}
} // end sil function 'testLoadSplit'
// Test load splitting with a loop-varying stored value.
// CHECK-LABEL: sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: br bb1(%4 : $Int64)
// CHECK: br bb1(%{{.*}} : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64)
// CHECK-NEXT: [[EXTRACT:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value
@@ -1030,7 +1028,7 @@ bb2:
bb3:
%result = tuple (%outerVal : $Index, %middleVal : $Int64, %innerVal : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
}
} // end sil function 'testLoadSplitPhi'
struct State {
@_hasStorage var valueSet: (Int64, Int64, Int64) { get set }
@@ -1045,11 +1043,12 @@ struct State {
// CHECK: bb0(%0 : $Builtin.RawPointer):
// CHECK: [[HOISTADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// ...Preload stored element #1
// CHECK: tuple_element_addr
// CHECK: [[PRELOADADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1
// CHECK: [[PRELOAD:%.*]] = load [[PRELOADADR]] : $*Int64
// ...Split element 0
// CHECK: [[SPLIT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// CHECK: [[ELT0:%.*]] = load [[SPLIT0]] : $*Int64
// CHECK: [[ELT0:%.*]] = load [[HOISTADR]] : $*Int64
// CHECK: [[HOISTVAL:%.*]] = struct_extract [[ELT0]] : $Int64, #Int64._value
// ...Split element 2
// CHECK: [[SPLIT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2
// CHECK: [[ELT2:%.*]] = load [[SPLIT2]] : $*Int64
@@ -1057,8 +1056,6 @@ struct State {
// CHECK: [[SINGLEADR:%.*]] = struct_element_addr %{{.*}} : $*State, #State.singleValue
// CHECK: [[SINGLEVAL:%.*]] = load [[SINGLEADR]] : $*Int64
// ...Hoisted element 0
// CHECK: [[HOISTLOAD:%.*]] = load [[HOISTADR]] : $*Int64
// CHECK: [[HOISTVAL:%.*]] = struct_extract [[HOISTLOAD]] : $Int64, #Int64._value
// CHECK: br bb1([[PRELOAD]] : $Int64)
// ...Loop
// CHECK: bb1([[PHI:%.*]] : $Int64):
@@ -1166,7 +1163,6 @@ bb3:
// CHECK: [[ELT_1a:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1
// CHECK: [[ELT_1_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 0
// CHECK: [[V_1_0:%.*]] = load [[ELT_1_0]] : $*Int64
// CHECK: [[ELT_1b:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1
// CHECK: [[ELT_1_1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 1
// CHECK: [[V_1_1:%.*]] = load [[ELT_1_1]] : $*Int64
// CHECK: br bb1([[V_0:%.*]] : $Int64, [[V_1_0]] : $Int64)
@@ -1241,25 +1237,24 @@ bb3:
return %val1 : $(Int64, (Int64, Int64))
}
// Two wide loads. The first can be successfully split and the second
// half hoisted. The second cannot be split because of a pointer
// cast. Make sure two remaining loads and the store are still in the loop.
// Split two wide loads.
//
// CHECK-LABEL: sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
//
// The first load was split, so one half is hoisted.
// CHECK: [[V1:%.*]] = load %{{.*}} : $*Int64
// CHECK: br bb1
// CHECK: bb1:
// CHECK: [[V0:%.*]] = load %{{.*}} : $*Int64
// CHECK: [[INNER:%.*]] = tuple ([[V0]] : $Int64, [[V1]] : $Int64)
// CHECK: store %0 to %{{.*}} : $*Int64
// CHECK: [[OUTER:%.*]] = load %{{.*}} : $*(Int64, (Int64, Int64))
// CHECK: [[ADDR1:%.*]] = tuple_element_addr %{{.*}}, 0
// CHECK: [[V1:%.*]] = load [[ADDR1]] : $*Int64
// CHECK: [[ADDR0:%.*]] = tuple_element_addr %{{.*}}, 1
// CHECK: [[V0:%.*]] = load [[ADDR0]] : $*Int64
// CHECK: [[OUTER:%.*]] = tuple (%{{.*}} : $Int64, %{{.*}} : $(Int64, Int64))
// CHECK: br bb1([[V1]] : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK: [[INNER:%.*]] = tuple ([[PHI]] : $Int64, [[V0]] : $Int64)
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1
// CHECK: br bb1(%0 : $Int64)
// CHECK: bb3:
// CHECK: store %0 to [[ADDR1]] : $*Int64
// CHECK: [[RESULT:%.*]] = tuple ([[OUTER]] : $(Int64, (Int64, Int64)), [[INNER]] : $(Int64, Int64))
// CHECK: return [[RESULT]] : $((Int64, (Int64, Int64)), (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testSplitNonStandardProjection'
@@ -1291,11 +1286,11 @@ bb3:
// CHECK: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
// CHECK: [[ELT_1:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 1
// CHECK: [[V1:%.*]] = load %4 : $*(Int64, Int64)
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 0
// CHECK: [[V0:%.*]] = load %6 : $*Int64
// CHECK: [[ARG0:%.*]] = tuple (%0 : $Int64, %0 : $Int64)
// CHECK: [[ARG0_0:%.*]] = tuple_extract %8 : $(Int64, Int64), 0
// CHECK: [[ARG1:%.*]] = tuple (%1 : $Int64, %1 : $Int64)
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 0
// CHECK: [[V0:%.*]] = load %9 : $*Int64
// CHECK: [[ARG0_0:%.*]] = tuple_extract [[ARG0]] : $(Int64, Int64), 0
// CHECK: br bb1([[V1]] : $(Int64, Int64))
// CHECK: bb1([[PHI:%.*]] : $(Int64, Int64)):
// CHECK: [[LOOPVAL:%.*]] = tuple ([[V0]] : $Int64, [[PHI]] : $(Int64, Int64))
@@ -1343,8 +1338,10 @@ class C {}
// This won't be hoisted because we can't find a base to check if it is invariant
// CHECK-LABEL: sil @testLoopInvariantStoreNoBase1 :
// CHECK: bb3(%11 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: bb6:
// CHECK-NOT: store
// CHECK: store
// CHECK-LABEL: } // end sil function 'testLoopInvariantStoreNoBase1'
sil @testLoopInvariantStoreNoBase1 : $@convention(thin) (Builtin.BridgeObject, Double) -> () {
bb0(%0 : $Builtin.BridgeObject, %1 : $Double):
@@ -1380,8 +1377,10 @@ bb6:
// This won't be hoisted because we can't find a base to check if it is invariant
// CHECK-LABEL: sil @testLoopInvariantStoreNoBase2 :
// CHECK: bb3(%11 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: bb6:
// CHECK-NOT: store
// CHECK: store
// CHECK-LABEL: } // end sil function 'testLoopInvariantStoreNoBase2'
sil @testLoopInvariantStoreNoBase2 : $@convention(thin) (Builtin.BridgeObject, Double) -> () {
bb0(%0 : $Builtin.BridgeObject, %1 : $Double):
@@ -1561,7 +1560,7 @@ bb3:
// CHECK-LABEL: sil [ossa] @hoist_trivial_load :
// CHECK: load [trivial]
// CHECK-NEXT: br bb1
// CHECK-NEXT: br bb1
// CHECK: } // end sil function 'hoist_trivial_load'
sil [ossa] @hoist_trivial_load : $@convention(thin) (@inout Int) -> () {
bb0(%0 : $*Int):
@@ -1657,7 +1656,7 @@ bb3:
// CHECK-LABEL: } // end sil function 'store_of_optional_none'
sil [ossa] @store_of_optional_none : $@convention(thin) () -> () {
bb0:
%0 = enum $Optional<String>, #Optional.none!enumelt
%0 = enum $Optional<String>, #Optional.none!enumelt
%1 = alloc_stack $Optional<String>
store %0 to [init] %1
br bb1
@@ -1733,5 +1732,3 @@ bb3:
%13 = tuple ()
return %13
}

View File

@@ -1,4 +1,4 @@
// RUN: %target-sil-opt -sil-print-types -enable-sil-verify-all %s -compute-side-effects -licm | %FileCheck %s
// RUN: %target-sil-opt -sil-print-types -enable-sil-verify-all %s -compute-side-effects -loop-invariant-code-motion | %FileCheck %s
// REQUIRES: swift_in_compiler

View File

@@ -1,4 +1,4 @@
// RUN: %target-sil-opt -sil-print-types -enforce-exclusivity=checked -enable-sil-verify-all %s -licm | %FileCheck %s
// RUN: %target-sil-opt -sil-print-types -enforce-exclusivity=checked -enable-sil-verify-all %s -loop-invariant-code-motion | %FileCheck %s
sil_stage canonical

View File

@@ -1,17 +1,9 @@
// RUN: %target-swift-frontend -O -enforce-exclusivity=checked -Xllvm -sil-print-types -emit-sil -Xllvm -debug-only=sil-licm -primary-file %s 2>&1 | %FileCheck %s --check-prefix=TESTLICM
// RUN: %target-swift-frontend -O -enforce-exclusivity=checked -Xllvm -sil-print-types -emit-sil -Xllvm -debug-only=sil-licm -primary-file %s 2>&1 | %FileCheck %s --check-prefix=TESTLICM2
// RUN: %target-swift-frontend -O -enforce-exclusivity=checked -Xllvm -sil-print-types -emit-sil -primary-file %s | %FileCheck %s --check-prefix=TESTSIL
// RUN: %target-swift-frontend -O -enforce-exclusivity=checked -Xllvm -sil-print-types -emit-sil -Xllvm -debug-only=sil-licm -whole-module-optimization %s 2>&1 | %FileCheck %s --check-prefix=TESTLICMWMO
// RUN: %target-swift-frontend -O -enforce-exclusivity=checked -Xllvm -sil-print-types -emit-sil -whole-module-optimization %s | %FileCheck %s --check-prefix=TESTSILWMO
// REQUIRES: optimized_stdlib,asserts,swift_stdlib_no_asserts
// REQUIRES: PTRSIZE=64
// TESTLICM-LABEL: Processing loops in {{.*}}run_ReversedArray{{.*}}
// TESTLICM: Hoist and Sink pairs attempt
// TESTLICM: Hoisted
// TESTLICM: Successfully hoisted and sank pair
// TESTSIL-LABEL: sil hidden @$s16licm_exclusivity17run_ReversedArrayyySiF : $@convention(thin) (Int) -> () {
// TESTSIL: bb
// TESTSIL: begin_access [modify] [dynamic] [no_nested_conflict]
@@ -35,10 +27,6 @@ func run_ReversedArray(_ N: Int) {
}
}
// TESTLICM2-LABEL: Processing loops in {{.*}}count_unicodeScalars{{.*}}
// TESTLICM2: Hoist and Sink pairs attempt
// TESTLICM2: Hoisted
// TESTSIL-LABEL: sil @$s16licm_exclusivity20count_unicodeScalarsyySS17UnicodeScalarViewVF : $@convention(thin) (@guaranteed String.UnicodeScalarView) -> () {
// TESTSIL: bb0(%0 : $String.UnicodeScalarView)
// TESTSIL: bb5:
@@ -66,12 +54,6 @@ public class ClassWithArrs {
B = [Int](repeating: 0, count: N)
}
// TESTLICMWMO-LABEL: Processing loops in {{.*}}ClassWithArrsC7readArr{{.*}}
// TESTLICMWMO: Hoist and Sink pairs attempt
// TESTLICMWMO: Hoisted
// TESTLICMWMO: Successfully hoisted and sank pair
// TESTLICMWMO: Hoisted
// TESTLICMWMO: Successfully hoisted and sank pair
// TESTSILWMO-LABEL: sil {{.*}}@$s16licm_exclusivity13ClassWithArrsC7readArryyF : $@convention(method) (@guaranteed ClassWithArrs) -> () {
// TESTSILWMO: [[R1:%.*]] = ref_element_addr %0 : $ClassWithArrs, #ClassWithArrs.A
// TESTSILWMO: [[R2:%.*]] = ref_element_addr %0 : $ClassWithArrs, #ClassWithArrs.B

View File

@@ -1,4 +1,4 @@
// RUN: %target-sil-opt -sil-print-types -enable-sil-verify-all %s -licm | %FileCheck %s
// RUN: %target-sil-opt -sil-print-types -enable-sil-verify-all %s -loop-invariant-code-motion | %FileCheck %s
// REQUIRES: PTRSIZE=64
// REQUIRES: OS=macosx