swift-mirror/test/SILOptimizer/simplify_cfg_address_phi.sil
Andrew Trick b1fbc2b389 Rewrite SimplifyCFG's trampoline removal.
Avoid introducing new critical edges. Passes will end up resplitting
them, forcing SimplifyCFG to continually rerun. Also, we want to allow
SIL utilities to assume no critical edges, and avoid the need for
several passes to internally split edges and modify the CFG for no
reason.
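
As an illustration, here is a minimal sketch (hypothetical SIL, not
taken from this test) of a trampoline and the critical edge that naive
removal would introduce:

    bb0:
      cond_br %c, bb1, bb2   // bb0 has two successors
    bb1:
      br bb3                 // trampoline: nothing but a jump
    bb2:
      br bb3
    bb3:                     // bb3 has two predecessors
      ...

Retargeting bb0 straight to bb3 (cond_br %c, bb3, bb2) creates the
edge bb0->bb3, whose source has multiple successors and whose
destination has multiple predecessors, i.e. a critical edge that a
later pass would be forced to split, causing SimplifyCFG to rerun.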

Also, handle arbitrary block arguments which may be trivial and
unrelated to the real optimizations that trampoline removal exposes,
such as "unwrapping" enumeration-type arguments.

The previous code was an example of how to write an unstable
optimization. It could be defeated by other code in the function that
isn't directly related to the SSA graph being optimized. In general,
when an optimization can be defeated by unrelated code in the
function, that leads to instability which can be very hard to track
down (I spent multiple full days on this one). In this case, we have
enum-type branch args which need to be simplified by unwrapping
them, but the existence of a trivial and entirely unrelated block
argument would defeat the optimization.
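
To illustrate the unwrapping (a hypothetical SIL fragment, simplified
from the pattern this commit handles):

    // Before: the payload is wrapped only to be passed as a block arg.
    bb1:
      %e = enum $Optional<Int>, #Optional.some!enumelt, %x : $Int
      br bb3(%e : $Optional<Int>)
    bb3(%arg : $Optional<Int>):
      %p = unchecked_enum_data %arg : $Optional<Int>, #Optional.some!enumelt

    // After: the payload is passed directly.
    bb1:
      br bb3(%x : $Int)
    bb3(%p : $Int):

Under the old code, giving bb3 a second, trivial argument that is used
elsewhere would have been enough to block this rewrite.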

// RUN: %target-sil-opt -enable-objc-interop -enable-sil-verify-all %s -jumpthread-simplify-cfg | %FileCheck %s
//
// Test SimplifyCFG's handling of address-type phis. In other words, it makes sure they are never created at all!
sil_stage canonical

import Builtin
import Swift
import SwiftShims

class C {
  @_hasStorage @_hasInitialValue var field: Int { get set }
  @objc deinit
  init()
}
sil @getC : $@convention(thin) () -> C
// Test that jump threading sinks a ref_element_addr, generating a
// non-address phi for its operand.
//
// The retains on separate paths followed by a merged release, and a
// target block with a conditional branch, are necessary just to get
// jump threading to kick in.
//
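// Sketch of the expected rewrite (a simplified illustration; the
// CHECK lines below are authoritative): the strong_release is cloned
// into each predecessor and the ref_element_addr is sunk below the
// merge point, so the block argument keeps its object type $C instead
// of becoming an address-type phi.
//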
// CHECK-LABEL: sil @testJumpThreadRefEltLoop : $@convention(thin) () -> () {
// CHECK: bb0
// CHECK: function_ref @getC : $@convention(thin) () -> C
// CHECK: cond_br undef, bb1, bb2
// CHECK: bb1:
// CHECK: [[C1:%.*]] = apply %0() : $@convention(thin) () -> C
// CHECK: strong_retain [[C1]] : $C
// CHECK: strong_release [[C1]] : $C
// CHECK: br bb3([[C1]] : $C)
// CHECK: bb2:
// CHECK: [[C2:%.*]] = apply %0() : $@convention(thin) () -> C
// CHECK: strong_retain [[C2]] : $C
// CHECK: strong_release [[C2]] : $C
// CHECK: br bb3([[C2]] : $C)
// CHECK: bb3([[PHI:%.*]] : $C):
// CHECK: [[ADR:%.*]] = ref_element_addr [[PHI]] : $C, #C.field
// CHECK: begin_access [read] [dynamic] [[ADR]] : $*Int
// CHECK: load
// CHECK: end_access
// CHECK-LABEL: } // end sil function 'testJumpThreadRefEltLoop'
sil @testJumpThreadRefEltLoop : $@convention(thin) () -> () {
bb0:
  %f = function_ref @getC : $@convention(thin) () -> C
  cond_br undef, bb1, bb2

bb1:
  %c1 = apply %f() : $@convention(thin) () -> C
  strong_retain %c1 : $C
  br bb3(%c1 : $C)

bb2:
  %c2 = apply %f() : $@convention(thin) () -> C
  strong_retain %c2 : $C
  br bb3(%c2 : $C)

bb3(%arg : $C):
  strong_release %arg : $C
  %18 = ref_element_addr %arg : $C, #C.field
  br bb4

bb4:
  %19 = begin_access [read] [dynamic] %18 : $*Int
  %20 = load %19 : $*Int
  end_access %19 : $*Int
  cond_br undef, bb4, bb5

bb5:
  %z = tuple ()
  return %z : $()
}

// Test that jump threading sinks a
// ref_tail_addr->index_addr->struct_element_addr chain and generates
// a phi for the index_addr's index operand.
//
// The retains on separate paths followed by a merged release, and a
// target block with a conditional branch, are necessary just to get
// jump threading to kick in.
//
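// Sketch of the expected rewrite (illustrative; see the CHECK lines):
// the truncOrBitCast builtin is cloned into each predecessor and its
// Builtin.Word result becomes the phi, while the ref_tail_addr ->
// index_addr -> struct_element_addr chain is rematerialized below the
// merge point, so no address value ever flows through a phi.
//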
// CHECK-LABEL: sil @testJumpThreadIndex : $@convention(thin) (__ContiguousArrayStorageBase, Builtin.Int64) -> Builtin.Int32 {
// CHECK: bb0(%0 : $__ContiguousArrayStorageBase, %1 : $Builtin.Int64):
// CHECK: cond_br undef, bb2, bb1
// CHECK: bb1:
// CHECK: apply
// CHECK: strong_retain
// CHECK: strong_release
// CHECK: [[IDX2:%.*]] = builtin "truncOrBitCast_Int64_Word"(%1 : $Builtin.Int64) : $Builtin.Word
// CHECK: br bb3([[IDX2]] : $Builtin.Word)
// CHECK: bb2:
// CHECK: apply
// CHECK: strong_retain
// CHECK: strong_release
// CHECK: [[IDX1:%.*]] = builtin "truncOrBitCast_Int64_Word"(%1 : $Builtin.Int64) : $Builtin.Word
// CHECK: br bb3([[IDX1]] : $Builtin.Word)
// CHECK: bb3([[PHI:%.*]] : $Builtin.Word):
// CHECK: [[TAIL:%.*]] = ref_tail_addr %0 : $__ContiguousArrayStorageBase, $Int32
// CHECK: [[ELT:%.*]] = index_addr [[TAIL]] : $*Int32, [[PHI]] : $Builtin.Word
// CHECK: [[ADR:%.*]] = struct_element_addr [[ELT]] : $*Int32, #Int32._value
// CHECK: load [[ADR]] : $*Builtin.Int32
// CHECK: cond_br undef, bb4, bb5
// CHECK-LABEL: } // end sil function 'testJumpThreadIndex'
sil @testJumpThreadIndex : $@convention(thin) (__ContiguousArrayStorageBase, Builtin.Int64) -> Builtin.Int32 {
bb0(%0 : $__ContiguousArrayStorageBase, %1 : $Builtin.Int64):
  %f = function_ref @getC : $@convention(thin) () -> C
  cond_br undef, bb1, bb2

bb1:
  %c1 = apply %f() : $@convention(thin) () -> C
  strong_retain %c1 : $C
  br bb3(%c1 : $C)

bb2:
  %c2 = apply %f() : $@convention(thin) () -> C
  strong_retain %c2 : $C
  br bb3(%c2 : $C)

bb3(%arg : $C):
  strong_release %arg : $C
  %tail = ref_tail_addr %0 : $__ContiguousArrayStorageBase, $Int32
  %idx = builtin "truncOrBitCast_Int64_Word"(%1 : $Builtin.Int64) : $Builtin.Word
  %elt = index_addr %tail : $*Int32, %idx : $Builtin.Word
  %adr = struct_element_addr %elt : $*Int32, #Int32._value
  br bb4

bb4:
  %val = load %adr : $*Builtin.Int32
  cond_br undef, bb4, bb5

bb5:
  return %val : $Builtin.Int32
}

// Test that debug_value_addr is not unnecessarily lost during address projection sinking.
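//
// When an address projection is sunk into another block, the
// debug_value_addr describing the old address should be recreated
// beside the sunk projection; the CHECK lines verify this for both
// the incoming argument %0 and the sunk field address.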
public class CC<R> {
  let r : R
  init(_ _r: R) { r = _r }
}
sil @useAny : $@convention(thin) <V> (@in_guaranteed V) -> ()
// CHECK-LABEL: sil @testDebugValue : $@convention(method) <R><S> (@in_guaranteed S, @guaranteed CC<R>, Bool) -> () {
// CHECK: debug_value_addr %0 : $*S, let, name "u"
// CHECK: apply %{{.*}}<S>(%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
// CHECK: [[FIELD:%.*]] = ref_element_addr %1 : $CC<R>, #CC.r
// CHECK: debug_value_addr [[FIELD]] : $*R, let, name "u"
// CHECK: apply %{{.*}}<R>([[FIELD]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
// CHECK-LABEL: } // end sil function 'testDebugValue'
sil @testDebugValue : $@convention(method) <R><S> (@in_guaranteed S, @guaranteed CC<R>, Bool) -> () {
bb0(%0 : $*S, %1 : $CC<R>, %2 : $Bool):
  %bool = struct_extract %2 : $Bool, #Bool._value
  cond_br %bool, bb1, bb2

bb1:
  debug_value_addr %0 : $*S, let, name "u"
  %f1 = function_ref @useAny : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
  %call1 = apply %f1<S>(%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
  br bb2

bb2:
  %field = ref_element_addr %1 : $CC<R>, #CC.r
  debug_value_addr %field : $*R, let, name "t"
  cond_br %bool, bb3, bb4

bb3:
  debug_value_addr %field : $*R, let, name "u"
  %f2 = function_ref @useAny : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
  %call2 = apply %f2<R>(%field) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> ()
  br bb4

bb4:
  %z = tuple ()
  return %z : $()
}

// Test multiple uses and cloned allocation.
//
// project_box and struct_element_addr will be sunk into three
// different blocks, but only once per block.
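//
// Sketch of the expected rewrite (illustrative; see the CHECK lines):
// the alloc_box itself is cloned into both predecessors and the box,
// an object value, becomes the phi; project_box and
// struct_element_addr are then re-sunk once into each block that uses
// the address, so the phi never carries an address.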
struct S {
  @_hasStorage @_hasInitialValue var x: Int { get set }
  init(x: Int = 0)
  init()
}
sil @doNothing : $@convention(thin) (@inout Int) -> ()
// CHECK-LABEL: sil @testMultiUse : $@convention(method) (Bool, @inout Int) -> () {
// CHECK: bb0(%0 : $Bool, %1 : $*Int):
// CHECK: cond_br %{{.*}}, bb1, bb2
// CHECK: bb1:
// CHECK: apply %{{.*}}(%1) : $@convention(thin) (@inout Int) -> ()
// CHECK: [[ALLOC2:%.*]] = alloc_box ${ var S }, var, name "s"
// CHECK: [[PROJ2:%.*]] = project_box [[ALLOC2]] : ${ var S }, 0
// CHECK: [[ADR2:%.*]] = struct_element_addr [[PROJ2]] : $*S, #S.x
// CHECK: store %{{.*}} to [[ADR2]] : $*Int
// CHECK: apply %{{.*}}([[ADR2]]) : $@convention(thin) (@inout Int) -> ()
// CHECK: br bb3([[ALLOC2]] : ${ var S })
// CHECK: bb2:
// CHECK: [[ALLOC1:%.*]] = alloc_box ${ var S }, var, name "s"
// CHECK: [[PROJ1:%.*]] = project_box [[ALLOC1]] : ${ var S }, 0
// CHECK: [[ADR1:%.*]] = struct_element_addr [[PROJ1]] : $*S, #S.x
// CHECK: store %{{.*}} to [[ADR1]] : $*Int
// CHECK: br bb3([[ALLOC1]] : ${ var S })
// CHECK: bb3([[BOXARG:%.*]] : ${ var S }):
// CHECK: [[PROJ3:%.*]] = project_box [[BOXARG]] : ${ var S }, 0
// CHECK: [[ADR3:%.*]] = struct_element_addr [[PROJ3]] : $*S, #S.x
// CHECK: apply %{{.*}}([[ADR3]]) : $@convention(thin) (@inout Int) -> ()
// CHECK: release_value [[BOXARG]] : ${ var S }
// CHECK-LABEL: } // end sil function 'testMultiUse'
sil @testMultiUse : $@convention(method) (Bool, @inout Int) -> () {
bb0(%0 : $Bool, %1 : $*Int):
  %bool = struct_extract %0 : $Bool, #Bool._value
  cond_br %bool, bb1, bb2

bb1:
  %f1 = function_ref @doNothing : $@convention(thin) (@inout Int) -> ()
  %call1 = apply %f1(%1) : $@convention(thin) (@inout Int) -> ()
  br bb3

bb2:
  br bb3

bb3:
  %box3 = alloc_box ${ var S }, var, name "s"
  %proj3 = project_box %box3 : ${ var S }, 0
  %adr3 = struct_element_addr %proj3 : $*S, #S.x
  cond_br %bool, bb4, bb5

bb4:
  %i4 = load %1 : $*Int
  store %i4 to %adr3 : $*Int
  %f2 = function_ref @doNothing : $@convention(thin) (@inout Int) -> ()
  %call2 = apply %f2(%adr3) : $@convention(thin) (@inout Int) -> ()
  br bb6

bb5:
  %i5 = load %1 : $*Int
  store %i5 to %adr3 : $*Int
  br bb6

bb6:
  %f6 = function_ref @doNothing : $@convention(thin) (@inout Int) -> ()
  %call6 = apply %f6(%adr3) : $@convention(thin) (@inout Int) -> ()
  release_value %box3 : ${ var S }
  %z = tuple ()
  return %z : $()
}