// RUN: %target-sil-opt -sil-print-types -enforce-exclusivity=none -enable-sil-verify-all %s -loop-invariant-code-motion | %FileCheck %s
// REQUIRES: swift_in_compiler
// Declare this SIL to be canonical because some tests break raw SIL
// conventions, e.g. address-type block args. -enforce-exclusivity=none is also
// required to allow address-type block args in canonical SIL.
sil_stage canonical
import Builtin
import Swift
class Storage {
init()
}
struct NonCopyable : ~Copyable {
var x: Int
}
struct S {
var i: Int
var s: String
}
struct S2 {
var i: Int
var s1: String
var s2: String
}
struct Pair {
var t: (a: Int, b: Int)
}
// globalArray
sil_global @globalArray : $Storage
// CHECK-LABEL: @memset
// CHECK: bb0
// CHECK: load %0
// CHECK: br bb2
// CHECK: bb2({{.*}}):
// CHECK-NOT: load
// CHECK: cond_br
sil @memset : $@convention(thin) (@inout Builtin.NativeObject, Int) -> () {
bb0(%0 : $*Builtin.NativeObject, %1 : $Int):
%5 = integer_literal $Builtin.Int1, -1
%46 = integer_literal $Builtin.Word, 0
br bb2(%46 : $Builtin.Word)
bb1:
%52 = tuple ()
return %52 : $()
bb2(%54 : $Builtin.Word):
%55 = integer_literal $Builtin.Word, 1
%57 = builtin "sadd_with_overflow_Word"(%54 : $Builtin.Word, %55 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%58 = tuple_extract %57 : $(Builtin.Word, Builtin.Int1), 0
%59 = load %0 : $*Builtin.NativeObject
%60 = integer_literal $Builtin.Word, 100
%96 = ref_to_raw_pointer %59 : $Builtin.NativeObject to $Builtin.RawPointer
%97 = index_raw_pointer %96 : $Builtin.RawPointer, %58 : $Builtin.Word
%98 = pointer_to_address %97 : $Builtin.RawPointer to [strict] $*Int
%99 = index_addr %98 : $*Int, %54 : $Builtin.Word
fix_lifetime %59 : $Builtin.NativeObject
store %1 to %99 : $*Int
%101 = builtin "cmp_eq_Word"(%58 : $Builtin.Word, %60 : $Builtin.Word) : $Builtin.Int1
cond_br %101, bb1, bb2(%58 : $Builtin.Word)
}
// CHECK-LABEL: @must_move_condfail
// CHECK: bb0
// CHECK: load %0
// CHECK: cond_fail
// CHECK: [[INVARIANTADDR:%.*]] = index_addr
// CHECK: load [[INVARIANTADDR]]
// CHECK: br bb2
// CHECK: bb2({{.*}}):
// The address computation of the load was guarded by the cond_fail. If we hoist
// the load we must also hoist the cond_fail.
// CHECK-NOT: cond_fail
// CHECK-NOT: load
// CHECK: cond_br
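// A rough sketch of why (illustrative only, not a checked pattern): the
// hoisted load's address
//   %63 = index_addr %62 : $*Builtin.NativeObject, %52 : $Builtin.Word
// is computed from %52, whose overflow bit %53 is tested by the
// cond_fail. Hoisting the load without the cond_fail could dereference
// an address the original program would have trapped before forming.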
sil @must_move_condfail : $@convention(thin) (@inout Builtin.NativeObject, Int, Builtin.Word) -> () {
bb0(%0 : $*Builtin.NativeObject, %1 : $Int, %2: $Builtin.Word):
%5 = integer_literal $Builtin.Int1, -1
%6 = load %0 : $*Builtin.NativeObject
%46 = integer_literal $Builtin.Word, 0
br bb2(%46 : $Builtin.Word)
bb1:
%102 = tuple ()
return %102 : $()
bb2(%48 : $Builtin.Word):
%51 = builtin "sadd_with_overflow_Word"(%2 : $Builtin.Word, %46 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%52 = tuple_extract %51 : $(Builtin.Word, Builtin.Int1), 0
%53 = tuple_extract %51 : $(Builtin.Word, Builtin.Int1), 1
cond_fail %53 : $Builtin.Int1
%55 = integer_literal $Builtin.Word, 1
%57 = builtin "sadd_with_overflow_Word"(%48 : $Builtin.Word, %55 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%58 = tuple_extract %57 : $(Builtin.Word, Builtin.Int1), 0
%60 = integer_literal $Builtin.Word, 100
%61 = unchecked_ref_cast %6 : $Builtin.NativeObject to $C
%62 = ref_tail_addr %61 : $C, $Builtin.NativeObject
%63 = index_addr %62 : $*Builtin.NativeObject, %52 : $Builtin.Word
%64 = load %63 : $*Builtin.NativeObject
%96 = unchecked_ref_cast %64 : $Builtin.NativeObject to $Storage
%97 = ref_tail_addr %96 : $Storage, $Int
%99 = index_addr %97 : $*Int, %48 : $Builtin.Word
store %1 to %99 : $*Int
%101 = builtin "cmp_eq_Word"(%58 : $Builtin.Word, %60 : $Builtin.Word) : $Builtin.Int1
cond_br %101, bb1, bb2(%58 : $Builtin.Word)
}
// CHECK-LABEL: sil @hoist_outer_loop
// CHECK: bb0([[ADDR:%.*]] : $*Builtin.Int1
// CHECK: load [[ADDR]]
// CHECK: integer_literal $Builtin.Word, 101
// CHECK: br bb1
// CHECK: return
sil @hoist_outer_loop : $@convention(thin) (@inout Builtin.Int1, Int) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $Int):
%2 = integer_literal $Builtin.Int1, -1
%3 = integer_literal $Builtin.Word, 0
br bb1
// Outer loop.
bb1:
%5 = load %0 : $*Builtin.Int1
%6 = integer_literal $Builtin.Word, 101
cond_br %5, bb2, bb3
bb2:
cond_br %5, bb4, bb1
// Inner loop.
bb3:
cond_br %5, bb2, bb3
bb4:
%10 = tuple ()
return %10 : $()
}
// CHECK-LABEL: sil @dont_hoist_outer_loop
// CHECK: bb0([[ADDR:%.*]] : $*Builtin.Int1
// CHECK: integer_literal $Builtin.Word, 101
// CHECK: br bb1
// CHECK: bb1:
// CHECK: load [[ADDR]]
// CHECK: return
sil @dont_hoist_outer_loop : $@convention(thin) (@inout Builtin.Int1, Int) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $Int):
%2 = integer_literal $Builtin.Int1, -1
%3 = integer_literal $Builtin.Word, 0
br bb1
// Outer loop.
bb1:
%5 = load %0 : $*Builtin.Int1
%6 = integer_literal $Builtin.Word, 101
cond_br %5, bb2, bb3
bb2:
cond_br %5, bb4, bb1
// Inner loop.
bb3:
store %2 to %0 : $*Builtin.Int1
cond_br %5, bb2, bb3
bb4:
%10 = tuple ()
return %10 : $()
}
sil [_semantics "array.get_count"] @getCount : $@convention(method) (@guaranteed Array<Int>) -> Int
sil @user : $@convention(thin) (Int) -> ()
// CHECK-LABEL: sil @hoist_get_count_on_low_level_sil
// CHECK: {{^}}bb0(%0 : $Array<Int>):
// CHECK: apply
// CHECK: {{^}}bb1:
// CHECK: apply
// CHECK: {{^}}bb2:
// CHECK: return
sil @hoist_get_count_on_low_level_sil : $@convention(thin) (@guaranteed Array<Int>) -> () {
bb0(%0 : $Array<Int>):
br bb1
bb1:
%f1 = function_ref @getCount : $@convention(method) (@guaranteed Array<Int>) -> Int
%f2 = function_ref @user : $@convention(thin) (Int) -> ()
%c1 = apply %f1(%0) : $@convention(method) (@guaranteed Array<Int>) -> Int
%c2 = apply %f2(%c1) : $@convention(thin) (Int) -> ()
cond_br undef, bb1, bb2
bb2:
%r1 = tuple ()
return %r1 : $()
}
sil @use_addr : $@convention(thin) (@inout Int32) -> ()
// CHECK-LABEL: sil @dont_hoist_aliased_stack_location
// CHECK: {{^}}bb0
// CHECK-NOT: load
// CHECK: {{^}}bb1:
// CHECK: store
// CHECK: apply
// CHECK: {{^}}bb2:
// CHECK: return
sil @dont_hoist_aliased_stack_location : $@convention(thin) (Int32) -> () {
bb0(%0 : $Int32):
%313 = alloc_stack $Int32
br bb1
bb1:
store %0 to %313 : $*Int32
%f = function_ref @use_addr : $@convention(thin) (@inout Int32) -> ()
%a = apply %f(%313) : $@convention(thin) (@inout Int32) -> ()
cond_br undef, bb1, bb2
bb2:
dealloc_stack %313 : $*Int32
%52 = tuple ()
return %52 : $()
}
public protocol P : AnyObject {
func foo() -> Int32
func boo() -> Int32
}
// Check that LICM does not hoist a metatype instruction before
// the open_existential instruction which creates the archetype,
// because this would break the dominance relation between them.
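// A rough sketch (illustrative only): hoisting
//   %3 = metatype $@thick (@opened(...) Self).Type
// into the preheader would place it above
//   %2 = open_existential_ref %existential : $P to $@opened(...) Self
// so the opened archetype in %3's type would no longer have a
// dominating definition.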
// CHECK-LABEL: sil @dont_hoist_metatype
// CHECK-NOT: metatype
// CHECK-NOT: witness_method
// CHECK: bb1({{%.*}} : $any P)
// CHECK-NOT: metatype
// CHECK-NOT: witness_method
// CHECK: open_existential_ref
// CHECK: metatype
// CHECK: witness_method
// CHECK: cond_br
sil @dont_hoist_metatype : $@convention(thin) (@inout Builtin.Int1, @owned P) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $P):
br bb1(%1 : $P)
// Loop
bb1(%existential : $P):
%2 = open_existential_ref %existential : $P to $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60", P) Self
%3 = metatype $@thick (@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60", P) Self).Type
%4 = witness_method $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60", P) Self, #P.foo, %2 : $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60", P) Self : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@guaranteed τ_0_0) -> Int32
%5 = apply %4<@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60", P) Self>(%2) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@guaranteed τ_0_0) -> Int32
%6 = load %0 : $*Builtin.Int1
cond_br %6, bb3, bb1(%existential : $P)
bb3:
br bb4
bb4:
strong_release %1 : $P
%10 = tuple ()
return %10 : $()
}
// CHECK-LABEL: sil @dont_hoist_existential_meta_type
// CHECK: bb0({{.*}}:
// CHECK-NOT: existential_metatype
// CHECK: bb1:
// CHECK: existential_metatype
// CHECK: cond_br
// CHECK: bb2:
sil @dont_hoist_existential_meta_type : $@convention(thin) (@in P) -> () {
bb0(%0 : $*P):
%1 = alloc_stack $P
br bb1
bb1:
copy_addr %0 to [init] %1 : $*P
%2 = existential_metatype $@thick P.Type, %1 : $*P
cond_br undef, bb1, bb2
bb2:
dealloc_stack %1 : $*P
destroy_addr %0 : $*P
%52 = tuple ()
return %52 : $()
}
sil @get_unknown_value : $@convention(thin) () -> Builtin.Int32
sil @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
sil @callee : $@convention(thin) (@inout Builtin.Int32) -> () {
bb0(%0 : $*Builtin.Int32):
%1 = function_ref @get_unknown_value : $@convention(thin) () -> Builtin.Int32
%2 = apply %1() : $@convention(thin) () -> Builtin.Int32
store %2 to %0 : $*Builtin.Int32
%9999 = tuple()
return %9999 : $()
}
sil @use_value : $@convention(thin) (Builtin.Int32) -> ()
// Check if escape analysis figures out that the alloc_stack escapes to callee.
//
// CHECK-LABEL: sil @dont_hoist_aliased_load
// CHECK: bb2:
// CHECK-NEXT: apply
// CHECK-NEXT: load
// CHECK-NEXT: apply
sil @dont_hoist_aliased_load : $@convention(thin) () -> () {
bb0:
%0 = alloc_stack $Builtin.Int32
%1 = integer_literal $Builtin.Int32, 0
%3 = function_ref @callee : $@convention(thin) (@inout Builtin.Int32) -> ()
%5 = function_ref @use_value : $@convention(thin) (Builtin.Int32) -> ()
%unknown_value_fn = function_ref @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
store %1 to %0 : $*Builtin.Int32
br bb1
bb1:
br bb2
bb2:
apply %3(%0) : $@convention(thin) (@inout Builtin.Int32) -> ()
%4 = load %0 : $*Builtin.Int32
%6 = apply %unknown_value_fn() : $@convention(thin) () -> Builtin.Int32
%33 = builtin "cmp_eq_Int32"(%4 : $Builtin.Int32, %6 : $Builtin.Int32) : $Builtin.Int1
cond_br %33, bb2, bb3
bb3:
%9999 = tuple()
dealloc_stack %0 : $*Builtin.Int32
return %9999 : $()
}
class RefElemClass {
var x : Int32
init()
}
// Check hoisting of ref_element_addr in conditional control flow (for exclusivity)
//
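// A rough sketch (illustrative only): %x = ref_element_addr %0 : $RefElemClass,
// #RefElemClass.x has no side effects and its only operand %0 is defined
// outside the loop, so the projection can be hoisted into the preheader
// even though it originally executes only on the bb3 path.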
// CHECK-LABEL: sil @hoist_ref_elem
// CHECK: bb0(%0 : $RefElemClass):
// CHECK-NEXT: ref_element_addr %0 : $RefElemClass, #RefElemClass.x
// CHECK-NEXT: br bb1
sil @hoist_ref_elem : $@convention(thin) (RefElemClass) -> () {
bb0(%0 : $RefElemClass):
br bb1
// loop.
bb1:
cond_br undef, bb2, bb3
bb2:
cond_br undef, bb4, bb1
bb3:
%x = ref_element_addr %0 : $RefElemClass, #RefElemClass.x
br bb1
bb4:
%10 = tuple ()
return %10 : $()
}
sil @potential_escape : $@convention(thin) (@guaranteed RefElemClass) -> ()
// CHECK-LABEL: sil @dont_hoist_begin_cow_mutation
// CHECK: bb1:
// CHECK-NEXT: begin_cow_mutation
// CHECK-NEXT: end_cow_mutation
// CHECK-NEXT: apply
sil @dont_hoist_begin_cow_mutation : $@convention(thin) (@owned RefElemClass) -> @owned RefElemClass {
bb0(%0 : $RefElemClass):
br bb1
bb1:
(%u, %m) = begin_cow_mutation %0 : $RefElemClass
%b = end_cow_mutation %m : $RefElemClass
%f = function_ref @potential_escape : $@convention(thin) (@guaranteed RefElemClass) -> ()
%a = apply %f(%b) : $@convention(thin) (@guaranteed RefElemClass) -> ()
cond_br undef, bb1, bb2
bb2:
return %b : $RefElemClass
}
// CHECK-LABEL: sil @hoist_load_and_store
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK-NOT: store
// CHECK: bb2:
// CHECK: br bb1([[V3]] : $Int32)
// CHECK: bb3:
// CHECK: store [[V3]] to %0
// CHECK: } // end sil function 'hoist_load_and_store'
sil @hoist_load_and_store : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
store %20 to %0 : $*Int32
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%12 = tuple ()
return %12 : $()
}
// Just make sure the optimizer does not crash in case the operand of the
// store is the load itself.
sil @hoist_load_and_redundant_store : $@convention(thin) (@inout Int32) -> () {
bb0(%0 : $*Int32):
br bb1
bb1:
%1 = load %0 : $*Int32
store %1 to %0 : $*Int32
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%12 = tuple ()
return %12 : $()
}
// CHECK-LABEL: sil @hoist_load_and_two_stores
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK: bb2:
// CHECK-NOT: store
// CHECK: br bb4
// CHECK: bb3:
// CHECK-NOT: store
// CHECK: br bb4
// CHECK: bb4:
// CHECK: cond_br
// CHECK: bb5:
// CHECK: br bb1([[V3]] : $Int32)
// CHECK: bb6:
// CHECK: store [[V3]] to %0
// CHECK: } // end sil function 'hoist_load_and_two_stores'
sil @hoist_load_and_two_stores : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
store %20 to %0 : $*Int32
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
// CHECK-LABEL: sil @dont_hoist_stores_not_dominating_exit
// CHECK: bb0(%0 : $*Int32, %1 : $Int32):
// CHECK-NOT: load
// CHECK: bb1:
// CHECK: load
// CHECK: bb3:
// CHECK: store
// CHECK: bb4:
// CHECK: bb6:
// CHECK-NOT: store
// CHECK: } // end sil function 'dont_hoist_stores_not_dominating_exit'
sil @dont_hoist_stores_not_dominating_exit : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
// CHECK-LABEL: sil @hoist_when_store_is_in_preheader
// CHECK: bb0(%0 : $*Int32, %1 : $Int32):
// CHECK: store
// CHECK: load
// CHECK: bb1(%{{[0-9]+}} : $Int32):
// CHECK-NOT: load
// CHECK-NOT: store
// CHECK: bb4([[P:%[0-9]+]] : $Int32):
// CHECK: bb6:
// CHECK: store [[P]] to %0
// CHECK: } // end sil function 'hoist_when_store_is_in_preheader'
sil @hoist_when_store_is_in_preheader : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
store %1 to %0 : $*Int32
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
// CHECK-LABEL: sil @dont_sink_store_with_nonmaterializable_load_projection
// CHECK: alloc_stack
// CHECK-NOT: store
// CHECK: bb1(%8 : $Builtin.Int64, %9 : $Builtin.Int64):
// CHECK: store
// CHECK: [[INDEXADDR:%.*]] = index_addr
// CHECK: load [[INDEXADDR]]
// CHECK: bb3:
// CHECK-NOT: store
// CHECK: dealloc_stack
// CHECK: } // end sil function 'dont_sink_store_with_nonmaterializable_load_projection'
sil @dont_sink_store_with_nonmaterializable_load_projection : $@convention(method) (InlineArray<3, Int>) -> () {
bb0(%0 : $InlineArray<3, Int>):
%1 = integer_literal $Builtin.Int64, 0
%2 = integer_literal $Builtin.Int64, 1
%3 = integer_literal $Builtin.Int1, -1
%4 = struct_extract %0, #InlineArray._storage
%5 = alloc_stack $Builtin.FixedArray<3, Int>
%6 = vector_base_addr %5
br bb1(%1, %1)
bb1(%8 : $Builtin.Int64, %9 : $Builtin.Int64):
%10 = builtin "sadd_with_overflow_Int64"(%8, %2, %3) : $(Builtin.Int64, Builtin.Int1)
%11 = tuple_extract %10, 0
store %4 to %5
%13 = builtin "truncOrBitCast_Int64_Word"(%9) : $Builtin.Word
%14 = index_addr [stack_protection] %6, %13
%15 = load %14
cond_br undef, bb2, bb3
bb2:
br bb1(%1, %1)
bb3:
dealloc_stack %5
%18 = tuple ()
return %18
}
// CHECK-LABEL: sil @hoist_loads_and_stores_multiple_exits
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK: bb2:
// CHECK-NOT: store
// CHECK: cond_br undef, bb1([[V3]] : $Int32), bb3
// CHECK: bb3:
// CHECK: store [[V3]] to %0
// CHECK: br bb6
// CHECK: bb4:
// CHECK-NOT: store
// CHECK: cond_br undef, bb1([[V3]] : $Int32), bb5
// CHECK: bb5:
// CHECK: store [[V3]] to %0
// CHECK: br bb6
// CHECK: bb6:
// CHECK: } // end sil function 'hoist_loads_and_stores_multiple_exits'
sil @hoist_loads_and_stores_multiple_exits : $@convention(thin) (@inout Int32, Int32) -> () {
// %0 // users: %14, %17, %5, %2
// %1 // user: %3
bb0(%0 : $*Int32, %1 : $Int32):
%2 = struct_element_addr %0 : $*Int32, #Int32._value
%3 = struct_extract %1 : $Int32, #Int32._value // user: %9
%4 = integer_literal $Builtin.Int1, 0 // user: %9
%5 = load %0 : $*Int32 // user: %6
br bb1(%5 : $Int32) // id: %6
// %7 // user: %8
bb1(%7 : $Int32): // Preds: bb0 bb2 bb4
%8 = struct_extract %7 : $Int32, #Int32._value // user: %9
%9 = builtin "sadd_with_overflow_Int64"(%8 : $Builtin.Int32, %3 : $Builtin.Int32, %4 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1) // user: %10
%10 = tuple_extract %9 : $(Builtin.Int32, Builtin.Int1), 0 // user: %11
%11 = struct $Int32 (%10 : $Builtin.Int32) // users: %14, %17, %13, %16
cond_br undef, bb2, bb4 // id: %12
bb2: // Preds: bb1
cond_br undef, bb1(%11 : $Int32), bb3 // id: %13
bb3: // Preds: bb2
store %11 to %0 : $*Int32 // id: %14
br bb6 // id: %15
bb4: // Preds: bb1
cond_br undef, bb1(%11 : $Int32), bb5 // id: %16
bb5: // Preds: bb4
store %11 to %0 : $*Int32 // id: %17
br bb6 // id: %18
bb6: // Preds: bb3 bb5
%19 = tuple () // user: %20
return %19 : $() // id: %20
} // end sil function 'hoist_loads_and_stores_multiple_exits'
// ==================================================================
// Test combined load/store hoisting/sinking with aliases
struct Index {
@_hasStorage var value: Int64 { get set }
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with obvious aliasing loads
// The loop contains loads and stores to the same access path
// (%3 alloc_stack -> #0 -> #0), but they don't share the same
// projection instructions. LICM should still hoist the loads and sink
// the stores in a combined transformation.
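// A rough sketch (illustrative only), with the address chains abbreviated:
//   %val  = load %innerAddr2        // stack -> #Index.value -> #Int64._value
//   store %intv to %outerAddr2      // stack -> #Index.value
//   %val2 = load %innerAddr1        // same access path as %val
// All three resolve to the same location, so the loads become a phi and a
// single store is sunk to the loop exit.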
//
// CHECK-LABEL: sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store {{.*}} to %{{.*}} : $*Int64
// CHECK: load %{{.*}} : $*Int64
// CHECK: br bb1
// CHECK-NOT: {{(load|store)}}
// CHECK: bb3:
// CHECK-NOT: {{(load|store)}}
// CHECK: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingLoad'
sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 {
bb0(%0 : $Int64):
%zero = integer_literal $Builtin.Int64, 0
%intz = struct $Int64(%zero : $Builtin.Int64)
%stackAddr = alloc_stack $Index
%outerAddr1 = struct_element_addr %stackAddr : $*Index, #Index.value
store %intz to %outerAddr1 : $*Int64
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%outerAddr2 = struct_element_addr %stackAddr : $*Index, #Index.value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
%intv = struct $Int64(%zero : $Builtin.Int64)
store %intv to %outerAddr2 : $*Int64
%val2 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %stackAddr : $*Index
%result = struct $Int64(%val2 : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with obvious aliasing stores
// CHECK-LABEL: sil shared @testCombinedLdStAliasingStore : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store
// CHECK-NOT: {{(load|store)}}
// CHECK: bb1:
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: cond_br
// CHECK-NOT: {{(load|store)}}
// CHECK: load
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingStore'
sil shared @testCombinedLdStAliasingStore : $@convention(method) (Int64) -> Int64 {
bb0(%0 : $Int64):
%zero = integer_literal $Builtin.Int64, 0
%intz = struct $Int64(%zero : $Builtin.Int64)
%stackAddr = alloc_stack $Index
%outerAddr1 = struct_element_addr %stackAddr : $*Index, #Index.value
store %intz to %outerAddr1 : $*Int64
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%outerAddr2 = struct_element_addr %stackAddr : $*Index, #Index.value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
%intv = struct $Int64(%zero : $Builtin.Int64)
store %intv to %outerAddr2 : $*Int64
store %val to %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %stackAddr : $*Index
%final = load %innerAddr2 : $*Builtin.Int64
%result = struct $Int64(%final : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with unknown aliasing loads
// CHECK-LABEL: sil shared @testCombinedLdStUnknownLoad : $@convention(method) (Int64, Builtin.RawPointer, Builtin.RawPointer) -> Int64 {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer, %2 : $Builtin.RawPointer):
// CHECK-NOT: {{(load|store)}}
// CHECK: bb1:
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: cond_br
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStUnknownLoad'
sil shared @testCombinedLdStUnknownLoad : $@convention(method) (Int64, Builtin.RawPointer, Builtin.RawPointer) -> Int64 {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer, %2 : $Builtin.RawPointer):
%addr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%addr2 = pointer_to_address %2 : $Builtin.RawPointer to $*Index
%outerAddr1 = struct_element_addr %addr1 : $*Index, #Index.value
%outerAddr2 = struct_element_addr %addr2 : $*Index, #Index.value
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
store %0 to %outerAddr2 : $*Int64
%val2 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = struct $Int64(%val2 : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Reduced test case from rdar://61246061
//
// Test miscompilation of BidirectionalCollection<IndexSet>._distance with
// combined load/store hoisting/sinking with multiple loads from
// aliasing addresses.
// getRange
sil @getRange : $@convention(thin) () -> Range<Int64>
// CHECK-LABEL: sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store %0 to %{{.*}} : $*Int64
// CHECK: load %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK: bb7:
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testLICMReducedCombinedLdStExtraProjection'
sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 {
// %0 // users: %5, %1
bb0(%0 : $Int64):
%1 = struct_extract %0 : $Int64, #Int64._value // users: %35, %20
%2 = integer_literal $Builtin.Int64, 0 // user: %9
%3 = alloc_stack $Index // users: %41, %13, %4
%4 = struct_element_addr %3 : $*Index, #Index.value // users: %8, %5
store %0 to %4 : $*Int64 // id: %5
%6 = integer_literal $Builtin.Int64, 1 // user: %11
%7 = integer_literal $Builtin.Int1, -1 // user: %11
%8 = struct_element_addr %4 : $*Int64, #Int64._value // user: %34
br bb1(%2 : $Builtin.Int64) // id: %9
// %10 // user: %11
bb1(%10 : $Builtin.Int64): // Preds: bb8 bb0
%11 = builtin "sadd_with_overflow_Int64"(%10 : $Builtin.Int64, %6 : $Builtin.Int64, %7 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) // user: %12
%12 = tuple_extract %11 : $(Builtin.Int64, Builtin.Int1), 0 // users: %38, %37
%13 = struct_element_addr %3 : $*Index, #Index.value // users: %32, %27, %24, %14
%14 = struct_element_addr %13 : $*Int64, #Int64._value // user: %15
%15 = load %14 : $*Builtin.Int64 // user: %18
%16 = integer_literal $Builtin.Int64, 1 // user: %18
%17 = integer_literal $Builtin.Int1, -1 // user: %18
%18 = builtin "sadd_with_overflow_Int64"(%15 : $Builtin.Int64, %16 : $Builtin.Int64, %17 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) // user: %19
%19 = tuple_extract %18 : $(Builtin.Int64, Builtin.Int1), 0 // users: %26, %23, %20
%20 = builtin "cmp_eq_Int64"(%19 : $Builtin.Int64, %1 : $Builtin.Int64) : $Builtin.Int1 // user: %21
cond_br %20, bb2, bb3 // id: %21
bb2: // Preds: bb1
cond_br undef, bb4, bb5 // id: %22
bb3: // Preds: bb1
%23 = struct $Int64 (%19 : $Builtin.Int64) // user: %24
store %23 to %13 : $*Int64 // id: %24
br bb6 // id: %25
bb4: // Preds: bb2
%26 = struct $Int64 (%19 : $Builtin.Int64) // user: %27
store %26 to %13 : $*Int64 // id: %27
br bb6 // id: %28
bb5: // Preds: bb2
// function_ref getRange
%29 = function_ref @getRange : $@convention(thin) () -> Range<Int64> // user: %30
%30 = apply %29() : $@convention(thin) () -> Range<Int64> // user: %31
%31 = struct_extract %30 : $Range<Int64>, #Range.lowerBound // user: %32
store %31 to %13 : $*Int64 // id: %32
br bb6 // id: %33
bb6: // Preds: bb5 bb4 bb3
%34 = load %8 : $*Builtin.Int64 // user: %35
%35 = builtin "cmp_eq_Int64"(%34 : $Builtin.Int64, %1 : $Builtin.Int64) : $Builtin.Int1 // user: %36
cond_br %35, bb7, bb8 // id: %36
bb7: // Preds: bb6
br bb9(%12 : $Builtin.Int64) // id: %37
bb8: // Preds: bb6
br bb1(%12 : $Builtin.Int64) // id: %38
// %39 // user: %40
bb9(%39 : $Builtin.Int64): // Preds: bb7
%40 = struct $Int64 (%39 : $Builtin.Int64) // user: %42
dealloc_stack %3 : $*Index // id: %41
return %40 : $Int64 // id: %42
}
// testConditionalTrapInInfiniteSyncLoop and
// testConditionalTrapDominatingSyncLoopExit
//
// It's legal for the optimizer to consider code after the loop as
// always reachable, but when a loop has no exits, or when the loop's
// exits are dominated by a conditional statement, we must not treat
// conditional statements within the loop as dominating all possible
// execution paths through the loop. At least not when some path through
// the loop contains a "synchronization point", such as a call to a
// function that may contain a memory barrier, perform I/O, or exit the
// program.
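// A minimal sketch of the hazard (illustrative only):
//   bb1: cond_br %0, bb2, bb3
//   bb2: cond_fail %1              // maybe never executed
//   bb3: apply @mayExit()          // may perform I/O or never return
// Hoisting the cond_fail into the preheader would trap on executions
// that only ever take the bb3 path, which is observable behavior.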
sil @mayExit : $@convention(thin) () -> ()
// CHECK-LABEL: sil @testConditionalTrapInInfiniteSyncLoop : $@convention(thin) (Builtin.Int1, Builtin.Int1) -> () {
// CHECK: bb0
// CHECK-NOT: cond_fail
// CHECK: br bb1
// CHECK: bb1:
// CHECK: cond_br %0, bb2, bb3
// CHECK: bb2:
// CHECK: cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
// CHECK-LABEL: } // end sil function 'testConditionalTrapInInfiniteSyncLoop'
sil @testConditionalTrapInInfiniteSyncLoop : $@convention(thin) (Builtin.Int1, Builtin.Int1) -> () {
bb0(%0 : $Builtin.Int1, %1 : $Builtin.Int1):
br bb1
bb1: // loop head
cond_br %0, bb2, bb3
bb2: // maybe never executed
cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
br bb4
bb3:
// synchronization point: has "real" side-effects that we can't
// reorder with traps
%f = function_ref @mayExit : $@convention(thin) () -> ()
apply %f() : $@convention(thin) () -> ()
br bb4
bb4: // latch
br bb1
}
// CHECK-LABEL: sil @testConditionalTrapDominatingSyncLoopExit : $@convention(thin) (Builtin.Int1, Builtin.Int1, Builtin.Int1) -> () {
// CHECK: bb0
// CHECK-NOT: cond_fail
// CHECK: br bb1
// CHECK: bb1:
// CHECK: cond_br %0, bb2, bb4
// CHECK: bb2:
// CHECK: cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
// CHECK-LABEL: } // end sil function 'testConditionalTrapDominatingSyncLoopExit'
sil @testConditionalTrapDominatingSyncLoopExit : $@convention(thin) (Builtin.Int1, Builtin.Int1, Builtin.Int1) -> () {
bb0(%0 : $Builtin.Int1, %1 : $Builtin.Int1, %2 : $Builtin.Int1):
br bb1
bb1: // loop head
cond_br %0, bb2, bb4
bb2: // maybe never executed
cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
cond_br %2, bb3, bb5
bb3: // tail
br bb1
bb4:
// synchronization point: has "real" side-effects that we can't
// reorder with traps
%f = function_ref @mayExit : $@convention(thin) () -> ()
apply %f() : $@convention(thin) () -> ()
br bb1
bb5:
%99 = tuple ()
return %99 : $()
}
// Test load splitting with a loop-invariant stored value. The loop
// will be empty after combined load/store hoisting/sinking.
//
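// A rough sketch of the transformation (illustrative only): the wide loads
//   %val1 = load %outerAddr1 : $*Index
//   %val2 = load %middleAddr1 : $*Int64
// are split into loads of the leaf field and hoisted into the preheader;
// inside the loop the wide value is rematerialized by projection, e.g.
//   %outerVal = struct $Index (%phi : $Int64)
// and the loop-invariant store of %0 is sunk to the exit block.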
// CHECK-LABEL: sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: [[STOREDVAL:%.*]] = struct_extract %0 : $Int64, #Int64._value
// CHECK: br bb1([[PRELOAD]] : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1(%0 : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store %0 to %{{.*}} : $*Int64
// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[PHI]] : $Int64, [[STOREDVAL]] : $Builtin.Int64)
// CHECK-LABEL: } // end sil function 'testLoadSplit'
sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value
br bb1
bb1:
%val1 = load %outerAddr1 : $*Index
%val2 = load %middleAddr1 : $*Int64
%middleAddr2 = struct_element_addr %outerAddr1 : $*Index, #Index.value
store %0 to %middleAddr2 : $*Int64
%innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value
%val3 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%val1 : $Index, %val2 : $Int64, %val3 : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
} // end sil function 'testLoadSplit'
// Test load splitting with a loop-varying stored value.
// CHECK-LABEL: sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: br bb1(%{{.*}} : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64)
// CHECK-NEXT: [[EXTRACT:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value
// CHECK-NEXT: builtin "uadd_with_overflow_Int32"([[EXTRACT]] : $Builtin.Int64
// CHECK-NEXT: tuple_extract
// CHECK-NEXT: [[ADD:%.*]] = struct $Int64
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1([[ADD]] : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store [[ADD]] to %{{.*}} : $*Int64
// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[ADD]] : $Int64, [[EXTRACT]] : $Builtin.Int64)
// CHECK-LABEL: } // end sil function 'testLoadSplitPhi'
sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value
%innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value
br bb1
bb1:
%outerVal = load %outerAddr1 : $*Index
%innerVal = load %innerAddr1 : $*Builtin.Int64
%one = integer_literal $Builtin.Int64, 1
%zero = integer_literal $Builtin.Int1, 0
%add = builtin "uadd_with_overflow_Int32"(%innerVal : $Builtin.Int64, %one : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
%inc = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0
%middleAddr2 = struct_element_addr %outerAddr1 : $*Index, #Index.value
%newVal = struct $Int64 (%inc : $Builtin.Int64)
store %newVal to %middleAddr2 : $*Int64
%middleVal = load %middleAddr1 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%outerVal : $Index, %middleVal : $Int64, %innerVal : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
} // end sil function 'testLoadSplitPhi'
struct State {
@_hasStorage var valueSet: (Int64, Int64, Int64) { get set }
@_hasStorage var singleValue: Int64 { get set }
}
// Test that we can remove a store to an individual tuple element when
// the struct containing the tuple is used within the loop.
// The optimized loop should only contain the add operation and a phi, with no memory access.
//
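// A rough sketch (illustrative only): the wide load of $State is split
// into loads of tuple elements 0, 1, and 2 plus #State.singleValue, all
// hoisted to the preheader. Element 1, the only location stored to in
// the loop, becomes a phi, and the aggregate is rebuilt in the loop:
//   %tuple = tuple (%elt0 : $Int64, %phi : $Int64, %elt2 : $Int64)
//   %state = struct $State (%tuple : $(Int64, Int64, Int64), %single : $Int64)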
// CHECK-LABEL: sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State {
// CHECK: bb0(%0 : $Builtin.RawPointer):
// CHECK: [[HOISTADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// ...Preload stored element #1
// CHECK: tuple_element_addr
// CHECK: [[PRELOADADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1
// CHECK: [[PRELOAD:%.*]] = load [[PRELOADADR]] : $*Int64
// ...Split element 0
// CHECK: [[ELT0:%.*]] = load [[HOISTADR]] : $*Int64
// CHECK: [[HOISTVAL:%.*]] = struct_extract [[ELT0]] : $Int64, #Int64._value
// CHECK: [[HOISTADR2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// CHECK: [[ELT0B:%.*]] = load [[HOISTADR2]] : $*Int64
// ...Split element 2
// CHECK: [[SPLIT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2
// CHECK: [[ELT2:%.*]] = load [[SPLIT2]] : $*Int64
// ...Split State.singleValue
// CHECK: [[SINGLEADR:%.*]] = struct_element_addr %{{.*}} : $*State, #State.singleValue
// CHECK: [[SINGLEVAL:%.*]] = load [[SINGLEADR]] : $*Int64
// ...Pass preloaded element #1 to the loop phi
// CHECK: br bb1([[PRELOAD]] : $Int64)
// ...Loop
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[TUPLE:%.*]] = tuple ([[ELT0B]] : $Int64, [[PHI]] : $Int64, [[ELT2]] : $Int64)
// CHECK-NEXT: [[STRUCT:%.*]] = struct $State ([[TUPLE]] : $(Int64, Int64, Int64), [[SINGLEVAL]] : $Int64)
// CHECK-NEXT: [[ADDEND:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value
// CHECK-NEXT: [[UADD:%.*]] = builtin "uadd_with_overflow_Int32"([[HOISTVAL]] : $Builtin.Int64, [[ADDEND]] : $Builtin.Int64, %{{.*}} : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
// CHECK-NEXT: [[ADDVAL:%.*]] = tuple_extract [[UADD]] : $(Builtin.Int64, Builtin.Int1), 0
// CHECK-NEXT: [[ADDINT:%.*]] = struct $Int64 ([[ADDVAL]] : $Builtin.Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1([[ADDINT]] : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store [[ADDINT]] to [[PRELOADADR]] : $*Int64
// CHECK-NEXT: return [[STRUCT]] : $State
// CHECK-LABEL: } // end sil function 'testTupleSplit'
sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State {
bb0(%0 : $Builtin.RawPointer):
%stateAddr = pointer_to_address %0 : $Builtin.RawPointer to $*State
%tupleAddr0 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr0 = tuple_element_addr %tupleAddr0 : $*(Int64, Int64, Int64), 0
%tupleAddr1 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr1 = tuple_element_addr %tupleAddr1 : $*(Int64, Int64, Int64), 1
%tupleAddr11 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr11 = tuple_element_addr %tupleAddr11 : $*(Int64, Int64, Int64), 1
br bb1
bb1:
%state = load %stateAddr : $*State
%element0 = load %elementAddr0 : $*Int64
%val0 = struct_extract %element0 : $Int64, #Int64._value
%element1 = load %elementAddr1 : $*Int64
%val1 = struct_extract %element1 : $Int64, #Int64._value
%zero = integer_literal $Builtin.Int1, 0
%add = builtin "uadd_with_overflow_Int32"(%val0 : $Builtin.Int64, %val1 : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
%addVal = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0
%addInt = struct $Int64 (%addVal : $Builtin.Int64)
store %addInt to %elementAddr11 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %state : $State
}
// Test multiple stores to disjoint access paths with a single load
// that spans both of them. The load should be split and hoisted, and
// the stores sunk.
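// A rough sketch (illustrative only): the single wide load
//   %val1 = load %outerAddr1 : $*(Int64, Int64, Int64)
// is split into three element loads hoisted to the preheader; elements
// 0 and 2, which are stored to in the loop, become phis, and the tuple
// is rebuilt in the loop as
//   %result = tuple (%phi0 : $Int64, %v1 : $Int64, %phi2 : $Int64)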
// testCommonSplitLoad
// CHECK-LABEL: sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
// CHECK: [[ELT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// CHECK: [[V0:%.*]] = load [[ELT0]] : $*Int64
// CHECK: [[ELT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2
// CHECK: [[V2:%.*]] = load [[ELT2]] : $*Int64
// CHECK: [[ELT1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1
// CHECK: [[V1:%.*]] = load [[ELT1]] : $*Int64
// CHECK: br bb1([[V0]] : $Int64, [[V2]] : $Int64)
//
// Nothing in this loop except phis...
// CHECK: bb1([[PHI0:%.*]] : $Int64, [[PHI2:%.*]] : $Int64):
// CHECK-NEXT: [[RESULT:%.*]] = tuple ([[PHI0]] : $Int64, [[V1]] : $Int64, [[PHI2]] : $Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1(%0 : $Int64, %0 : $Int64)
//
// Stores are all sunk...
// CHECK: bb3:
// CHECK: store %0 to [[ELT2]] : $*Int64
// CHECK: store %0 to [[ELT0]] : $*Int64
// CHECK: return [[RESULT]] : $(Int64, Int64, Int64)
// CHECK-LABEL: } // end sil function 'testCommonSplitLoad'
sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, Int64, Int64)
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, Int64, Int64)
%elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 0
store %0 to %elementAddr0 : $*Int64
%elementAddr2 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 2
store %0 to %elementAddr2 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, Int64, Int64)
}
sil @$foo_read_write : $@convention(method) (Int) -> () {
[global: read,write]
}
sil @$foo_read : $@convention(method) (Int) -> () {
[global: read]
}
// A read-only apply can't be hoisted because of a conflicting write apply in the loop.
//
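// A rough sketch (illustrative only): %8 applies @$foo_read, which only
// reads global state, but %9 applies @$foo_read_write in the same loop
// and may write the state %8 reads, so %8 is not loop-invariant.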
// CHECK-LABEL: sil @test_conflicting_write_apply : $@convention(thin) (Int) -> () {
// CHECK: bb2:
// CHECK-NEXT: apply
// CHECK-NEXT: apply
// CHECK: bb3:
// CHECK-LABEL: } // end sil function 'test_conflicting_write_apply'
sil @test_conflicting_write_apply : $@convention(thin) (Int) -> () {
bb0(%0 : $Int):
%4 = function_ref @$foo_read_write : $@convention(method) (Int) -> ()
%5 = function_ref @$foo_read : $@convention(method) (Int) -> ()
br bb2
bb1:
br bb2
bb2:
%8 = apply %5(%0) : $@convention(method) (Int) -> ()
%9 = apply %4(%0) : $@convention(method) (Int) -> ()
cond_br undef, bb3, bb1
bb3:
%11 = tuple ()
return %11
}
// Two stores, one to the outer tuple and one to the inner tuple. This
// results in two access paths that are only loaded/stored to. First
// split the outer tuple when processing the outer access path, then
// the inner tuple when processing the inner access path. All loads
// should be hoisted and all stores should be sunk.
//
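// A rough sketch (illustrative only): processing the store to element 0
// splits the outer load into (elt0, elt1) with elt1 : $(Int64, Int64);
// processing the store to element (1, 0) then re-splits the load of
// elt1 into (elt1_0, elt1_1). elt0 and elt1_0 become phis, and the
// nested tuples are rebuilt in the loop from phi and hoisted values.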
// CHECK-LABEL: sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 0
// CHECK: [[V0:%.*]] = load [[ELT_0]] : $*Int64
// CHECK: [[ELT_1a:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1
// CHECK: [[ELT_1_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 0
// CHECK: [[V_1_0:%.*]] = load [[ELT_1_0]] : $*Int64
// CHECK: [[ELT_1_1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 1
// CHECK: [[V_1_1:%.*]] = load [[ELT_1_1]] : $*Int64
// CHECK: br bb1([[V_0:%.*]] : $Int64, [[V_1_0]] : $Int64)
//
// Nothing in this loop except phis and tuple reconstruction...
// CHECK: bb1([[PHI_0:%.*]] : $Int64, [[PHI_1_0:%.*]] : $Int64):
// CHECK: [[INNER:%.*]] = tuple ([[PHI_1_0]] : $Int64, [[V_1_1]] : $Int64)
// CHECK: [[OUTER:%.*]] = tuple ([[PHI_0]] : $Int64, [[INNER]] : $(Int64, Int64))
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1(%0 : $Int64, %0 : $Int64)
//
// The two stores are sunk...
// CHECK: bb3:
// CHECK: store %0 to [[ELT_1_0]] : $*Int64
// CHECK: store %0 to [[ELT_0]] : $*Int64
// CHECK: return [[OUTER]] : $(Int64, (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testResplit'
sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 0
store %0 to %elementAddr0 : $*Int64
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
store %0 to %elementAddr10 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, (Int64, Int64))
}
// Two stores to overlapping access paths. Combined load/store hoisting
// cannot currently handle stores to overlapping access paths, so
// nothing is optimized.
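// A rough sketch of the obstruction (illustrative only):
//   store %tuple to %elementAddr1  : $*(Int64, Int64)  // path: element 1
//   store %1 to %elementAddr10     : $*Int64           // path: element (1, 0)
// Path (1, 0) lies inside path (1), so a single phi per access path
// cannot describe the memory state, and LICM leaves the loop unchanged.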
// CHECK-LABEL: sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK-LABEL: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: br bb1
// CHECK: bb1:
// CHECK: load %{{.*}} : $*(Int64, (Int64, Int64))
// CHECK: store {{.*}} : $*(Int64, Int64)
// CHECK: store {{.*}} : $*Int64
// CHECK: cond_br undef, bb2, bb3
// CHECK-NOT: store
// CHECK-LABEL: } // end sil function 'testTwoStores'
sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%tuple = tuple (%0 : $Int64, %1 : $Int64)
store %tuple to %elementAddr1 : $*(Int64, Int64)
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
store %1 to %elementAddr10 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, (Int64, Int64))
}
// Split two wide loads.
//
// CHECK-LABEL: sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
//
// CHECK: [[ADDR1:%.*]] = tuple_element_addr %{{.*}}, 0
// CHECK: [[V1:%.*]] = load [[ADDR1]] : $*Int64
// CHECK: [[OUTER:%.*]] = tuple (%{{.*}} : $Int64, %{{.*}} : $(Int64, Int64))
// CHECK: br bb1([[V1]] : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK: [[INNER:%.*]] = tuple ([[PHI]] : $Int64, {{%[0-9]+}} : $Int64)
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1(%0 : $Int64)
// CHECK: bb3:
// CHECK: store %0 to [[ADDR1]] : $*Int64
// CHECK: [[RESULT:%.*]] = tuple ([[OUTER]] : $(Int64, (Int64, Int64)), [[INNER]] : $(Int64, Int64))
// CHECK: return [[RESULT]] : $((Int64, (Int64, Int64)), (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testSplitNonStandardProjection'
sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%elt1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%ptr = address_to_pointer %elt1 : $*(Int64, Int64) to $Builtin.RawPointer
%ptrAdr = pointer_to_address %ptr : $Builtin.RawPointer to [strict] $*(Int64, Int64)
%val2 = load %ptrAdr : $*(Int64, Int64)
%eltptr0 = tuple_element_addr %ptrAdr : $*(Int64, Int64), 0
store %0 to %eltptr0 : $*Int64
// Process the outermost load after splitting the inner load
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%val1 : $(Int64, (Int64, Int64)), %val2 : $(Int64, Int64))
return %result : $((Int64, (Int64, Int64)), (Int64, Int64))
}
// CHECK-LABEL: sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
// CHECK: [[ELT_1:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 1
// CHECK: [[V1:%.*]] = load %4 : $*(Int64, Int64)
// CHECK: [[ARG0:%.*]] = tuple (%0 : $Int64, %0 : $Int64)
// CHECK: [[ARG1:%.*]] = tuple (%1 : $Int64, %1 : $Int64)
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 0
// CHECK: [[ARG0_0:%.*]] = tuple_extract [[ARG0]] : $(Int64, Int64), 0
// CHECK: br bb1([[V1]] : $(Int64, Int64))
// CHECK: bb1([[PHI:%.*]] : $(Int64, Int64)):
// CHECK: [[LOOPVAL:%.*]] = tuple ({{%[0-9]+}} : $Int64, [[PHI]] : $(Int64, Int64))
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1([[ARG1]] : $(Int64, Int64))
// CHECK: bb3:
// CHECK: store [[ARG1]] to [[ELT_1]] : $*(Int64, Int64)
// CHECK: [[EXTRACT0:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 0
// CHECK: [[EXTRACT1:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 1
// CHECK: [[EXTRACT1_1:%.*]] = tuple_extract [[EXTRACT1]] : $(Int64, Int64), 1
// CHECK: [[TUPLE1:%.*]] = tuple ([[ARG0_0]] : $Int64, [[EXTRACT1_1]] : $Int64)
// CHECK: [[RESULT:%.*]] = tuple ([[EXTRACT0]] : $Int64, [[TUPLE1]] : $(Int64, Int64))
// CHECK: return [[RESULT]] : $(Int64, (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testSameTwoStores'
sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%tupleA = tuple (%0 : $Int64, %0 : $Int64)
store %tupleA to %elementAddr1 : $*(Int64, Int64)
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
%val10 = load %elementAddr10 : $*Int64
%tupleB = tuple (%1 : $Int64, %1 : $Int64)
store %tupleB to %elementAddr1 : $*(Int64, Int64)
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%extract0 = tuple_extract %val : $(Int64, (Int64, Int64)), 0
%extract1 = tuple_extract %val : $(Int64, (Int64, Int64)), 1
%extract11 = tuple_extract %extract1 : $(Int64, Int64), 1
%inner = tuple (%val10 : $Int64, %extract11 : $Int64)
%outer = tuple (%extract0 : $Int64, %inner : $(Int64, Int64))
return %outer : $(Int64, (Int64, Int64))
}
class C {}
// This won't be hoisted because we can't find a base to check if it is invariant
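// A rough sketch (illustrative only): the address stored to in the loop is
//   %11 = pointer_to_address %9 : $Builtin.RawPointer to [strict] $*Double
// where %9 is a block argument merging two address_to_pointer values, so
// LICM cannot identify a single formal access base to prove invariance.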
// CHECK-LABEL: sil @testLoopInvariantStoreNoBase1 :
// CHECK: bb3(%11 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: bb6:
// CHECK: store
// CHECK-LABEL: } // end sil function 'testLoopInvariantStoreNoBase1'
sil @testLoopInvariantStoreNoBase1 : $@convention(thin) (Builtin.BridgeObject, Double) -> () {
bb0(%0 : $Builtin.BridgeObject, %1 : $Double):
cond_br undef, bb1, bb2
bb1:
%2 = unchecked_ref_cast %0 : $Builtin.BridgeObject to $C
%3 = ref_tail_addr [immutable] %2 : $C, $Double
%4 = address_to_pointer %3 : $*Double to $Builtin.RawPointer
br bb3(%4 : $Builtin.RawPointer)
bb2:
%6 = unchecked_ref_cast %0 : $Builtin.BridgeObject to $C
%7 = ref_tail_addr [immutable] %6 : $C, $Double
%8 = address_to_pointer %7 : $*Double to $Builtin.RawPointer
br bb3(%8 : $Builtin.RawPointer)
bb3(%9 : $Builtin.RawPointer):
br bb4
bb4:
%11 = pointer_to_address %9 : $Builtin.RawPointer to [strict] $*Double
store %1 to %11 : $*Double
cond_br undef, bb5, bb6
bb5:
br bb4
bb6:
%15 = tuple ()
return %15 : $()
}
// This won't be hoisted because we can't find a base to check if it is invariant
// CHECK-LABEL: sil @testLoopInvariantStoreNoBase2 :
// CHECK: bb3(%11 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: bb6:
// CHECK: store
// CHECK-LABEL: } // end sil function 'testLoopInvariantStoreNoBase2'
sil @testLoopInvariantStoreNoBase2 : $@convention(thin) (Builtin.BridgeObject, Double) -> () {
bb0(%0 : $Builtin.BridgeObject, %1 : $Double):
cond_br undef, bb1, bb2
bb1:
%2 = unchecked_ref_cast %0 : $Builtin.BridgeObject to $C
%3 = ref_tail_addr [immutable] %2 : $C, $Double
%4 = address_to_pointer %3 : $*Double to $Builtin.RawPointer
br bb3(%4 : $Builtin.RawPointer)
bb2:
%6 = unchecked_ref_cast %0 : $Builtin.BridgeObject to $C
%7 = ref_tail_addr [immutable] %6 : $C, $Double
%8 = address_to_pointer %7 : $*Double to $Builtin.RawPointer
br bb3(%8 : $Builtin.RawPointer)
bb3(%9 : $Builtin.RawPointer):
br bb4
bb4:
%11 = pointer_to_address %9 : $Builtin.RawPointer to [strict] $*Double
%12 = integer_literal $Builtin.Word, 1
%13 = index_addr %11 : $*Double, %12 : $Builtin.Word
store %1 to %13 : $*Double
cond_br undef, bb5, bb6
bb5:
br bb4
bb6:
%15 = tuple ()
return %15 : $()
}
struct UInt64 {
@_hasStorage var _value: Builtin.Int64 { get set }
init(_value: Builtin.Int64)
}
public struct UInt64Wrapper {
@_hasStorage public var rawValue: UInt64 { get set }
private init(_ rawValue: UInt64)
public init()
}
// rdar://92191909 (LICM assertion: isSubObjectProjection(), MemAccessUtils.h, line 1069)
//
// projectLoadValue needs to rematerialize a loaded value within the
// loop using projections, and the loop-invariant address is an
// index_addr.
//
// The store inside the loop is deleted, and the load is hoisted such
// that it now loads the UInt64Wrapper instead of the Builtin.Int64.
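// A rough sketch (illustrative only): after hoisting, the loop carries the
// whole $UInt64Wrapper in a phi and rematerializes the leaf value by
// projection instead of reloading through the index_addr:
//   %raw = struct_extract %phi : $UInt64Wrapper, #UInt64Wrapper.rawValue
//   %val = struct_extract %raw : $UInt64, #UInt64._value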
// CHECK-LABEL: sil @testTailProjection : $@convention(thin) () -> () {
// CHECK: bb0:
// CHECK: [[A:%.*]] = index_addr %4 : $*UInt64Wrapper, %1 : $Builtin.Word
// CHECK: store %{{.*}} to [[A]] : $*UInt64Wrapper
// CHECK: load %5 : $*UInt64Wrapper
// CHECK: br bb1
// CHECK: bb1(%{{.*}} : $Builtin.Int64, %{{.*}} : $UInt64Wrapper, [[PHI:%.*]] : $UInt64Wrapper):
// CHECK: cond_br undef, bb3, bb2
// CHECK: bb2:
// CHECK-NOT: (load|store)
// CHECK: struct_extract [[PHI]] : $UInt64Wrapper, #UInt64Wrapper.rawValue
// CHECK: struct_extract
// CHECK: struct $UInt64
// CHECK: struct $UInt64Wrapper
// CHECK-NOT: (load|store)
// CHECK: br bb1
// CHECK: bb3:
// CHECK: store [[PHI]] to [[A]] : $*UInt64Wrapper
// CHECK-LABEL: } // end sil function 'testTailProjection'
sil @testTailProjection : $@convention(thin) () -> () {
bb0:
%0 = integer_literal $Builtin.Int64, 0
%1 = integer_literal $Builtin.Word, 1
%2 = integer_literal $Builtin.Word, 2
%3 = alloc_ref [tail_elems $UInt64Wrapper * %2 : $Builtin.Word] $Storage
%4 = ref_tail_addr %3 : $Storage, $UInt64Wrapper
%5 = index_addr %4 : $*UInt64Wrapper, %1 : $Builtin.Word
%6 = struct $UInt64 (%0 : $Builtin.Int64)
%7 = struct $UInt64Wrapper (%6 : $UInt64)
store %7 to %5 : $*UInt64Wrapper
%9 = load %5 : $*UInt64Wrapper
br bb1(%0 : $Builtin.Int64, %9 : $UInt64Wrapper)
bb1(%11 : $Builtin.Int64, %12 : $UInt64Wrapper):
cond_br undef, bb3, bb2
bb2:
%14 = struct_element_addr %5 : $*UInt64Wrapper, #UInt64Wrapper.rawValue
%15 = struct_element_addr %14 : $*UInt64, #UInt64._value
%16 = load %15 : $*Builtin.Int64
%17 = struct $UInt64 (%16 : $Builtin.Int64)
%18 = struct $UInt64Wrapper (%17 : $UInt64)
store %18 to %5 : $*UInt64Wrapper
br bb1(%16 : $Builtin.Int64, %18 : $UInt64Wrapper)
bb3:
%21 = tuple ()
return %21 : $()
}
sil_global private @g_token : $Builtin.Word
sil_global @gi : $Int
sil @g_init : $@convention(c) () -> ()
// CHECK-LABEL: sil @hoist_builtin_once :
// CHECK: function_ref
// CHECK: builtin "once"
// CHECK: br bb1
// CHECK: bb1:
// CHECK-NOT: builtin
// CHECK: cond_br
// CHECK: } // end sil function 'hoist_builtin_once'
sil @hoist_builtin_once : $@convention(thin) () -> () {
bb0:
br bb1
bb1:
%1 = global_addr @g_token : $*Builtin.Word
%2 = address_to_pointer %1 : $*Builtin.Word to $Builtin.RawPointer
%3 = function_ref @g_init : $@convention(c) () -> ()
%4 = builtin "once"(%2 : $Builtin.RawPointer, %3 : $@convention(c) () -> ()) : $()
cond_br undef, bb1, bb2
bb2:
%r1 = tuple ()
return %r1 : $()
}
struct Outer {
var interactions: Inner
}
public struct Inner {
public let x: Int
var storage: ContiguousArray<Double>
}
sil @$get_array_from_inner : $@convention(method) (Inner) -> (ContiguousArray<Double>)
// Make sure we project the load's access path to the matching store's
// access path; otherwise the pass could crash.
//
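// A rough sketch (illustrative only): the loop loads at access path
// #Outer.interactions but stores at the deeper path
// #Outer.interactions.storage; the loaded value must first be projected
// down to the store's access path so that the split paths line up.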
// CHECK-LABEL: sil @project_load_path_before_splitting_crash :
// CHECK: load
// CHECK: bb1({{%[0-9]+}} : $ContiguousArray<Double>):
// CHECK: bb3:
// CHECK: store
// CHECK: } // end sil function 'project_load_path_before_splitting_crash'
sil @project_load_path_before_splitting_crash : $@convention(thin) (@inout Outer) -> () {
bb0(%0 : $*Outer):
%1 = struct_element_addr %0, #Outer.interactions
%2 = struct_element_addr %1, #Inner.storage
br bb1
bb1:
%4 = load %1
%5 = function_ref @$get_array_from_inner : $@convention(method) (Inner) -> ContiguousArray<Double>
%6 = apply %5(%4) : $@convention(method) (Inner) -> ContiguousArray<Double>
store %6 to %2
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%10 = tuple ()
return %10
}
// CHECK-LABEL: sil @dont_hoist_builtin_once_memory_conflict :
// CHECK: function_ref
// CHECK: br bb1
// CHECK: bb1:
// CHECK: builtin "once"
// CHECK: cond_br
// CHECK: } // end sil function 'dont_hoist_builtin_once_memory_conflict'
sil @dont_hoist_builtin_once_memory_conflict : $@convention(thin) (Int) -> () {
bb0(%0 : $Int):
br bb1
bb1:
%1 = global_addr @gi : $*Int
store %0 to %1 : $*Int
%3 = global_addr @g_token : $*Builtin.Word
%4 = address_to_pointer %3 : $*Builtin.Word to $Builtin.RawPointer
%5 = function_ref @g_init : $@convention(c) () -> ()
%6 = builtin "once"(%4 : $Builtin.RawPointer, %5 : $@convention(c) () -> ()) : $()
cond_br undef, bb1, bb2
bb2:
%r1 = tuple ()
return %r1 : $()
}
// CHECK-LABEL: sil [ossa] @hoist_load_copy :
// CHECK: %1 = load_borrow %0
// CHECK: bb1:
// CHECK-NEXT: %3 = copy_value %1
// CHECK: bb3:
// CHECK-NEXT: end_borrow %1
// CHECK: } // end sil function 'hoist_load_copy'
sil [ossa] @hoist_load_copy : $@convention(thin) (@inout Builtin.NativeObject) -> () {
bb0(%0 : $*Builtin.NativeObject):
br bb1
bb1:
%2 = load [copy] %0
fix_lifetime %2
destroy_value %2
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil [ossa] @hoist_load_take_store_init :
// CHECK: %2 = load [take] %0
// CHECK: bb1(%4 : @owned $S):
// CHECK-NEXT: fix_lifetime %4
// CHECK: bb3:
// CHECK-NEXT: store %7 to [init] %0
// CHECK: } // end sil function 'hoist_load_take_store_init'
sil [ossa] @hoist_load_take_store_init : $@convention(thin) (@inout S, @guaranteed S) -> () {
bb0(%0 : $*S, %new_val : @guaranteed $S):
br bb1
bb1:
%2 = load [take] %0
fix_lifetime %2
destroy_value %2
%copied = copy_value %new_val
store %copied to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
sil @$foo_yield : $@yield_once @convention(method) (Int) -> @yields Int {
[global: read]
}
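// The coroutine is declared with the [global: read] effect, so the
// begin_apply/end_apply pair cannot conflict with anything in the loop: the
// begin_apply is hoisted into the preheader and the end_apply is sunk to the
// loop exit.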
// CHECK-LABEL: sil [ossa] @hoist_begin_apply : $@convention(thin) (Int) -> () {
// CHECK: bb0(%0 : $Int):
// CHECK: function_ref
// CHECK: begin_apply
// CHECK: bb3:
// CHECK: end_apply
// CHECK: } // end sil function 'hoist_begin_apply'
sil [ossa] @hoist_begin_apply : $@convention(thin) (Int) -> () {
bb0(%0 : $Int):
br bb1
bb1:
%reader = function_ref @$foo_yield : $@yield_once @convention(method) (Int) -> @yields Int
(%value, %token) = begin_apply %reader(%0) : $@yield_once @convention(method) (Int) -> @yields Int
end_apply %token as $()
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
// CHECK-LABEL: sil [ossa] @hoist_trivial_load :
// CHECK: load [trivial]
// CHECK-NEXT: br bb1
// CHECK: } // end sil function 'hoist_trivial_load'
sil [ossa] @hoist_trivial_load : $@convention(thin) (@inout Int) -> () {
bb0(%0 : $*Int):
br bb1
bb1:
%2 = load [trivial] %0
fix_lifetime %2
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
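// A `load_borrow` from an address that is never written inside the loop is
// hoisted into the preheader, and its `end_borrow` is sunk to the loop exit.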
// CHECK-LABEL: sil [ossa] @hoist_load_borrow :
// CHECK: bb0(%0 : $*Builtin.NativeObject):
// CHECK-NEXT: load_borrow
// CHECK: bb3:
// CHECK-NEXT: end_borrow
// CHECK: } // end sil function 'hoist_load_borrow'
sil [ossa] @hoist_load_borrow : $@convention(thin) (@inout Builtin.NativeObject) -> () {
bb0(%0 : $*Builtin.NativeObject):
br bb1
bb1:
%2 = load_borrow %0
fix_lifetime %2
end_borrow %2
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
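// The `copy_addr` reinitializes the stack slot on every iteration, so the
// `load_borrow` of that slot is not loop invariant and must stay in the
// loop.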
// CHECK-LABEL: sil [ossa] @dont_hoist_load_borrow :
// CHECK: bb1:
// CHECK-NEXT: copy_addr
// CHECK-NEXT: load_borrow
// CHECK: } // end sil function 'dont_hoist_load_borrow'
sil [ossa] @dont_hoist_load_borrow : $@convention(thin) (@in_guaranteed String) -> () {
bb0(%0 : $*String):
%1 = alloc_stack $String
br bb1
bb1:
copy_addr %0 to [init] %1
%3 = load_borrow %1
fix_lifetime %3
end_borrow %3
%6 = load [take] %1
destroy_value %6
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %1
%r = tuple()
return %r : $()
}
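// The formed value has a non-copyable type and is consumed by the
// `destroy_value` on every iteration. Hoisting the `struct` would require
// producing a copy per iteration, which ~Copyable values do not support, so
// it stays in the loop.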
// CHECK-LABEL: sil [ossa] @dont_hoist_struct :
// CHECK: bb1:
// CHECK-NEXT: struct $NonCopyable
// CHECK: } // end sil function 'dont_hoist_struct'
sil [ossa] @dont_hoist_struct : $@convention(thin) (Int) -> () {
bb0(%0 : $Int):
br bb1
bb1:
%2 = struct $NonCopyable (%0)
fix_lifetime %2
destroy_value %2
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
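// A `store_borrow` of a loop-invariant value is hoisted together with its
// `alloc_stack`; the matching `end_borrow` and `dealloc_stack` are sunk to
// the loop exit.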
// CHECK-LABEL: sil [ossa] @hoist_store_borrow :
// CHECK: bb0(%0 : @guaranteed $S):
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: store_borrow
// CHECK: bb3:
// CHECK-NEXT: end_borrow
// CHECK-NEXT: dealloc_stack
// CHECK: } // end sil function 'hoist_store_borrow'
sil [ossa] @hoist_store_borrow : $@convention(thin) (@guaranteed S) -> () {
bb0(%0 : @guaranteed $S):
br bb1
bb1:
%s = alloc_stack $S
%2 = store_borrow %0 to %s
fix_lifetime %2
end_borrow %2
dealloc_stack %s
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r : $()
}
sil @foo : $@convention(thin) (@guaranteed { var Int }) -> ()
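// The [dynamic] access is not hoisted: the call to @foo receives the box and
// may itself access the boxed memory, so extending the exclusive access
// scope across the apply could introduce a dynamic exclusivity violation.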
// CHECK-LABEL: sil [ossa] @test_begin_access : $@convention(thin) (Int) -> () {
// CHECK: bb2:
// CHECK: begin_access
// CHECK: end_access
// CHECK: bb3
// CHECK-LABEL: } // end sil function 'test_begin_access'
sil [ossa] @test_begin_access : $@convention(thin) (Int) -> () {
bb0(%0 : $Int):
%1 = alloc_box ${ var Int }, var, name "now"
%2 = begin_borrow [lexical] [var_decl] %1
%3 = project_box %2, 0
store %0 to [trivial] %3
br bb2
bb1:
br bb2
bb2:
%7 = function_ref @foo : $@convention(thin) (@guaranteed { var Int }) -> ()
%8 = apply %7(%2) : $@convention(thin) (@guaranteed { var Int }) -> ()
%9 = begin_access [modify] [dynamic] %3
store %0 to [trivial] %9
end_access %9
cond_br undef, bb3, bb1
bb3:
end_borrow %2
destroy_value %1
%15 = tuple ()
return %15
}
// Hoisting or sinking non-trivial stores is currently not supported in OSSA,
// so the `store [assign]` below has to stay in the loop.
// CHECK-LABEL: sil [ossa] @store_of_optional_none :
// CHECK: bb2:
// CHECK: store %0 to [assign] %1
// CHECK: bb3:
// CHECK-LABEL: } // end sil function 'store_of_optional_none'
sil [ossa] @store_of_optional_none : $@convention(thin) () -> () {
bb0:
%0 = enum $Optional<String>, #Optional.none!enumelt
%1 = alloc_stack $Optional<String>
store %0 to [init] %1
br bb1
bb1:
cond_br undef, bb2, bb3
bb2:
store %0 to [assign] %1
br bb1
bb3:
destroy_addr %1
dealloc_stack %1
%r = tuple()
return %r : $()
}
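// The store writes a subelement of the address covered by the borrow on
// every iteration, so neither the store nor the `load_borrow` is loop
// invariant; both stay in the loop.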
// CHECK-LABEL: sil [ossa] @store_and_load_borrow :
// CHECK: bb1({{.*}}):
// CHECK: store %1 to [trivial]
// CHECK: load_borrow
// CHECK: bb2:
// CHECK-LABEL: } // end sil function 'store_and_load_borrow'
sil [ossa] @store_and_load_borrow : $@convention(thin) (@inout S, Int) -> () {
bb0(%0 : $*S, %1 : $Int):
%2 = load_borrow %0
%3 = struct_element_addr %0, #S.i
br bb1(%2)
bb1(%4 : @reborrow $S):
%5 = borrowed %4 from ()
end_borrow %5
store %1 to [trivial] %3
%10 = load_borrow %0
cond_br undef, bb2, bb3
bb2:
br bb1(%10)
bb3:
br bb4(%10)
bb4(%14 : @reborrow $S):
%15 = borrowed %14 from ()
end_borrow %15
%r = tuple()
return %r
}
// Just check that LICM doesn't produce invalid SIL because of a tuple type mismatch.
// CHECK-LABEL: sil [ossa] @split_load_of_labeled_tuples :
// CHECK-LABEL: } // end sil function 'split_load_of_labeled_tuples'
sil [ossa] @split_load_of_labeled_tuples : $@convention(thin) (@inout Pair, Int) -> () {
bb0(%0 : $*Pair, %1 : $Int):
br bb1
bb1:
cond_br undef, bb3, bb2
bb2:
%4 = load [trivial] %0
%5 = struct_element_addr %0, #Pair.t
%6 = tuple_element_addr %5, 0
%7 = alloc_stack $Int
store %1 to [trivial] %7
%9 = load [trivial] %7
store %9 to [trivial] %6
dealloc_stack %7
br bb1
bb3:
%13 = tuple ()
return %13
}
// Just check that LICM doesn't produce invalid SIL.
// CHECK-LABEL: sil [ossa] @uninitialized_at_entry_and_exit :
// CHECK-LABEL: } // end sil function 'uninitialized_at_entry_and_exit'
sil [ossa] @uninitialized_at_entry_and_exit : $@convention(thin) (@owned S) -> @out S {
bb0(%0 : $*S, %1 : @owned $S):
br bb1
bb1:
%5 = copy_value %1
store %5 to [init] %0
%7 = load [take] %0
destroy_value %7
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
store %1 to [init] %0
%r = tuple ()
return %r
}
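// The destination is uninitialized on entry, so the `store [init]` cannot
// simply be hoisted. Instead the pair is promoted: the copied value stays in
// a register, the `load [take]`/`destroy_value` becomes a plain destroy on
// the backedge, and the memory is initialized once at the loop exit.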
// CHECK-LABEL: sil [ossa] @uninitialized_at_entry :
// CHECK: bb1:
// CHECK-NEXT: [[C:%.*]] = copy_value %1
// CHECK-NEXT: cond_br
// CHECK: bb2:
// CHECK-NEXT: destroy_value [[C]]
// CHECK: bb3:
// CHECK-NEXT: store [[C]] to [init] %0
// CHECK-LABEL: } // end sil function 'uninitialized_at_entry'
sil [ossa] @uninitialized_at_entry : $@convention(thin) (@owned S) -> @out S {
bb0(%0 : $*S, %1 : @owned $S):
br bb1
bb1:
%5 = copy_value %1
store %5 to [init] %0
cond_br undef, bb2, bb3
bb2:
%7 = load [take] %0
destroy_value %7
br bb1
bb3:
destroy_value %1
%r = tuple ()
return %r
}
// Just check that LICM doesn't produce invalid SIL.
// CHECK-LABEL: sil [ossa] @uninitialized_at_exit :
// CHECK-LABEL: } // end sil function 'uninitialized_at_exit'
sil [ossa] @uninitialized_at_exit : $@convention(thin) (@in S) -> () {
bb0(%0 : $*S):
br bb1
bb1:
%4 = load [take] %0
%5 = move_value %4
store %5 to [init] %0
%6 = load [take] %0
cond_br undef, bb2, bb3
bb2:
%8 = move_value %6
store %8 to [init] %0
br bb1
bb3:
destroy_value %6
%r = tuple ()
return %r
}
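// The `load [take]` of the projected field is widened to a take of the whole
// tuple, which is carried as an owned loop block argument and destructured
// inside the loop to recover the field value.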
// CHECK-LABEL: sil [ossa] @projected_load_take :
// CHECK: bb1([[V:%.*]] : @owned $(S, Int)):
// CHECK-NEXT: ([[E:%.*]], {{%[0-9]+}}) = destructure_tuple [[V]]
// CHECK-NEXT: ({{%[0-9]+}}, [[S:%.*]]) = destructure_struct [[E]]
// CHECK-NEXT: = struct $S (%1 : $Int, [[S]] : $String)
// CHECK-LABEL: } // end sil function 'projected_load_take'
sil [ossa] @projected_load_take : $@convention(thin) (@inout (S, Int), Int) -> () {
bb0(%0 : $*(S, Int), %1 : $Int):
br bb1
bb1:
%2 = tuple_element_addr %0, 0
%3 = struct_element_addr %2, #S.s
%4 = load [take] %3
%5 = struct $S (%1, %4)
%6 = tuple (%5, %1)
store %6 to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r
}
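// When a `load [copy]` is followed by a `load [take]` of the same address,
// both collapse onto one hoisted value: the copy becomes a `copy_value` of
// the carried value and the take becomes a `move_value` of it.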
// CHECK-LABEL: sil [ossa] @load_copy_followed_by_take :
// CHECK: bb1([[V:%.*]] : @owned $S):
// CHECK-NEXT: [[C:%.*]] = copy_value [[V]]
// CHECK-NEXT: destroy_value [[C]]
// CHECK-NEXT: = move_value [[V]]
// CHECK-LABEL: } // end sil function 'load_copy_followed_by_take'
sil [ossa] @load_copy_followed_by_take : $@convention(thin) (@inout S) -> () {
bb0(%0 : $*S):
br bb1
bb1:
%4 = load [copy] %0
destroy_value %4
%6 = load [take] %0
%7 = move_value %6
store %7 to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r
}
// CHECK-LABEL: sil [ossa] @projected_load_copy_followed_by_take :
// CHECK: bb1([[V:%.*]] : @owned $S):
// CHECK-NEXT: [[B:%.*]] = begin_borrow [[V]]
// CHECK-NEXT: [[SE:%.*]] = struct_extract [[B]]
// CHECK-NEXT: [[C:%.*]] = copy_value [[SE]]
// CHECK-NEXT: end_borrow [[B]]
// CHECK-NEXT: destroy_value [[C]]
// CHECK-NEXT: ({{%[0-9]+}}, [[S:%.*]]) = destructure_struct [[V]]
// CHECK-NEXT: = struct $S (%1 : $Int, [[S]] : $String)
// CHECK-LABEL: } // end sil function 'projected_load_copy_followed_by_take'
sil [ossa] @projected_load_copy_followed_by_take : $@convention(thin) (@inout S, Int) -> () {
bb0(%0 : $*S, %1 : $Int):
br bb1
bb1:
%43 = struct_element_addr %0, #S.s
%4 = load [copy] %43
destroy_value %4
%48 = load [take] %43
%49 = struct $S (%1, %48)
store %49 to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r
}
// Just check that LICM doesn't produce invalid SIL.
// CHECK-LABEL: sil [ossa] @partial_load_take_is_not_supported :
// CHECK-LABEL: } // end sil function 'partial_load_take_is_not_supported'
sil [ossa] @partial_load_take_is_not_supported : $@convention(thin) (@inout S2, Int) -> () {
bb0(%0 : $*S2, %1 : $Int):
br bb1
bb1:
%4 = struct_element_addr %0, #S2.s1
%5 = load [take] %4
%6 = struct_element_addr %0, #S2.s2
%7 = load [take] %6
%8 = struct $S2 (%1, %5, %7)
store %8 to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r
}
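// Partial `load [copy]`s of individual fields are rewritten in terms of the
// hoisted whole-struct value: each becomes a `begin_borrow` plus
// `struct_extract` plus `copy_value` of the carried value.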
// CHECK-LABEL: sil [ossa] @partial_load_copy :
// CHECK: bb1([[V:%.*]] : @owned $S2):
// CHECK: [[B1:%.*]] = begin_borrow [[V]]
// CHECK-NEXT: [[SE1:%.*]] = struct_extract [[B1]] : $S2, #S2.s1
// CHECK-NEXT: = copy_value [[SE1]]
// CHECK: [[B2:%.*]] = begin_borrow [[V]]
// CHECK-NEXT: [[SE2:%.*]] = struct_extract [[B2]] : $S2, #S2.s2
// CHECK-NEXT: = copy_value [[SE2]]
// CHECK-LABEL: } // end sil function 'partial_load_copy'
sil [ossa] @partial_load_copy : $@convention(thin) (@inout S2, Int) -> () {
bb0(%0 : $*S2, %1 : $Int):
br bb1
bb1:
%4 = struct_element_addr %0, #S2.s1
%5 = load [copy] %4
%6 = struct_element_addr %0, #S2.s2
%7 = load [copy] %6
%8 = struct $S2 (%1, %5, %7)
%9 = load [take] %0
destroy_value %9
store %8 to [init] %0
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%r = tuple ()
return %r
}
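// The preheader already contains a load of the same address, but an
// intervening store makes that value stale. The loop's load must be hoisted
// as a fresh load after the second store rather than reusing the earlier
// loaded value.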
// CHECK-LABEL: sil [ossa] @hoist_load_with_identical_load_in_preheader :
// CHECK: store
// CHECK: load
// CHECK: store
// CHECK: [[L:%.*]] = load
// CHECK-NEXT: br bb1
// CHECK: bb1:
// CHECK-NEXT: apply undef([[L]])
// CHECK-LABEL: } // end sil function 'hoist_load_with_identical_load_in_preheader'
sil [ossa] @hoist_load_with_identical_load_in_preheader : $@convention(thin) (Int, Int) -> () {
bb0(%0 : $Int, %1 : $Int):
%2 = alloc_stack $Int
store %0 to [trivial] %2
%3 = load [trivial] %2
store %1 to [trivial] %2
br bb1
bb1:
%6 = load [trivial] %2
%7 = apply undef(%6) : $(Int) -> ()
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %2
%r = tuple ()
return %r
}
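// The stack slot is marked [dynamic_lifetime] and nothing in the loop
// reinitializes it after the take, so the pass cannot prove the memory is
// initialized on every iteration; the `load [take]` has to stay in the loop.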
// CHECK-LABEL: sil [ossa] @dont_hoist_load_take :
// CHECK: bb1:
// CHECK-NEXT: load [take]
// CHECK-LABEL: } // end sil function 'dont_hoist_load_take'
sil [ossa] @dont_hoist_load_take : $@convention(thin) (@owned String) -> () {
bb0(%0 : @owned $String):
%1 = alloc_stack [dynamic_lifetime] $String
store %0 to [init] %1
br bb1
bb1:
%4 = load [take] %1
destroy_value %4
cond_br undef, bb3, bb2
bb2:
br bb1
bb3:
dealloc_stack %1
%9 = tuple ()
return %9
}
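// When hoisting the begin_borrow, the borrow's lifetime has to be ended on
// every loop exit, including the dead-end path that terminates in
// `unreachable`: an end_borrow is inserted there as well as on the normal
// exit.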
// CHECK-LABEL: sil [ossa] @begin_borrow_with_dead_end_exit :
// CHECK: %1 = begin_borrow %0
// CHECK: bb2:
// CHECK-NEXT: end_borrow %1
// CHECK: bb5:
// CHECK-NEXT: end_borrow %1
// CHECK-LABEL: } // end sil function 'begin_borrow_with_dead_end_exit'
sil [ossa] @begin_borrow_with_dead_end_exit : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject {
bb0(%0 : @owned $Builtin.NativeObject):
br bb1
bb1:
%2 = begin_borrow %0
fix_lifetime %2
end_borrow %2
cond_br undef, bb2, bb3
bb2:
destroy_value [dead_end] %0
unreachable
bb3:
cond_br undef, bb4, bb5
bb4:
br bb1
bb5:
return %0
}