// RUN: %target-sil-opt -assume-parsing-unqualified-ownership-sil -enable-sil-verify-all %s -temp-rvalue-opt | %FileCheck %s

sil_stage canonical

import Builtin
import Swift

struct GS<Base> {
  var _base: Base
  var _value: Builtin.Int64
}

sil @unknown : $@convention(thin) () -> ()

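// Tests for temp-rvalue-opt: the pass eliminates short-lived alloc_stack
// temporaries that are initialized by a copy_addr and only read before being
// destroyed.

// The temporary below is only read and the source %1 is not modified during
// its lifetime, so the copy should be removed and the load redirected to the
// source address.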
// CHECK-LABEL: sil @rvalue_simple
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>):
// CHECK: [[A1:%.*]] = struct_element_addr %0 : $*GS<B>, #GS._value
// CHECK: [[V1:%.*]] = load [[A1]] : $*Builtin.Int64
// CHECK-NOT: alloc_stack
// CHECK-NOT: copy_addr
// CHECK: [[A2:%.*]] = struct_element_addr %1 : $*GS<B>, #GS._value
// CHECK: [[V2:%.*]] = load [[A2]] : $*Builtin.Int64
// CHECK: %{{.*}} = builtin "cmp_slt_Int64"([[V1]] : $Builtin.Int64, [[V2]] : $Builtin.Int64) : $Builtin.Int1
// CHECK-NOT: destroy_addr
// CHECK-NOT: dealloc_stack
// CHECK: return %{{.*}} : $()
// CHECK-LABEL: } // end sil function 'rvalue_simple'
sil @rvalue_simple : $@convention(thin) <B> (@in GS<B>, @inout GS<B>) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>):
  %2 = struct_element_addr %0 : $*GS<B>, #GS._value
  %3 = load %2 : $*Builtin.Int64
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%3 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

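// A copy_addr from the temporary can be replaced by a copy_addr directly
// from the source, so the temporary should disappear entirely.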
// CHECK-LABEL: sil @copy_from_temp
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: builtin
// CHECK-NEXT: copy_addr %1 to [initialization] %0 : $*GS<B>
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @copy_from_temp : $@convention(thin) <B> (@inout GS<B>, @inout GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %8 = builtin "cmp_slt_Int64"(%2 : $Builtin.Int64, %2 : $Builtin.Int64) : $Builtin.Int1
  copy_addr %4 to [initialization] %0 : $*GS<B>
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

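// Copying the unmodified temporary back to its own source is redundant, so
// both the copy-back and the temporary should be removed.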
// CHECK-LABEL: sil @copy_back_to_src
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>):
// CHECK-NEXT: struct_element_addr %1
// CHECK-NEXT: load
// CHECK-NEXT: builtin
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @copy_back_to_src : $@convention(thin) <B> (@in GS<B>, @inout GS<B>) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>):
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  copy_addr %4 to %1 : $*GS<B>
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

// Currently not supported: the copy_addr [take] from the temporary mutates
// it, so the temporary cannot simply be replaced by the read-only source.
// No optimization is expected.
// CHECK-LABEL: sil @take_from_temp
// CHECK: bb0(%0 : $*B, %1 : $*GS<B>):
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: copy_addr
// CHECK-NEXT: struct_element_addr
// CHECK-NEXT: copy_addr
// CHECK-NEXT: dealloc_stack
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @take_from_temp : $@convention(thin) <B> (@inout B, @inout GS<B>) -> () {
bb0(%0 : $*B, %1 : $*GS<B>):
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %7 = struct_element_addr %4 : $*GS<B>, #GS._base
  copy_addr [take] %7 to %0 : $*B
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

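// Not optimized: the load from the temporary is in a different basic block
// than the temporary's initialization.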
// CHECK-LABEL: sil @load_in_wrong_block
// CHECK: bb0(%0 : $*GS<B>):
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: copy_addr
// CHECK-NEXT: struct_element_addr
// CHECK-NEXT: br bb1
// CHECK: return
sil @load_in_wrong_block : $@convention(thin) <B> (@in GS<B>) -> () {
bb0(%0 : $*GS<B>):
  %4 = alloc_stack $GS<B>
  copy_addr %0 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  br bb1

bb1:
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

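// Not optimized: the struct_element_addr projection of the temporary is in a
// different basic block than the temporary's initialization.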
// CHECK-LABEL: sil @projection_in_wrong_block
// CHECK: bb0(%0 : $*GS<B>):
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: copy_addr
// CHECK-NEXT: br bb1
// CHECK: return
sil @projection_in_wrong_block : $@convention(thin) <B> (@in GS<B>) -> () {
bb0(%0 : $*GS<B>):
  %4 = alloc_stack $GS<B>
  copy_addr %0 to [initialization] %4 : $*GS<B>
  br bb1

bb1:
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

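// The store to the source happens only after the last load of the temporary,
// so the temporary is still redundant: the load is redirected to the source
// and ordered before the store.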
// CHECK-LABEL: sil @store_after_load
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: [[A1:%.*]] = struct_element_addr %1
// CHECK-NEXT: [[A2:%.*]] = struct_element_addr %1
// CHECK-NEXT: load [[A2]]
// CHECK-NEXT: store %2 to [[A1]]
// CHECK-NEXT: builtin
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @store_after_load : $@convention(thin) <B> (@in GS<B>, @inout GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %3 = struct_element_addr %1 : $*GS<B>, #GS._value
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  store %2 to %3 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

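// Same as above, but with two loads from the temporary before the store.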
// CHECK-LABEL: sil @store_after_two_loads
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: [[A1:%.*]] = struct_element_addr %1
// CHECK-NEXT: [[A2:%.*]] = struct_element_addr %1
// CHECK-NEXT: load [[A2]]
// CHECK-NEXT: load [[A2]]
// CHECK-NEXT: store %2 to [[A1]]
// CHECK-NEXT: builtin
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @store_after_two_loads : $@convention(thin) <B> (@in GS<B>, @inout GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %3 = struct_element_addr %1 : $*GS<B>, #GS._value
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  %8 = load %6 : $*Builtin.Int64
  store %2 to %3 : $*Builtin.Int64
  %9 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %8 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

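// Not optimized: the source is modified (through the aliasing
// struct_element_addr %3) before the load, so reading from the source instead
// of the temporary would observe a different value.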
// CHECK-LABEL: sil @store_before_load
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: struct_element_addr %1
// CHECK-NEXT: [[T:%.*]] = alloc_stack
// CHECK-NEXT: copy_addr %1 to [initialization] [[T]]
// CHECK-NEXT: [[A:%.*]] = struct_element_addr [[T]]
// CHECK-NEXT: store
// CHECK-NEXT: load [[A]]
// CHECK-NEXT: builtin
// CHECK-NEXT: destroy_addr [[T]]
// CHECK-NEXT: dealloc_stack [[T]]
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @store_before_load : $@convention(thin) <B> (@in GS<B>, @inout GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %3 = struct_element_addr %1 : $*GS<B>, #GS._value
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  store %2 to %3 : $*Builtin.Int64
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

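// Not optimized: the source is modified between the two loads, so the second
// load must keep reading the unmodified value from the temporary.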
// CHECK-LABEL: sil @store_between_loads
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: struct_element_addr %1
// CHECK-NEXT: [[T:%.*]] = alloc_stack
// CHECK-NEXT: copy_addr %1 to [initialization] [[T]]
// CHECK-NEXT: [[A:%.*]] = struct_element_addr [[T]]
// CHECK-NEXT: load [[A]]
// CHECK-NEXT: store
// CHECK-NEXT: load [[A]]
// CHECK-NEXT: builtin
// CHECK-NEXT: destroy_addr [[T]]
// CHECK-NEXT: dealloc_stack [[T]]
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @store_between_loads : $@convention(thin) <B> (@in GS<B>, @inout GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %3 = struct_element_addr %1 : $*GS<B>, #GS._value
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %7 = load %6 : $*Builtin.Int64
  store %2 to %3 : $*Builtin.Int64
  %8 = load %6 : $*Builtin.Int64
  %9 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %8 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

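// Not optimized: the call to the opaque @unknown function may modify the
// @inout_aliasable source before the load, so the temporary must be kept.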
// CHECK-LABEL: sil @potential_store_before_load
// CHECK: bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
// CHECK-NEXT: struct_element_addr %1
// CHECK-NEXT: [[T:%.*]] = alloc_stack
// CHECK-NEXT: copy_addr %1 to [initialization] [[T]]
// CHECK-NEXT: [[A:%.*]] = struct_element_addr [[T]]
// CHECK: apply
// CHECK-NEXT: load [[A]]
// CHECK-NEXT: builtin
// CHECK-NEXT: destroy_addr [[T]]
// CHECK-NEXT: dealloc_stack [[T]]
// CHECK-NEXT: tuple
// CHECK-NEXT: return
sil @potential_store_before_load : $@convention(thin) <B> (@in GS<B>, @inout_aliasable GS<B>, Builtin.Int64) -> () {
bb0(%0 : $*GS<B>, %1 : $*GS<B>, %2 : $Builtin.Int64):
  %3 = struct_element_addr %1 : $*GS<B>, #GS._value
  %4 = alloc_stack $GS<B>
  copy_addr %1 to [initialization] %4 : $*GS<B>
  %6 = struct_element_addr %4 : $*GS<B>, #GS._value
  %f = function_ref @unknown : $@convention(thin) () -> ()
  %a = apply %f() : $@convention(thin) () -> ()
  %7 = load %6 : $*Builtin.Int64
  %8 = builtin "cmp_slt_Int64"(%7 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
  destroy_addr %4 : $*GS<B>
  dealloc_stack %4 : $*GS<B>
  %9999 = tuple()
  return %9999 : $()
}

// Test temp RValue elimination on switches.
// CHECK-LABEL: sil @rvalueSwitch
// CHECK: bb1:
// CHECK-NEXT: struct_element_addr %1
// CHECK-NEXT: load
// CHECK-NOT: alloc_stack $UnfoldSequence
// CHECK: return
sil @rvalueSwitch : $@convention(method) <Element, State> (@inout UnfoldSequence<Element, State>) -> @out Optional<Element> {
bb0(%0 : $*Optional<Element>, %1 : $*UnfoldSequence<Element, State>):
  %2 = struct_element_addr %1 : $*UnfoldSequence<Element, State>, #UnfoldSequence._done
  %3 = struct_element_addr %2 : $*Bool, #Bool._value
  %4 = load %3 : $*Builtin.Int1
  cond_br %4, bb4, bb1

bb1:
  %6 = alloc_stack $UnfoldSequence<Element, State>
  copy_addr %1 to [initialization] %6 : $*UnfoldSequence<Element, State>
  %8 = struct_element_addr %6 : $*UnfoldSequence<Element, State>, #UnfoldSequence._next
  %9 = load %8 : $*@callee_guaranteed (@inout State) -> @out Optional<Element>
  %10 = alloc_stack $Optional<Element>
  %11 = struct_element_addr %1 : $*UnfoldSequence<Element, State>, #UnfoldSequence._state
  strong_retain %9 : $@callee_guaranteed (@inout State) -> @out Optional<Element>
  %13 = apply %9(%10, %11) : $@callee_guaranteed (@inout State) -> @out Optional<Element>
  switch_enum_addr %10 : $*Optional<Element>, case #Optional.some!enumelt.1: bb3, case #Optional.none!enumelt: bb2

bb2:
  destroy_addr %10 : $*Optional<Element>
  dealloc_stack %10 : $*Optional<Element>
  destroy_addr %6 : $*UnfoldSequence<Element, State>
  dealloc_stack %6 : $*UnfoldSequence<Element, State>
  %19 = integer_literal $Builtin.Int1, -1
  %20 = struct $Bool (%19 : $Builtin.Int1)
  store %20 to %2 : $*Bool
  %22 = alloc_stack $Optional<Element>
  inject_enum_addr %22 : $*Optional<Element>, #Optional.none!enumelt
  copy_addr [take] %22 to [initialization] %0 : $*Optional<Element>
  dealloc_stack %22 : $*Optional<Element>
  br bb5

bb3:
  %27 = unchecked_take_enum_data_addr %10 : $*Optional<Element>, #Optional.some!enumelt.1
  %28 = init_enum_data_addr %0 : $*Optional<Element>, #Optional.some!enumelt.1
  copy_addr [take] %27 to [initialization] %28 : $*Element
  dealloc_stack %10 : $*Optional<Element>
  destroy_addr %6 : $*UnfoldSequence<Element, State>
  dealloc_stack %6 : $*UnfoldSequence<Element, State>
  inject_enum_addr %0 : $*Optional<Element>, #Optional.some!enumelt.1
  br bb5

bb4:
  %35 = alloc_stack $Optional<Element>
  inject_enum_addr %35 : $*Optional<Element>, #Optional.none!enumelt
  copy_addr [take] %35 to [initialization] %0 : $*Optional<Element>
  dealloc_stack %35 : $*Optional<Element>
  br bb5

bb5:
  %40 = tuple ()
  return %40 : $()
}