swift-mirror/test/SILOptimizer/optimize_never.sil
Nate Chandler e5d87f75a8 [SIL] Add source formal type to checked_cast_br.
It is needed for opaque values, where casts will now start out as
checked_cast_brs and only later be lowered to checked_cast_addr_brs:
the latter carries the source formal type, IRGen relies on being able
to access it, and in general there is no way to recover the source
formal type from the source lowered type.
2023-07-27 15:04:15 -07:00
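For reference, a sketch (not part of the commit message) of the checked_cast_br
form with an explicit source formal type, as exercised by the CHECK lines in
this test:

    checked_cast_br [exact] C in %0 : $C to C, bb2, bb3

The type written before "in" is the source formal type; the "$C" after the
operand is the source lowered type.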

// RUN: %target-sil-opt -enable-sil-verify-all -inline -generic-specializer -inline %s > %t.sil
// RUN: %target-sil-opt -sil-full-demangle -enable-sil-verify-all -specdevirt %t.sil | %FileCheck %s
// Check that the @_optimize(none) annotation prevents
// any optimization of the annotated function:
// - No functions are inlined into the annotated function.
// - No calls are devirtualized in the body of the annotated function.
// - No generic specializations of the annotated function are generated.
// - The annotated function is not inlined into any other function.
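// For orientation, an illustrative sketch (comments only, not consumed by this
// test): at the SIL level, a Swift function annotated @_optimize(none) carries
// the [Onone] attribute, which is what the passes exercised above must honor.
// The name @callee is a placeholder; foo1 and foo2 below are the real [Onone]
// functions in this file.
//
//   // never inlined into callers, never specialized, and no calls in its body
//   // are inlined or devirtualized
//   sil [Onone] @callee : $@convention(thin) (Int32) -> Int32 {
//   bb0(%0 : $Int32):
//     return %0 : $Int32
//   }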
sil_stage canonical
import Builtin
import Swift
import SwiftShims
public class C {
@inline(never) func foo() -> Int32
deinit
init()
}
public class D : C {
@inline(never) override func foo() -> Int32
deinit
override init()
}
public class Base {
@_optimize(none) func foo() -> Int32
func boo() -> Int32
deinit
init()
}
public class Derived1 : Base {
override func foo() -> Int32
@_optimize(none) override func boo() -> Int32
deinit
override init()
}
public class Derived2 : Base {
override func foo() -> Int32
override func boo() -> Int32
deinit
override init()
}
public func transform1<T>(x: T) -> Int32
@_optimize(none) public func foo1<T>(x: T, _ c: C) -> Int32
public func boo1(c: C) -> Int32
public func transform2(x: Int32) -> Int32
@_optimize(none) public func foo2(x: Int32, _ c: C) -> Int32
public func boo2(c: C) -> Int32
public func transform3<T>(x: T) -> Int32
public func foo3<T>(x: T, _ c: C) -> Int32
public func boo3(c: C) -> Int32
public func transform4(x: Int32) -> Int32
public func foo4(x: Int32, _ c: C) -> Int32
public func boo4(c: C) -> Int32
sil_global [serialized] @$ss7ProcessO5_argcs5Int32VvZ : $Int32
sil_global [serialized] @globalinit_33_1BDF70FFC18749BAB495A73B459ED2F0_token5 : $Builtin.Word
sil_global [serialized] @$ss7ProcessO11_unsafeArgvSpySpys4Int8VGGvZ : $UnsafeMutablePointer<UnsafeMutablePointer<Int8>>
sil hidden [noinline] @$s14optimize_never1CC3foos5Int32VyF : $@convention(method) (@guaranteed C) -> Int32 {
bb0(%0 : $C):
%2 = integer_literal $Builtin.Int32, 1
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil [transparent] [serialized] @$ss5Int32V22_builtinIntegerLiteralABBI__tcfC : $@convention(thin) (Builtin.IntLiteral, @thin Int32.Type) -> Int32
sil @$s14optimize_never1CCfD : $@convention(method) (@owned C) -> () {
bb0(%0 : $C):
%2 = function_ref @$s14optimize_never1CCfd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%3 = apply %2(%0) : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $C
dealloc_ref %4 : $C
%6 = tuple ()
return %6 : $()
}
sil @$s14optimize_never1CCfd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject {
bb0(%0 : $C):
%2 = unchecked_ref_cast %0 : $C to $Builtin.NativeObject
return %2 : $Builtin.NativeObject
}
sil hidden @$s14optimize_never1CCACycfc : $@convention(method) (@owned C) -> @owned C {
bb0(%0 : $C):
return %0 : $C
}
sil hidden [noinline] @$s14optimize_never1DC3foos5Int32VyF : $@convention(method) (@guaranteed D) -> Int32 {
bb0(%0 : $D):
%2 = integer_literal $Builtin.Int32, 11
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil @$s14optimize_never1DCfD : $@convention(method) (@owned D) -> () {
bb0(%0 : $D):
%2 = function_ref @$s14optimize_never1DCfd : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject
%3 = apply %2(%0) : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject
%4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $D
dealloc_ref %4 : $D
%6 = tuple ()
return %6 : $()
}
sil @$s14optimize_never1DCfd : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject {
bb0(%0 : $D):
%2 = upcast %0 : $D to $C
%3 = function_ref @$s14optimize_never1CCfd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%4 = apply %3(%2) : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
return %4 : $Builtin.NativeObject
}
sil hidden @$s14optimize_never1DCACycfc : $@convention(method) (@owned D) -> @owned D {
bb0(%0 : $D):
%1 = alloc_stack $D
store %0 to %1 : $*D
%3 = upcast %0 : $D to $C
%4 = function_ref @$s14optimize_never1CCACycfc : $@convention(method) (@owned C) -> @owned C
%5 = apply %4(%3) : $@convention(method) (@owned C) -> @owned C
%6 = unchecked_ref_cast %5 : $C to $D
store %6 to %1 : $*D
dealloc_stack %1 : $*D
return %6 : $D
}
sil @$s14optimize_never10transform1ys5Int32VxlF : $@convention(thin) <T> (@in T) -> Int32 {
bb0(%0 : $*T):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
destroy_addr %0 : $*T
return %3 : $Int32
}
// Check that neither call is inlined or devirtualized.
// CHECK-LABEL: sil [Onone] @$s14optimize_never4foo1ys5Int32Vx_AA1CCtlF
// CHECK-NOT: checked_cast_br
// CHECK: function_ref @$s14optimize_never10transform1ys5Int32VxlF
// CHECK: apply
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%.*}} : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
// CHECK: apply
// CHECK: return
sil [Onone] @$s14optimize_never4foo1ys5Int32Vx_AA1CCtlF : $@convention(thin) <T> (@in T, @owned C) -> Int32 {
bb0(%0 : $*T, %1 : $C):
%4 = function_ref @$s14optimize_never10transform1ys5Int32VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%5 = alloc_stack $T
copy_addr %0 to [init] %5 : $*T
%7 = apply %4<T>(%5) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%8 = class_method %1 : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
%9 = apply %8(%1) : $@convention(method) (@guaranteed C) -> Int32
%10 = struct_extract %7 : $Int32, #Int32._value
%11 = struct_extract %9 : $Int32, #Int32._value
%12 = integer_literal $Builtin.Int1, -1
%13 = builtin "sadd_with_overflow_Int32"(%10 : $Builtin.Int32, %11 : $Builtin.Int32, %12 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%14 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 0
%15 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %15 : $Builtin.Int1
%17 = struct $Int32 (%14 : $Builtin.Int32)
dealloc_stack %5 : $*T
strong_release %1 : $C
destroy_addr %0 : $*T
return %17 : $Int32
}
sil [transparent] [serialized] @$ss1poiys5Int32VAC_ACtFZ : $@convention(thin) (Int32, Int32) -> Int32
// CHECK-LABEL: sil @$s14optimize_never4boo1ys5Int32VAA1CCF
// CHECK-NOT: return
// This call should not be inlined, because the callee is annotated with @_optimize(none).
// CHECK: function_ref @$s14optimize_never4foo1ys5Int32Vx_AA1CCtlF
// CHECK: apply
// CHECK: return
sil @$s14optimize_never4boo1ys5Int32VAA1CCF : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @$s14optimize_never4foo1ys5Int32Vx_AA1CCtlF : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
%5 = alloc_stack $Int32
store %4 to %5 : $*Int32
strong_retain %0 : $C
%8 = apply %2<Int32>(%5, %0) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
dealloc_stack %5 : $*Int32
strong_release %0 : $C
return %8 : $Int32
}
sil [transparent] [serialized] @$sSi22_builtinIntegerLiteralSiBI__tcfC : $@convention(thin) (Builtin.IntLiteral, @thin Int.Type) -> Int
sil @$s14optimize_never10transform2ys5Int32VADF : $@convention(thin) (Int32) -> Int32 {
bb0(%0 : $Int32):
return %0 : $Int32
}
// Check that neither call is inlined or devirtualized.
// CHECK-LABEL: sil [Onone] @$s14optimize_never4foo2ys5Int32VAD_AA1CCtF
// CHECK-NOT: checked_cast_br
// CHECK: function_ref @$s14optimize_never10transform2ys5Int32VADF
// CHECK: apply
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%.*}} : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
// CHECK: apply
// CHECK: return
sil [Onone] @$s14optimize_never4foo2ys5Int32VAD_AA1CCtF : $@convention(thin) (Int32, @owned C) -> Int32 {
bb0(%0 : $Int32, %1 : $C):
%4 = function_ref @$s14optimize_never10transform2ys5Int32VADF : $@convention(thin) (Int32) -> Int32
%5 = apply %4(%0) : $@convention(thin) (Int32) -> Int32
%6 = class_method %1 : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
%7 = apply %6(%1) : $@convention(method) (@guaranteed C) -> Int32
%8 = struct_extract %5 : $Int32, #Int32._value
%9 = struct_extract %7 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, -1
%11 = builtin "sadd_with_overflow_Int32"(%8 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%12 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 0
%13 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %13 : $Builtin.Int1
%15 = struct $Int32 (%12 : $Builtin.Int32)
strong_release %1 : $C
return %15 : $Int32
}
// CHECK-LABEL: sil @$s14optimize_never4boo2ys5Int32VAA1CCF
// CHECK-NOT: return
// This call should not be inlined, because the callee is annotated with @_optimize(none).
// CHECK: function_ref @$s14optimize_never4foo2ys5Int32VAD_AA1CCtF
// CHECK: apply
// CHECK: return
sil @$s14optimize_never4boo2ys5Int32VAA1CCF : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @$s14optimize_never4foo2ys5Int32VAD_AA1CCtF : $@convention(thin) (Int32, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
strong_retain %0 : $C
%6 = apply %2(%4, %0) : $@convention(thin) (Int32, @owned C) -> Int32
strong_release %0 : $C
return %6 : $Int32
}
// Check that no generic specialization was produced for foo1 because it is excluded from optimizations.
// CHECK-NOT: generic specialization <Swift.Int> of {{.*}}.foo1
// But foo3 was specialized, because it is not annotated with @_optimize(none)
// CHECK: generic specialization <Swift.Int32> of {{.*}}.foo3
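// For reference, an illustrative sketch (the exact mangled name and lowered
// signature are elided; they are produced by the specializer): the expected
// outcome is a 'shared' clone of foo3 specialized for T == Int32, whose
// demangled name is what the CHECK line above matches; no such clone may be
// produced for foo1.
//
//   sil shared @"<foo3 specialized for Swift.Int32>" : $@convention(thin) ... {
//     ...
//   }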
sil @$s14optimize_never10transform3ys5Int32VxlF : $@convention(thin) <T> (@in T) -> Int32 {
bb0(%0 : $*T):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
destroy_addr %0 : $*T
return %3 : $Int32
}
sil @$s14optimize_never4foo3ys5Int32Vx_AA1CCtlF : $@convention(thin) <T> (@in T, @owned C) -> Int32 {
bb0(%0 : $*T, %1 : $C):
%4 = function_ref @$s14optimize_never10transform3ys5Int32VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%5 = alloc_stack $T
copy_addr %0 to [init] %5 : $*T
%7 = apply %4<T>(%5) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%8 = class_method %1 : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
%9 = apply %8(%1) : $@convention(method) (@guaranteed C) -> Int32
%10 = struct_extract %7 : $Int32, #Int32._value
%11 = struct_extract %9 : $Int32, #Int32._value
%12 = integer_literal $Builtin.Int1, -1
%13 = builtin "sadd_with_overflow_Int32"(%10 : $Builtin.Int32, %11 : $Builtin.Int32, %12 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%14 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 0
%15 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %15 : $Builtin.Int1
%17 = struct $Int32 (%14 : $Builtin.Int32)
dealloc_stack %5 : $*T
strong_release %1 : $C
destroy_addr %0 : $*T
return %17 : $Int32
}
// Everything in this function should get devirtualized, inlined, and folded together,
// because neither this function nor any of its callees is annotated to be
// excluded from optimization.
// CHECK-LABEL: sil @$s14optimize_never4boo3ys5Int32VAA1CCF
// CHECK: bb0
// CHECK-NOT: function_ref @$s14optimize_never4foo3ys5Int32Vx_AA1CCtlF
// The presence of checked_cast_br indicates that speculative devirtualization
// was performed, which is only possible if the original call was inlined.
// CHECK: checked_cast_br [exact] C in {{%.*}} : $C to C, bb2, bb3
// CHECK: return
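// For illustration, a rough sketch (not a literal CHECK pattern) of the shape
// speculative devirtualization is expected to leave behind once foo3 has been
// inlined here; the block names are placeholders:
//
//   checked_cast_br [exact] C in %c : $C to C, bbExact, bbFallback
//
//   bbExact(%e : $C):     // receiver is statically known to be exactly C:
//     ...                 // direct call to @$s14optimize_never1CC3foos5Int32VyF
//
//   bbFallback(%f : $C):  // any other subclass (e.g. D): the dynamic
//     ...                 // class_method dispatch through #C.foo remains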
sil @$s14optimize_never4boo3ys5Int32VAA1CCF : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @$s14optimize_never4foo3ys5Int32Vx_AA1CCtlF : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
%5 = alloc_stack $Int32
store %4 to %5 : $*Int32
strong_retain %0 : $C
%8 = apply %2<Int32>(%5, %0) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
dealloc_stack %5 : $*Int32
strong_release %0 : $C
return %8 : $Int32
}
sil @$s14optimize_never10transform4ys5Int32VADF : $@convention(thin) (Int32) -> Int32 {
bb0(%0 : $Int32):
return %0 : $Int32
}
sil @$s14optimize_never4foo4ys5Int32VAD_AA1CCtF : $@convention(thin) (Int32, @owned C) -> Int32 {
bb0(%0 : $Int32, %1 : $C):
%4 = function_ref @$s14optimize_never10transform4ys5Int32VADF : $@convention(thin) (Int32) -> Int32
%5 = apply %4(%0) : $@convention(thin) (Int32) -> Int32
%6 = class_method %1 : $C, #C.foo : (C) -> () -> Int32, $@convention(method) (@guaranteed C) -> Int32
%7 = apply %6(%1) : $@convention(method) (@guaranteed C) -> Int32
%8 = struct_extract %5 : $Int32, #Int32._value
%9 = struct_extract %7 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, -1
%11 = builtin "sadd_with_overflow_Int32"(%8 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%12 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 0
%13 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %13 : $Builtin.Int1
%15 = struct $Int32 (%12 : $Builtin.Int32)
strong_release %1 : $C
return %15 : $Int32
}
// Everything in this function should get devirtualized and inlined,
// because neither this function nor any of its callees is annotated to be
// excluded from optimization.
// CHECK-LABEL: sil @$s14optimize_never4boo4ys5Int32VAA1CCF
// CHECK: bb0
// CHECK-NOT: function_ref @$s14optimize_never4foo4ys5Int32VAD_AA1CCtF
// The presence of checked_cast_br indicates that speculative devirtualization
// was performed, which is only possible if the original call was inlined.
// CHECK: checked_cast_br [exact] C in {{%.*}} : $C to C, bb2, bb3
// CHECK: return
sil @$s14optimize_never4boo4ys5Int32VAA1CCF : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @$s14optimize_never4foo4ys5Int32VAD_AA1CCtF : $@convention(thin) (Int32, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
strong_retain %0 : $C
%6 = apply %2(%4, %0) : $@convention(thin) (Int32, @owned C) -> Int32
strong_release %0 : $C
return %6 : $Int32
}
sil hidden [Onone] @$s14optimize_never4BaseC3foos5Int32VyF : $@convention(method) (@guaranteed Base) -> Int32 {
bb0(%0 : $Base):
%2 = integer_literal $Builtin.Int32, 1
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @$s14optimize_never4BaseC3boos5Int32VyF : $@convention(method) (@guaranteed Base) -> Int32 {
bb0(%0 : $Base):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @$s14optimize_never8Derived1C3foos5Int32VyF : $@convention(method) (@guaranteed Derived1) -> Int32 {
bb0(%0 : $Derived1):
%2 = integer_literal $Builtin.Int32, 3
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden [Onone] @$s14optimize_never8Derived1C3boos5Int32VyF : $@convention(method) (@guaranteed Derived1) -> Int32 {
bb0(%0 : $Derived1):
%2 = integer_literal $Builtin.Int32, 4
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @$s14optimize_never8Derived2C3foos5Int32VyF : $@convention(method) (@guaranteed Derived2) -> Int32 {
bb0(%0 : $Derived2):
%2 = integer_literal $Builtin.Int32, 5
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @$s14optimize_never8Derived2C3boos5Int32VyF : $@convention(method) (@guaranteed Derived2) -> Int32 {
bb0(%0 : $Derived2):
%2 = integer_literal $Builtin.Int32, 6
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
// Check that the class_method call is not speculatively devirtualized, because
// Base.foo, the method referenced by the class_method instruction, is marked
// with @_optimize(none).
//
// CHECK-LABEL: sil @$s14optimize_never19testDoNotDevirtBase1os5Int32VAA0G0C_tF
// CHECK: bb0
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%[0-9]+}} : $Base, #Base.foo : (Base) -> () -> Int32
// CHECK-NOT: checked_cast_br
// CHECK: return
sil @$s14optimize_never19testDoNotDevirtBase1os5Int32VAA0G0C_tF : $@convention(thin) (@owned Base) -> Int32 {
bb0(%0 : $Base):
%2 = class_method %0 : $Base, #Base.foo : (Base) -> () -> Int32, $@convention(method) (@guaranteed Base) -> Int32
%3 = apply %2(%0) : $@convention(method) (@guaranteed Base) -> Int32
strong_release %0 : $Base
return %3 : $Int32
}
// Check that Derived1.boo is not devirtualized, because this method
// is marked with @_optimize(none) and thus should not
// participate in speculative devirtualization.
//
// CHECK-LABEL: sil @$s14optimize_never23testDoNotDevirtDerived11os5Int32VAA4BaseC_tF
// CHECK: bb0
// CHECK: class_method {{%[0-9]+}} : $Base, #Base.boo : (Base) -> () -> Int32
// CHECK: checked_cast_br [exact] Base in {{%[0-9]+}} : $Base to Base
// CHECK: checked_cast_br [exact] Base in {{%[0-9]+}} : $Base to Derived2
// CHECK-NOT: class_method
// CHECK: }
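// For illustration, a sketch (not literal CHECK patterns) of the expected
// speculated dispatch: only Base and Derived2 get exact-cast cases, because
// Derived1's boo override is [Onone].
//
//   checked_cast_br [exact] Base in %b : $Base to Base, ...      // direct call to Base.boo
//   checked_cast_br [exact] Base in %b : $Base to Derived2, ...  // direct call to Derived2.boo
//
// The original class_method dispatch of #Base.boo is kept for the remaining
// cases, including Derived1.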
sil @$s14optimize_never23testDoNotDevirtDerived11os5Int32VAA4BaseC_tF : $@convention(thin) (@owned Base) -> Int32 {
bb0(%0 : $Base):
%2 = class_method %0 : $Base, #Base.boo : (Base) -> () -> Int32, $@convention(method) (@guaranteed Base) -> Int32
%3 = apply %2(%0) : $@convention(method) (@guaranteed Base) -> Int32
strong_release %0 : $Base
return %3 : $Int32
}
sil_vtable C {
#C.foo: @$s14optimize_never1CC3foos5Int32VyF
#C.init!initializer: @$s14optimize_never1CCACycfc
#C.deinit!deallocator: @$s14optimize_never1CCfD
}
sil_vtable D {
#C.foo: @$s14optimize_never1DC3foos5Int32VyF [override]
#C.init!initializer: @$s14optimize_never1DCACycfc [override]
#D.deinit!deallocator: @$s14optimize_never1DCfD
}
sil_vtable Base {
#Base.foo: @$s14optimize_never4BaseC3foos5Int32VyF
#Base.boo: @$s14optimize_never4BaseC3boos5Int32VyF
}
sil_vtable Derived1 {
#Base.foo: @$s14optimize_never8Derived1C3foos5Int32VyF [override]
#Base.boo: @$s14optimize_never8Derived1C3boos5Int32VyF [override]
}
sil_vtable Derived2 {
#Base.foo: @$s14optimize_never8Derived2C3foos5Int32VyF [override]
#Base.boo: @$s14optimize_never8Derived2C3boos5Int32VyF [override]
}