// RUN: %target-swift-frontend -assume-parsing-unqualified-ownership-sil -parse-stdlib -primary-file %s -emit-ir -o - -disable-objc-attr-requires-foundation-module | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-runtime

// REQUIRES: CPU=x86_64

import Swift

// CHECK-DAG: [[REFCOUNT:%swift.refcounted.*]] = type
// CHECK-DAG: [[X:%T8builtins1XC]] = type
// CHECK-DAG: [[Y:%T8builtins1YC]] = type

typealias Int = Builtin.Int32
typealias Bool = Builtin.Int1

// CHECK: call swiftcc void @swift_errorInMain(

infix operator * {
  associativity left
  precedence 200
}
infix operator / {
  associativity left
  precedence 200
}
infix operator % {
  associativity left
  precedence 200
}

infix operator + {
  associativity left
  precedence 190
}
infix operator - {
  associativity left
  precedence 190
}

infix operator << {
  associativity none
  precedence 180
}
infix operator >> {
  associativity none
  precedence 180
}

infix operator ... {
  associativity none
  precedence 175
}

infix operator < {
  associativity none
  precedence 170
}
infix operator <= {
  associativity none
  precedence 170
}
infix operator > {
  associativity none
  precedence 170
}
infix operator >= {
  associativity none
  precedence 170
}

infix operator == {
  associativity none
  precedence 160
}
infix operator != {
  associativity none
  precedence 160
}

func * (lhs: Int, rhs: Int) -> Int {
  return Builtin.mul_Int32(lhs, rhs)
  // CHECK: mul i32
}
func / (lhs: Int, rhs: Int) -> Int {
  return Builtin.sdiv_Int32(lhs, rhs)
  // CHECK: sdiv i32
}
func % (lhs: Int, rhs: Int) -> Int {
  return Builtin.srem_Int32(lhs, rhs)
  // CHECK: srem i32
}
func + (lhs: Int, rhs: Int) -> Int {
  return Builtin.add_Int32(lhs, rhs)
  // CHECK: add i32
}
func - (lhs: Int, rhs: Int) -> Int {
  return Builtin.sub_Int32(lhs, rhs)
  // CHECK: sub i32
}
// In C, 180 is <<, >>
func < (lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_slt_Int32(lhs, rhs)
  // CHECK: icmp slt i32
}
func > (lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_sgt_Int32(lhs, rhs)
  // CHECK: icmp sgt i32
}
func <=(lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_sle_Int32(lhs, rhs)
  // CHECK: icmp sle i32
}
func >=(lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_sge_Int32(lhs, rhs)
  // CHECK: icmp sge i32
}
func ==(lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_eq_Int32(lhs, rhs)
  // CHECK: icmp eq i32
}
func !=(lhs: Int, rhs: Int) -> Bool {
  return Builtin.cmp_ne_Int32(lhs, rhs)
  // CHECK: icmp ne i32
}

func gepRaw_test(_ ptr: Builtin.RawPointer, offset: Builtin.Int64)
   -> Builtin.RawPointer {
  return Builtin.gepRaw_Int64(ptr, offset)
  // CHECK: getelementptr inbounds i8, i8*
}

// CHECK: define hidden {{.*}}i64 @_T08builtins9load_test{{[_0-9a-zA-Z]*}}F
func load_test(_ ptr: Builtin.RawPointer) -> Builtin.Int64 {
  // CHECK: [[CASTPTR:%.*]] = bitcast i8* [[PTR:%.*]] to i64*
  // CHECK-NEXT: load i64, i64* [[CASTPTR]]
  // CHECK: ret
  return Builtin.load(ptr)
}

// CHECK: define hidden {{.*}}i64 @_T08builtins13load_raw_test{{[_0-9a-zA-Z]*}}F
func load_raw_test(_ ptr: Builtin.RawPointer) -> Builtin.Int64 {
  // CHECK: [[CASTPTR:%.*]] = bitcast i8* [[PTR:%.*]] to i64*
  // CHECK-NEXT: load i64, i64* [[CASTPTR]]
  // CHECK: ret
  return Builtin.loadRaw(ptr)
}

// CHECK: define hidden {{.*}}void @_T08builtins11assign_test{{[_0-9a-zA-Z]*}}F
func assign_test(_ value: Builtin.Int64, ptr: Builtin.RawPointer) {
  Builtin.assign(value, ptr)
  // CHECK: ret
}
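// Note: for a trivially-copyable payload such as the i64 loads above,
// Builtin.load lowers to a bare load instruction. For reference-counted
// payloads (the next two tests) IRGen must also retain the loaded value,
// which is what the swift_rt_swift_retain CHECK lines verify. A minimal
// usage sketch, assuming a hypothetical caller that owns `buffer` (not part
// of the checked output):
//
//   func firstObject(_ buffer: Builtin.RawPointer) -> Builtin.NativeObject {
//     return Builtin.load(buffer) // emits load + retain of the result
//   }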
// CHECK: define hidden {{.*}}%swift.refcounted* @_T08builtins16load_object_test{{[_0-9a-zA-Z]*}}F
func load_object_test(_ ptr: Builtin.RawPointer) -> Builtin.NativeObject {
  // CHECK: [[T0:%.*]] = load [[REFCOUNT]]*, [[REFCOUNT]]**
  // CHECK: call [[REFCOUNT]]* @swift_rt_swift_retain([[REFCOUNT]]* returned [[T0]])
  // CHECK: ret [[REFCOUNT]]* [[T0]]
  return Builtin.load(ptr)
}

// CHECK: define hidden {{.*}}%swift.refcounted* @_T08builtins20load_raw_object_test{{[_0-9a-zA-Z]*}}F
func load_raw_object_test(_ ptr: Builtin.RawPointer) -> Builtin.NativeObject {
  // CHECK: [[T0:%.*]] = load [[REFCOUNT]]*, [[REFCOUNT]]**
  // CHECK: call [[REFCOUNT]]* @swift_rt_swift_retain([[REFCOUNT]]* returned [[T0]])
  // CHECK: ret [[REFCOUNT]]* [[T0]]
  return Builtin.loadRaw(ptr)
}

// CHECK: define hidden {{.*}}void @_T08builtins18assign_object_test{{[_0-9a-zA-Z]*}}F
func assign_object_test(_ value: Builtin.NativeObject, ptr: Builtin.RawPointer) {
  Builtin.assign(value, ptr)
}

// CHECK: define hidden {{.*}}void @_T08builtins16init_object_test{{[_0-9a-zA-Z]*}}F
func init_object_test(_ value: Builtin.NativeObject, ptr: Builtin.RawPointer) {
  // CHECK: [[DEST:%.*]] = bitcast i8* {{%.*}} to %swift.refcounted**
  // CHECK-NEXT: store [[REFCOUNT]]* {{%.*}}, [[REFCOUNT]]** [[DEST]]
  Builtin.initialize(value, ptr)
}

func cast_test(_ ptr: inout Builtin.RawPointer, i8: inout Builtin.Int8,
               i64: inout Builtin.Int64, f: inout Builtin.FPIEEE32,
               d: inout Builtin.FPIEEE64) {
  // CHECK: cast_test

  i8 = Builtin.trunc_Int64_Int8(i64)    // CHECK: trunc
  i64 = Builtin.zext_Int8_Int64(i8)     // CHECK: zext
  i64 = Builtin.sext_Int8_Int64(i8)     // CHECK: sext

  i64 = Builtin.ptrtoint_Int64(ptr)     // CHECK: ptrtoint
  ptr = Builtin.inttoptr_Int64(i64)     // CHECK: inttoptr

  i64 = Builtin.fptoui_FPIEEE64_Int64(d)  // CHECK: fptoui
  i64 = Builtin.fptosi_FPIEEE64_Int64(d)  // CHECK: fptosi
  d = Builtin.uitofp_Int64_FPIEEE64(i64)  // CHECK: uitofp
  d = Builtin.sitofp_Int64_FPIEEE64(i64)  // CHECK: sitofp

  d = Builtin.fpext_FPIEEE32_FPIEEE64(f)   // CHECK: fpext
  f = Builtin.fptrunc_FPIEEE64_FPIEEE32(d) // CHECK: fptrunc

  i64 = Builtin.bitcast_FPIEEE64_Int64(d)  // CHECK: bitcast
  d = Builtin.bitcast_Int64_FPIEEE64(i64)  // CHECK: bitcast
}

func intrinsic_test(_ i32: inout Builtin.Int32, i16: inout Builtin.Int16) {
  i32 = Builtin.int_bswap_Int32(i32) // CHECK: llvm.bswap.i32(
  i16 = Builtin.int_bswap_Int16(i16) // CHECK: llvm.bswap.i16(

  var x = Builtin.int_sadd_with_overflow_Int16(i16, i16) // CHECK: call { i16, i1 } @llvm.sadd.with.overflow.i16(

  Builtin.int_trap() // CHECK: llvm.trap()
}

// CHECK: define hidden {{.*}}void @_T08builtins19sizeof_alignof_testyyF()
func sizeof_alignof_test() {
  // CHECK: store i64 4, i64*
  var xs = Builtin.sizeof(Int.self)
  // CHECK: store i64 4, i64*
  var xa = Builtin.alignof(Int.self)
  // CHECK: store i64 1, i64*
  var ys = Builtin.sizeof(Bool.self)
  // CHECK: store i64 1, i64*
  var ya = Builtin.alignof(Bool.self)
}

// CHECK: define hidden {{.*}}void @_T08builtins27generic_sizeof_alignof_testyyxlF
func generic_sizeof_alignof_test<T>(_: T) {
  // CHECK: [[T0:%.*]] = getelementptr inbounds i8*, i8** [[T:%.*]], i32 9
  // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]]
  // CHECK-NEXT: [[SIZE:%.*]] = ptrtoint i8* [[T1]] to i64
  // CHECK-NEXT: store i64 [[SIZE]], i64* [[S:%.*]]
  var s = Builtin.sizeof(T.self)
  // CHECK: [[T0:%.*]] = getelementptr inbounds i8*, i8** [[T:%.*]], i32 10
  // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]]
  // CHECK-NEXT: [[T2:%.*]] = ptrtoint i8* [[T1]] to i64
  // CHECK-NEXT: [[T3:%.*]] = and i64 [[T2]], 65535
  // CHECK-NEXT: [[ALIGN:%.*]] = add i64 [[T3]], 1
  // CHECK-NEXT: store i64 [[ALIGN]], i64* [[A:%.*]]
  var a = Builtin.alignof(T.self)
}
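// The generic queries above and below read T's layout straight out of its
// value witness table: slot 9 holds the size, slot 10 the flags word (whose
// low 16 bits are the alignment mask, hence the `and` with 65535 followed by
// `add 1`), and slot 11 the stride. Outside -parse-stdlib code the same
// quantities surface as MemoryLayout<T>.size / .alignment / .stride. A
// hedged sketch of composing these builtins with allocRaw (hypothetical
// helper, not part of the checked output):
//
//   func allocOne<T>(_ type: T.Type) -> Builtin.RawPointer {
//     return Builtin.allocRaw(Builtin.sizeof(T.self), Builtin.alignof(T.self))
//   }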
// CHECK: define hidden {{.*}}void @_T08builtins21generic_strideof_testyyxlF
func generic_strideof_test<T>(_: T) {
  // CHECK: [[T0:%.*]] = getelementptr inbounds i8*, i8** [[T:%.*]], i32 11
  // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]]
  // CHECK-NEXT: [[STRIDE:%.*]] = ptrtoint i8* [[T1]] to i64
  // CHECK-NEXT: store i64 [[STRIDE]], i64* [[S:%.*]]
  var s = Builtin.strideof(T.self)
}

class X {}
class Y {}
func move(_ ptr: Builtin.RawPointer) {
  var temp : Y = Builtin.take(ptr)
  // CHECK: define hidden {{.*}}void @_T08builtins4move{{[_0-9a-zA-Z]*}}F
  // CHECK:   [[SRC:%.*]] = bitcast i8* {{%.*}} to [[Y]]**
  // CHECK-NEXT:  [[VAL:%.*]] = load [[Y]]*, [[Y]]** [[SRC]]
  // CHECK-NEXT:  store [[Y]]* [[VAL]], [[Y]]** {{%.*}}
}

func allocDealloc(_ size: Builtin.Word, align: Builtin.Word) {
  var ptr = Builtin.allocRaw(size, align)
  Builtin.deallocRaw(ptr, size, align)
}

func fence_test() {
  // CHECK: fence acquire
  Builtin.fence_acquire()

  // CHECK: fence syncscope("singlethread") acq_rel
  Builtin.fence_acqrel_singlethread()
}

func cmpxchg_test(_ ptr: Builtin.RawPointer, a: Builtin.Int32, b: Builtin.Int32) {
  // rdar://12939803 - ER: support atomic cmpxchg/xchg with pointers

  // CHECK: [[Z_RES:%.*]] = cmpxchg i32* {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire
  // CHECK: [[Z_VAL:%.*]] = extractvalue { i32, i1 } [[Z_RES]], 0
  // CHECK: [[Z_SUCCESS:%.*]] = extractvalue { i32, i1 } [[Z_RES]], 1
  // CHECK: store i32 [[Z_VAL]], i32* {{.*}}, align 4
  // CHECK: store i1 [[Z_SUCCESS]], i1* {{.*}}, align 1
  var (z, zSuccess) = Builtin.cmpxchg_acquire_acquire_Int32(ptr, a, b)

  // CHECK: [[Y_RES:%.*]] = cmpxchg volatile i32* {{.*}}, i32 {{.*}}, i32 {{.*}} monotonic monotonic
  // CHECK: [[Y_VAL:%.*]] = extractvalue { i32, i1 } [[Y_RES]], 0
  // CHECK: [[Y_SUCCESS:%.*]] = extractvalue { i32, i1 } [[Y_RES]], 1
  // CHECK: store i32 [[Y_VAL]], i32* {{.*}}, align 4
  // CHECK: store i1 [[Y_SUCCESS]], i1* {{.*}}, align 1
  var (y, ySuccess) = Builtin.cmpxchg_monotonic_monotonic_volatile_Int32(ptr, a, b)

  // CHECK: [[X_RES:%.*]] = cmpxchg volatile i32* {{.*}}, i32 {{.*}}, i32 {{.*}} syncscope("singlethread") acquire monotonic
  // CHECK: [[X_VAL:%.*]] = extractvalue { i32, i1 } [[X_RES]], 0
  // CHECK: [[X_SUCCESS:%.*]] = extractvalue { i32, i1 } [[X_RES]], 1
  // CHECK: store i32 [[X_VAL]], i32* {{.*}}, align 4
  // CHECK: store i1 [[X_SUCCESS]], i1* {{.*}}, align 1
  var (x, xSuccess) = Builtin.cmpxchg_acquire_monotonic_volatile_singlethread_Int32(ptr, a, b)

  // CHECK: [[W_RES:%.*]] = cmpxchg volatile i64* {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst
  // CHECK: [[W_VAL:%.*]] = extractvalue { i64, i1 } [[W_RES]], 0
  // CHECK: [[W_SUCCESS:%.*]] = extractvalue { i64, i1 } [[W_RES]], 1
  // CHECK: [[W_VAL_PTR:%.*]] = inttoptr i64 [[W_VAL]] to i8*
  // CHECK: store i8* [[W_VAL_PTR]], i8** {{.*}}, align 8
  // CHECK: store i1 [[W_SUCCESS]], i1* {{.*}}, align 1
  var (w, wSuccess) = Builtin.cmpxchg_seqcst_seqcst_volatile_singlethread_RawPointer(ptr, ptr, ptr)

  // CHECK: [[V_RES:%.*]] = cmpxchg weak volatile i64* {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst
  // CHECK: [[V_VAL:%.*]] = extractvalue { i64, i1 } [[V_RES]], 0
  // CHECK: [[V_SUCCESS:%.*]] = extractvalue { i64, i1 } [[V_RES]], 1
  // CHECK: [[V_VAL_PTR:%.*]] = inttoptr i64 [[V_VAL]] to i8*
  // CHECK: store i8* [[V_VAL_PTR]], i8** {{.*}}, align 8
  // CHECK: store i1 [[V_SUCCESS]], i1* {{.*}}, align 1
  var (v, vSuccess) = Builtin.cmpxchg_seqcst_seqcst_weak_volatile_singlethread_RawPointer(ptr, ptr, ptr)
}
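// Each cmpxchg/atomicrmw builtin name spells out its LLVM lowering, e.g.
// cmpxchg_<success-order>_<failure-order>[_weak][_volatile][_singlethread]_<Type>;
// pointer payloads round-trip through i64, which is why the RawPointer
// variants check an extra inttoptr on the extracted value. A hedged usage
// sketch, a hypothetical test-and-set lock acquire (not part of the checked
// output, and the exact builtin spelling is an assumption from the naming
// scheme above):
//
//   func tryLock(_ word: Builtin.RawPointer,
//                free: Builtin.Int32, held: Builtin.Int32) -> Builtin.Int1 {
//     let (_, won) = Builtin.cmpxchg_acquire_monotonic_Int32(word, free, held)
//     return won
//   }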
func atomicrmw_test(_ ptr: Builtin.RawPointer, a: Builtin.Int32,
                    ptr2: Builtin.RawPointer) {
  // CHECK: atomicrmw add i32* {{.*}}, i32 {{.*}} acquire
  var z = Builtin.atomicrmw_add_acquire_Int32(ptr, a)

  // CHECK: atomicrmw volatile max i32* {{.*}}, i32 {{.*}} monotonic
  var y = Builtin.atomicrmw_max_monotonic_volatile_Int32(ptr, a)

  // CHECK: atomicrmw volatile xchg i32* {{.*}}, i32 {{.*}} syncscope("singlethread") acquire
  var x = Builtin.atomicrmw_xchg_acquire_volatile_singlethread_Int32(ptr, a)

  // rdar://12939803 - ER: support atomic cmpxchg/xchg with pointers
  // CHECK: atomicrmw volatile xchg i64* {{.*}}, i64 {{.*}} syncscope("singlethread") acquire
  var w = Builtin.atomicrmw_xchg_acquire_volatile_singlethread_RawPointer(ptr, ptr2)
}

func addressof_test(_ a: inout Int, b: inout Bool) {
  // CHECK: bitcast i32* {{.*}} to i8*
  var ap : Builtin.RawPointer = Builtin.addressof(&a)
  // CHECK: bitcast i1* {{.*}} to i8*
  var bp : Builtin.RawPointer = Builtin.addressof(&b)
}

func fneg_test(_ half: Builtin.FPIEEE16,
               single: Builtin.FPIEEE32,
               double: Builtin.FPIEEE64)
  -> (Builtin.FPIEEE16, Builtin.FPIEEE32, Builtin.FPIEEE64)
{
  // CHECK: fsub half 0xH8000, {{%.*}}
  // CHECK: fsub float -0.000000e+00, {{%.*}}
  // CHECK: fsub double -0.000000e+00, {{%.*}}
  return (Builtin.fneg_FPIEEE16(half),
          Builtin.fneg_FPIEEE32(single),
          Builtin.fneg_FPIEEE64(double))
}

// The calls to the builtin should get removed before we reach IRGen.
func testStaticReport(_ b: Bool, ptr: Builtin.RawPointer) -> () {
  Builtin.staticReport(b, b, ptr);
  return Builtin.staticReport(b, b, ptr);
}

// CHECK-LABEL: define hidden {{.*}}void @_T08builtins12testCondFail{{[_0-9a-zA-Z]*}}F(i1, i1)
func testCondFail(_ b: Bool, c: Bool) {
  // CHECK: br i1 %0, label %[[FAIL:.*]], label %[[CONT:.*]]
  Builtin.condfail(b)
  // CHECK: