// RUN: %target-swift-frontend %s -emit-ir | %FileCheck %s

// REQUIRES: CPU=x86_64

sil_stage canonical

import Builtin

// This type has equal stride and size.
struct SameSizeStride { var x, y: Builtin.Int32 }

// This type has unequal stride and size.
struct DifferentSizeStride { var x: Builtin.Int32, y: Builtin.Int16 }

// CHECK: define{{( protected)?}} {{.*}}void @same_size_stride(%T8indexing14SameSizeStrideV* noalias nocapture dereferenceable({{.*}}) %0, i64 %1) {{.*}} {
// CHECK:   getelementptr inbounds %T8indexing14SameSizeStrideV, %T8indexing14SameSizeStrideV* %0, i64 %1

sil @same_size_stride : $@convention(thin) (@in SameSizeStride, Builtin.Word) -> () {
entry(%p : $*SameSizeStride, %i : $Builtin.Word):
  %q = index_addr %p : $*SameSizeStride, %i : $Builtin.Word
  return undef : $()
}

// CHECK: define{{( protected)?}} {{.*}}void @different_size_stride(%T8indexing19DifferentSizeStrideV* noalias nocapture dereferenceable({{.*}}) %0, i64 %1) {{.*}} {
// CHECK:      %2 = bitcast %T8indexing19DifferentSizeStrideV* %0 to i8*
// CHECK-NEXT: %3 = mul nsw i64 %1, 8
// CHECK-NEXT: %4 = getelementptr inbounds i8, i8* %2, i64 %3
// CHECK-NEXT: %5 = bitcast i8* %4 to %T8indexing19DifferentSizeStrideV*

sil @different_size_stride : $@convention(thin) (@in DifferentSizeStride, Builtin.Word) -> () {
entry(%p : $*DifferentSizeStride, %i : $Builtin.Word):
  %q = index_addr %p : $*DifferentSizeStride, %i : $Builtin.Word
  return undef : $()
}

// CHECK: define{{( protected)?}} {{.*}}void @zero_size(%swift.opaque* noalias nocapture %0, i64 %1) {{.*}} {
// CHECK:      %2 = bitcast %swift.opaque* %0 to i8*
// CHECK-NEXT: %3 = mul nsw i64 %1, 1
// CHECK-NEXT: %4 = getelementptr inbounds i8, i8* %2, i64 %3

sil @zero_size : $@convention(thin) (@in (), Builtin.Word) -> () {
entry(%p : $*(), %i : $Builtin.Word):
  %q = index_addr %p : $*(), %i : $Builtin.Word
  return undef : $()
}

// CHECK: define{{( protected)?}} {{.*}}void @dynamic_size(%swift.opaque* noalias nocapture %0, i64 %1, %swift.type* %T) {{.*}} {
// CHECK:      [[T0:%.*]] = bitcast %swift.type* %T to i8***
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8**, i8*** [[T0]], i64 -1
// CHECK-NEXT: [[VWT:%T.valueWitnesses]] = load i8**, i8*** [[T1]], align 8
// CHECK:      [[VWT_CAST:%.*]] = bitcast i8** [[VWT]] to %swift.vwtable*
// CHECK:      [[STRIDE_ADDR:%.*]] = getelementptr inbounds %swift.vwtable, %swift.vwtable* [[VWT_CAST]], i32 0, i32 9
// CHECK:      [[STRIDE:%.*]] = load i64, i64* [[STRIDE_ADDR]]
// CHECK-NEXT: [[T0:%.*]] = mul nsw i64 %1, [[STRIDE]]
// CHECK-NEXT: getelementptr inbounds i8, i8* %2, i64 [[T0]]

sil @dynamic_size : $@convention(thin) <T> (@in T, Builtin.Word) -> () {
entry(%p : $*T, %i : $Builtin.Word):
  %q = index_addr %p : $*T, %i : $Builtin.Word
  return undef : $()
}
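
// The layout assumption behind the constants checked above can be sanity-checked
// from plain Swift (a rough sketch, not part of this test: it uses the
// standard-library MemoryLayout API and Int32/Int16 in place of the Builtin
// types, and the struct names below are illustrative):
//
//   struct SameSize      { var x: Int32; var y: Int32 }
//   struct DifferentSize { var x: Int32; var y: Int16 }
//
//   MemoryLayout<SameSize>.size           // 8
//   MemoryLayout<SameSize>.stride         // 8  -- size == stride, so index_addr
//                                         //    lowers to a direct getelementptr
//   MemoryLayout<DifferentSize>.size      // 6
//   MemoryLayout<DifferentSize>.stride    // 8  -- size rounds up to the 4-byte
//                                         //    alignment; matches the
//                                         //    `mul nsw i64 %1, 8` checked in
//                                         //    @different_size_stride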