TLDR: This patch introduces a new kind of builtin, a "polymorphic builtin". One calls it like any other builtin, e.g.:

```
Builtin.generic_add(x, y)
```

but it comes with a contract: it must be specialized to a concrete builtin by the time we hit Lowered SIL. In this commit, I add support for the following generic operations:

Type            | Op
----------------|----------
FloatOrVector   | FAdd
FloatOrVector   | FDiv
FloatOrVector   | FMul
FloatOrVector   | FRem
FloatOrVector   | FSub
IntegerOrVector | AShr
IntegerOrVector | Add
IntegerOrVector | And
IntegerOrVector | ExactSDiv
IntegerOrVector | ExactUDiv
IntegerOrVector | LShr
IntegerOrVector | Mul
IntegerOrVector | Or
IntegerOrVector | SDiv
IntegerOrVector | SRem
IntegerOrVector | Shl
IntegerOrVector | Sub
IntegerOrVector | UDiv
IntegerOrVector | Xor
Integer         | URem

NOTE: I only implemented support for these builtins in SIL and SILGen. I am going to implement the optimizer parts in a separate series of commits.

DISCUSSION
----------

Today, LLVM IR has polymorphic instructions. Yet at the Swift and SIL levels we instead represent these operations as builtins whose names have the operand type splatted into them. For example, adding two things in LLVM:

```
%2 = add i64 %0, %1
%2 = add <2 x i64> %0, %1
%2 = add <4 x i64> %0, %1
%2 = add <8 x i64> %0, %1
```

Each of these add operations is performed by the same polymorphic instruction. In contrast, in Swift today we splat out one builtin per type, i.e.:

```
let x, y: Builtin.Int32
Builtin.add_Int32(x, y)

let x, y: Builtin.Vec4xInt32
Builtin.add_Vec4xInt32(x, y)
...
```

In SIL we translate these verbatim, and IRGen then lowers each one to the appropriate polymorphic instruction. Beyond being verbose, this prevents these builtins (which need static types) from being used in generic contexts where we can guarantee that a static type will eventually be provided.

In contrast, the polymorphic builtins introduced in this commit can be passed values of any type, with the proviso that the expert user of this feature guarantees that the generic_add is eliminated before we reach Lowered SIL. This is enforced by IRGen asserting if it is ever passed such a builtin, and by the SILVerifier checking that the underlying builtin is never called once the module is in Lowered SIL.

In forthcoming commits, I am going to add two optimizations that give the stdlib author the tools needed to use this builtin:

1. An optimization in constant propagation that specializes a "generic_*" op to the type of its argument, if that type is valid for the builtin (i.e. an integer or vector).

2. Teaching the SILCloner how to specialize these builtins as it inlines. This ensures that when we transparent-inline, we specialize the builtin automatically and can then form SSA at -Onone using predictable memory access operations.

The main implication of these polymorphic builtins is that if an author cannot specialize the builtin, they need to ensure that the generic builtin is DCEed after constant propagation. The general rule is that the -Onone optimizer will constant fold branches with constant integer operands, so if one gates the operation on a bool of some sort, one can be guaranteed that the unspecialized code will never be codegened; a sketch of this pattern follows below. I am considering putting in some sort of diagnostic to ensure that the stdlib writer has a good experience (e.g. getting an error instead of crashing the compiler).
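To make that pattern concrete, here is a minimal sketch of the intended usage (hypothetical stdlib-style code built with -parse-stdlib; `myAdd` and `fallbackAdd` are illustrative names, not part of this patch, and it assumes the mandatory passes fold Builtin.isConcrete to a constant):

```
@_transparent
func myAdd<T>(_ x: T, _ y: T) -> T {
  // If the condition folds to a constant, the dead branch is eliminated even
  // at -Onone, so the unspecialized generic_add never reaches IRGen on the
  // path where it cannot be specialized.
  if Bool(Builtin.isConcrete(T.self)) {
    // Transparent inlining specializes this to e.g. add_Int32 when T is known.
    return Builtin.generic_add(x, y)
  }
  return fallbackAdd(x, y) // illustrative slow path
}
```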
//===--- GenBuiltin.cpp - IR Generation for calls to builtin functions ----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements IR generation for the assorted operations that
// are performed by builtin functions.
//
//===----------------------------------------------------------------------===//

#include "GenBuiltin.h"

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/ADT/StringSwitch.h"
#include "swift/AST/Builtins.h"
#include "swift/AST/Types.h"
#include "swift/SIL/SILModule.h"
#include "clang/AST/ASTContext.h"

#include "Explosion.h"
#include "GenCall.h"
#include "GenCast.h"
#include "GenIntegerLiteral.h"
#include "IRGenFunction.h"
#include "IRGenModule.h"
#include "LoadableTypeInfo.h"

using namespace swift;
using namespace irgen;

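/// Emit a cast builtin by applying the given LLVM cast opcode to the
/// single claimed operand.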
static void emitCastBuiltin(IRGenFunction &IGF, SILType destType,
                            Explosion &result,
                            Explosion &args,
                            llvm::Instruction::CastOps opcode) {
  llvm::Value *input = args.claimNext();
  assert(args.empty() && "wrong operands to cast operation");

  llvm::Type *destTy = IGF.IGM.getStorageType(destType);
  llvm::Value *output = IGF.Builder.CreateCast(opcode, input, destTy);
  result.add(output);
}

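/// Emit a trunc/zext/sext builtin that degrades to a plain bitcast when the
/// source and destination types already have the same width.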
static void emitCastOrBitCastBuiltin(IRGenFunction &IGF,
                                     SILType destType,
                                     Explosion &result,
                                     Explosion &args,
                                     BuiltinValueKind BV) {
  llvm::Value *input = args.claimNext();
  assert(args.empty() && "wrong operands to cast operation");

  llvm::Type *destTy = IGF.IGM.getStorageType(destType);
  llvm::Value *output;
  switch (BV) {
  default: llvm_unreachable("Not a cast-or-bitcast operation");
  case BuiltinValueKind::TruncOrBitCast:
    output = IGF.Builder.CreateTruncOrBitCast(input, destTy); break;
  case BuiltinValueKind::ZExtOrBitCast:
    output = IGF.Builder.CreateZExtOrBitCast(input, destTy); break;
  case BuiltinValueKind::SExtOrBitCast:
    output = IGF.Builder.CreateSExtOrBitCast(input, destTy); break;
  }
  result.add(output);
}

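/// Emit a comparison builtin, selecting fcmp for floating-point (or FP
/// vector) operands and icmp for everything else.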
static void emitCompareBuiltin(IRGenFunction &IGF, Explosion &result,
                               Explosion &args, llvm::CmpInst::Predicate pred) {
  llvm::Value *lhs = args.claimNext();
  llvm::Value *rhs = args.claimNext();

  llvm::Value *v;
  if (lhs->getType()->isFPOrFPVectorTy())
    v = IGF.Builder.CreateFCmp(pred, lhs, rhs);
  else
    v = IGF.Builder.CreateICmp(pred, lhs, rhs);

  result.add(v);
}

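/// Emit a type trait builtin by folding the trait to a constant tristate
/// (0 = no, 1 = yes, 2 = maybe) at IRGen time.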
static void emitTypeTraitBuiltin(IRGenFunction &IGF,
                                 Explosion &out,
                                 Explosion &args,
                                 SubstitutionMap substitutions,
                                 TypeTraitResult (TypeBase::*trait)()) {
  assert(substitutions.getReplacementTypes().size() == 1
         && "type trait should have gotten single type parameter");
  args.claimNext();

  // Lower away the trait to a tristate 0 = no, 1 = yes, 2 = maybe.
  unsigned result;
  switch ((substitutions.getReplacementTypes()[0].getPointer()->*trait)()) {
  case TypeTraitResult::IsNot:
    result = 0;
    break;
  case TypeTraitResult::Is:
    result = 1;
    break;
  case TypeTraitResult::CanBe:
    result = 2;
    break;
  }

  out.add(llvm::ConstantInt::get(IGF.IGM.Int8Ty, result));
}

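/// Lower the given formal type and return both the lowered SIL type and its
/// corresponding TypeInfo.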
static std::pair<SILType, const TypeInfo &>
getLoweredTypeAndTypeInfo(IRGenModule &IGM, Type unloweredType) {
  auto lowered = IGM.getLoweredType(unloweredType);
  return {lowered, IGM.getTypeInfo(lowered)};
}

/// emitBuiltinCall - Emit a call to a builtin function.
void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
                            Identifier FnId, SILType resultType,
                            Explosion &args, Explosion &out,
                            SubstitutionMap substitutions) {
  if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteedEnd) {
    // Just consume the incoming argument.
    assert(args.size() == 1 && "Expecting one incoming argument");
    (void)args.claimAll();
    return;
  }

  if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteed) {
    // Just forward the incoming argument.
    assert(args.size() == 1 && "Expecting one incoming argument");
    out = std::move(args);
    // This is a token.
    out.add(llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::OnFastPath) {
    // The onFastPath builtin has only an effect on SIL level, so we lower it
    // to a no-op.
    return;
  }

  // These builtins don't care about their argument:
  if (Builtin.ID == BuiltinValueKind::Sizeof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    out.add(valueTy.second.getSize(IGF, valueTy.first));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Strideof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    out.add(valueTy.second.getStride(IGF, valueTy.first));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Alignof) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    // The alignof value is one greater than the alignment mask.
    out.add(IGF.Builder.CreateAdd(
        valueTy.second.getAlignmentMask(IGF, valueTy.first),
        IGF.IGM.getSize(Size(1))));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsPOD) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    out.add(valueTy.second.getIsPOD(IGF, valueTy.first));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsConcrete) {
    (void)args.claimAll();
    auto isConcrete = !substitutions.getReplacementTypes()[0]->hasArchetype();
    out.add(llvm::ConstantInt::get(IGF.IGM.Int1Ty, isConcrete));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsBitwiseTakable) {
    (void)args.claimAll();
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    out.add(valueTy.second.getIsBitwiseTakable(IGF, valueTy.first));
    return;
  }

  // addressof expects an lvalue argument.
  if (Builtin.ID == BuiltinValueKind::AddressOf) {
    llvm::Value *address = args.claimNext();
    llvm::Value *value = IGF.Builder.CreateBitCast(address,
                                                   IGF.IGM.Int8PtrTy);
    out.add(value);
    return;
  }

  // Everything else cares about the (rvalue) argument.

  // If this is an LLVM IR intrinsic, lower it to an intrinsic call.
  const IntrinsicInfo &IInfo = IGF.getSILModule().getIntrinsicInfo(FnId);
  llvm::Intrinsic::ID IID = IInfo.ID;

  // Emit non-mergeable traps only.
  if (IGF.Builder.isTrapIntrinsic(IID)) {
    IGF.Builder.CreateNonMergeableTrap(IGF.IGM, StringRef());
    return;
  }

  // Calls to the int_instrprof_increment intrinsic are emitted during SILGen.
  // At that stage, the function name GV used by the profiling pass is hidden.
  // Fix the intrinsic call here by pointing it to the correct GV.
  if (IID == llvm::Intrinsic::instrprof_increment) {
    // If we import profiling intrinsics from a swift module but profiling is
    // not enabled, ignore the increment.
    SILModule &SILMod = IGF.getSILModule();
    const auto &Opts = SILMod.getOptions();
    if (!Opts.GenerateProfile) {
      (void)args.claimAll();
      return;
    }

    // Extract the PGO function name.
    auto *NameGEP = cast<llvm::User>(args.claimNext());
    auto *NameGV = dyn_cast<llvm::GlobalVariable>(NameGEP->stripPointerCasts());

    // TODO: The SIL optimizer may rewrite the name argument in a way that
    // makes it impossible to lower. Until that issue is fixed, defensively
    // refuse to lower ill-formed intrinsics (rdar://39146527).
    if (!NameGV) {
      (void)args.claimAll();
      return;
    }

    auto *NameC = NameGV->getInitializer();
    StringRef Name = cast<llvm::ConstantDataArray>(NameC)->getRawDataValues();
    StringRef PGOFuncName = Name.rtrim(StringRef("\0", 1));

    // Point the increment call to the right function name variable.
    std::string PGOFuncNameVar = llvm::getPGOFuncNameVarName(
        PGOFuncName, llvm::GlobalValue::LinkOnceAnyLinkage);
    auto *FuncNamePtr = IGF.IGM.Module.getNamedGlobal(PGOFuncNameVar);
    if (!FuncNamePtr)
      FuncNamePtr = llvm::createPGOFuncNameVar(
          *IGF.IGM.getModule(), llvm::GlobalValue::LinkOnceAnyLinkage,
          PGOFuncName);

    llvm::SmallVector<llvm::Value *, 2> Indices(2, NameGEP->getOperand(1));
    NameGEP = llvm::ConstantExpr::getGetElementPtr(
        ((llvm::PointerType *)FuncNamePtr->getType())->getElementType(),
        FuncNamePtr, makeArrayRef(Indices));

    // Replace the placeholder value with the new GEP.
    Explosion replacement;
    replacement.add(NameGEP);
    replacement.add(args.claimAll());
    args = std::move(replacement);
  }

  if (IID != llvm::Intrinsic::not_intrinsic) {
    SmallVector<llvm::Type*, 4> ArgTys;
    for (auto T : IInfo.Types)
      ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(T->getCanonicalType()));

    auto F = llvm::Intrinsic::getDeclaration(&IGF.IGM.Module,
                                             (llvm::Intrinsic::ID)IID, ArgTys);
    llvm::FunctionType *FT = F->getFunctionType();
    SmallVector<llvm::Value*, 8> IRArgs;
    for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
      IRArgs.push_back(args.claimNext());
    llvm::Value *TheCall = IGF.Builder.CreateCall(F, IRArgs);

    if (!TheCall->getType()->isVoidTy())
      extractScalarResults(IGF, TheCall->getType(), TheCall, out);

    return;
  }

  if (Builtin.ID == BuiltinValueKind::StringObjectOr) {
    llvm::Value *lhs = args.claimNext();
    llvm::Value *rhs = args.claimNext();
    llvm::Value *v = IGF.Builder.CreateOr(lhs, rhs);
    return out.add(v);
  }

  // TODO: A linear series of ifs is suboptimal.
#define BUILTIN_SIL_OPERATION(id, name, overload) \
  if (Builtin.ID == BuiltinValueKind::id) \
    llvm_unreachable(name " builtin should be lowered away by SILGen!");

#define BUILTIN_CAST_OPERATION(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCastBuiltin(IGF, resultType, out, args, \
                           llvm::Instruction::id);

#define BUILTIN_CAST_OR_BITCAST_OPERATION(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCastOrBitCastBuiltin(IGF, resultType, out, args, \
                                    BuiltinValueKind::id);

#define BUILTIN_BINARY_OPERATION_OVERLOADED_STATIC(id, name, attrs, overload) \
  if (Builtin.ID == BuiltinValueKind::id) { \
    llvm::Value *lhs = args.claimNext(); \
    llvm::Value *rhs = args.claimNext(); \
    llvm::Value *v = IGF.Builder.Create##id(lhs, rhs); \
    return out.add(v); \
  }
#define BUILTIN_BINARY_OPERATION_POLYMORPHIC(id, name, attrs) \
  assert(Builtin.ID != BuiltinValueKind::id && \
         "This builtin should never be seen by IRGen. It is invalid in " \
         "Lowered sil");

#define BUILTIN_RUNTIME_CALL(id, name, attrs) \
  if (Builtin.ID == BuiltinValueKind::id) { \
    auto *fn = cast<llvm::Function>(IGF.IGM.get##id##Fn()); \
    llvm::CallInst *call = IGF.Builder.CreateCall(fn, args.claimNext()); \
    call->setCallingConv(fn->getCallingConv()); \
    call->setAttributes(fn->getAttributes()); \
    return out.add(call); \
  }

#define BUILTIN_BINARY_OPERATION_WITH_OVERFLOW(id, name, uncheckedID, attrs, overload) \
  if (Builtin.ID == BuiltinValueKind::id) { \
    SmallVector<llvm::Type*, 2> ArgTys; \
    auto opType = Builtin.Types[0]->getCanonicalType(); \
    ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(opType)); \
    auto F = llvm::Intrinsic::getDeclaration(&IGF.IGM.Module, \
        getLLVMIntrinsicIDForBuiltinWithOverflow(Builtin.ID), ArgTys); \
    SmallVector<llvm::Value*, 2> IRArgs; \
    IRArgs.push_back(args.claimNext()); \
    IRArgs.push_back(args.claimNext()); \
    args.claimNext(); \
    llvm::Value *TheCall = IGF.Builder.CreateCall(F, IRArgs); \
    extractScalarResults(IGF, TheCall->getType(), TheCall, out); \
    return; \
  }
// FIXME: We could generate the code to dynamically report the overflow if the
// third argument is true. Now, we just ignore it.

#define BUILTIN_BINARY_PREDICATE(id, name, attrs, overload) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitCompareBuiltin(IGF, out, args, llvm::CmpInst::id);

#define BUILTIN_TYPE_TRAIT_OPERATION(id, name) \
  if (Builtin.ID == BuiltinValueKind::id) \
    return emitTypeTraitBuiltin(IGF, out, args, substitutions, &TypeBase::name);

#define BUILTIN(ID, Name, Attrs) // Ignore the rest.
#include "swift/AST/Builtins.def"

  if (Builtin.ID == BuiltinValueKind::GlobalStringTablePointer) {
    // This builtin should be used only on strings constructed from a
    // string literal. If we ever get to the point of executing this builtin
    // at run time, it implies an incorrect use of the builtin and must result
    // in a trap.
    IGF.emitTrap("invalid use of globalStringTablePointer",
                 /*Unreachable=*/false);
    auto returnValue = llvm::UndefValue::get(IGF.IGM.Int8PtrTy);
    // Consume the arguments of the builtin.
    (void)args.claimAll();
    return out.add(returnValue);
  }

  if (Builtin.ID == BuiltinValueKind::WillThrow) {
    // willThrow is emitted like a Swift function call with the error in
    // the error return register. We also have to pass a fake context
    // argument due to how swiftcc works in clang.

    auto *fn = cast<llvm::Function>(IGF.IGM.getWillThrowFn());
    auto error = args.claimNext();
    auto errorBuffer = IGF.getErrorResultSlot(
        SILType::getPrimitiveObjectType(IGF.IGM.Context.getErrorDecl()
                                            ->getDeclaredType()
                                            ->getCanonicalType()));
    IGF.Builder.CreateStore(error, errorBuffer);

    auto context = llvm::UndefValue::get(IGF.IGM.Int8PtrTy);

    llvm::CallInst *call = IGF.Builder.CreateCall(
        fn, {context, errorBuffer.getAddress()});
    call->setCallingConv(IGF.IGM.SwiftCC);
    call->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::NoUnwind);
    call->addAttribute(llvm::AttributeList::FirstArgIndex + 1,
                       llvm::Attribute::ReadOnly);

    auto attrs = call->getAttributes();
    IGF.IGM.addSwiftSelfAttributes(attrs, 0);
    IGF.IGM.addSwiftErrorAttributes(attrs, 1);
    call->setAttributes(attrs);

    IGF.Builder.CreateStore(llvm::ConstantPointerNull::get(IGF.IGM.ErrorPtrTy),
                            errorBuffer);

    return out.add(call);
  }

  if (Builtin.ID == BuiltinValueKind::FNeg) {
    llvm::Value *rhs = args.claimNext();
    llvm::Value *lhs = llvm::ConstantFP::get(rhs->getType(), "-0.0");
    llvm::Value *v = IGF.Builder.CreateFSub(lhs, rhs);
    return out.add(v);
  }

  if (Builtin.ID == BuiltinValueKind::AssumeTrue) {
    llvm::Value *v = args.claimNext();
    if (v->getType() == IGF.IGM.Int1Ty) {
      IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::ID::assume, v);
    }
    return;
  }

  if (Builtin.ID == BuiltinValueKind::AssumeNonNegative) {
    llvm::Value *v = args.claimNext();
    // Set a value range on the load instruction, which must be the argument of
    // the builtin.
    if (isa<llvm::LoadInst>(v) || isa<llvm::CallInst>(v)) {
      // The load must be post-dominated by the builtin. Otherwise we would get
      // a wrong assumption in the else-branch in this example:
      //   x = f()
      //   if condition {
      //     y = assumeNonNegative(x)
      //   } else {
      //     // x might be negative here!
      //   }
      // For simplicity we just enforce that both the load and the builtin must
      // be in the same block.
      llvm::Instruction *I = static_cast<llvm::Instruction *>(v);
      if (I->getParent() == IGF.Builder.GetInsertBlock()) {
        llvm::LLVMContext &ctx = IGF.IGM.Module.getContext();
        auto *intType = dyn_cast<llvm::IntegerType>(v->getType());
        llvm::Metadata *rangeElems[] = {
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(intType, 0)),
          llvm::ConstantAsMetadata::get(
              llvm::ConstantInt::get(intType,
                  APInt::getSignedMaxValue(intType->getBitWidth())))
        };
        llvm::MDNode *range = llvm::MDNode::get(ctx, rangeElems);
        I->setMetadata(llvm::LLVMContext::MD_range, range);
      }
    }
    // Don't generate any code for the builtin.
    return out.add(v);
  }

  if (Builtin.ID == BuiltinValueKind::AllocRaw) {
    auto size = args.claimNext();
    auto align = args.claimNext();
    // Translate the alignment to a mask.
    auto alignMask = IGF.Builder.CreateSub(align, IGF.IGM.getSize(Size(1)));
    auto alloc = IGF.emitAllocRawCall(size, alignMask, "builtin-allocRaw");
    out.add(alloc);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::DeallocRaw) {
    auto pointer = args.claimNext();
    auto size = args.claimNext();
    auto align = args.claimNext();
    // Translate the alignment to a mask.
    auto alignMask = IGF.Builder.CreateSub(align, IGF.IGM.getSize(Size(1)));
    IGF.emitDeallocRawCall(pointer, size, alignMask);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Fence) {
    SmallVector<Type, 4> Types;
    StringRef BuiltinName =
      getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
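    // The base name encodes the ordering and an optional scope, e.g. an
    // (illustrative) name like "fence_release_singlethread".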
    BuiltinName = BuiltinName.drop_front(strlen("fence_"));
    // Decode the ordering argument, which is required.
    auto underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);

    // Accept singlethread if present.
    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");

    IGF.Builder.CreateFence(ordering, isSingleThread
                            ? llvm::SyncScope::SingleThread
                            : llvm::SyncScope::System);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::CmpXChg) {
    SmallVector<Type, 4> Types;
    StringRef BuiltinName =
      getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
    BuiltinName = BuiltinName.drop_front(strlen("cmpxchg_"));
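    // What remains encodes the success and failure orderings plus optional
    // flags: an (illustrative) base name like "cmpxchg_acqrel_acquire_weak"
    // leaves "acqrel_acquire_weak" here; the operand type suffix was already
    // split off into Types by getBuiltinBaseName.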

    // Decode the success- and failure-ordering arguments, which are required.
    SmallVector<StringRef, 4> Parts;
    BuiltinName.split(Parts, "_");
    assert(Parts.size() >= 2 && "Mismatch with sema");
    auto successOrdering = decodeLLVMAtomicOrdering(Parts[0]);
    auto failureOrdering = decodeLLVMAtomicOrdering(Parts[1]);
    assert(successOrdering != llvm::AtomicOrdering::NotAtomic);
    assert(failureOrdering != llvm::AtomicOrdering::NotAtomic);
    auto NextPart = Parts.begin() + 2;

    // Accept weak, volatile, and singlethread if present.
    bool isWeak = false, isVolatile = false, isSingleThread = false;
    if (NextPart != Parts.end() && *NextPart == "weak") {
      isWeak = true;
      NextPart++;
    }
    if (NextPart != Parts.end() && *NextPart == "volatile") {
      isVolatile = true;
      NextPart++;
    }
    if (NextPart != Parts.end() && *NextPart == "singlethread") {
      isSingleThread = true;
      NextPart++;
    }
    assert(NextPart == Parts.end() && "Mismatch with sema");

    auto pointer = args.claimNext();
    auto cmp = args.claimNext();
    auto newval = args.claimNext();

    llvm::Type *origTy = cmp->getType();
    if (origTy->isPointerTy()) {
      cmp = IGF.Builder.CreatePtrToInt(cmp, IGF.IGM.IntPtrTy);
      newval = IGF.Builder.CreatePtrToInt(newval, IGF.IGM.IntPtrTy);
    }

    pointer = IGF.Builder.CreateBitCast(
        pointer, llvm::PointerType::getUnqual(cmp->getType()));
    llvm::Value *value = IGF.Builder.CreateAtomicCmpXchg(
        pointer, cmp, newval, successOrdering, failureOrdering,
        isSingleThread ? llvm::SyncScope::SingleThread
                       : llvm::SyncScope::System);
    cast<llvm::AtomicCmpXchgInst>(value)->setVolatile(isVolatile);
    cast<llvm::AtomicCmpXchgInst>(value)->setWeak(isWeak);

    auto valueLoaded = IGF.Builder.CreateExtractValue(value, {0});
    auto loadSuccessful = IGF.Builder.CreateExtractValue(value, {1});

    if (origTy->isPointerTy())
      valueLoaded = IGF.Builder.CreateIntToPtr(valueLoaded, origTy);

    out.add(valueLoaded);
    out.add(loadSuccessful);

    return;
  }

  if (Builtin.ID == BuiltinValueKind::AtomicRMW) {
    using namespace llvm;

    SmallVector<Type, 4> Types;
    StringRef BuiltinName = getBuiltinBaseName(IGF.IGM.Context,
                                               FnId.str(), Types);
    BuiltinName = BuiltinName.drop_front(strlen("atomicrmw_"));
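    // What remains encodes the sub-operation, the ordering, and optional
    // flags: an (illustrative) base name like "atomicrmw_xchg_seqcst" leaves
    // "xchg_seqcst" here after the type suffix is split off.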
    auto underscore = BuiltinName.find('_');
    StringRef SubOp = BuiltinName.substr(0, underscore);

    AtomicRMWInst::BinOp SubOpcode = StringSwitch<AtomicRMWInst::BinOp>(SubOp)
      .Case("xchg", AtomicRMWInst::Xchg)
      .Case("add", AtomicRMWInst::Add)
      .Case("sub", AtomicRMWInst::Sub)
      .Case("and", AtomicRMWInst::And)
      .Case("nand", AtomicRMWInst::Nand)
      .Case("or", AtomicRMWInst::Or)
      .Case("xor", AtomicRMWInst::Xor)
      .Case("max", AtomicRMWInst::Max)
      .Case("min", AtomicRMWInst::Min)
      .Case("umax", AtomicRMWInst::UMax)
      .Case("umin", AtomicRMWInst::UMin);
    BuiltinName = BuiltinName.drop_front(underscore+1);

    // Decode the ordering argument, which is required.
    underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);

    // Accept volatile and singlethread if present.
    bool isVolatile = BuiltinName.startswith("_volatile");
    if (isVolatile) BuiltinName = BuiltinName.drop_front(strlen("_volatile"));

    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");

    auto pointer = args.claimNext();
    auto val = args.claimNext();

    // Handle atomic ops on pointers by casting to intptr_t.
    llvm::Type *origTy = val->getType();
    if (origTy->isPointerTy())
      val = IGF.Builder.CreatePtrToInt(val, IGF.IGM.IntPtrTy);

    pointer = IGF.Builder.CreateBitCast(
        pointer, llvm::PointerType::getUnqual(val->getType()));
    llvm::Value *value = IGF.Builder.CreateAtomicRMW(
        SubOpcode, pointer, val, ordering,
        isSingleThread ? llvm::SyncScope::SingleThread
                       : llvm::SyncScope::System);
    cast<AtomicRMWInst>(value)->setVolatile(isVolatile);

    if (origTy->isPointerTy())
      value = IGF.Builder.CreateIntToPtr(value, origTy);

    out.add(value);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::AtomicLoad
      || Builtin.ID == BuiltinValueKind::AtomicStore) {
    using namespace llvm;

    SmallVector<Type, 4> Types;
    StringRef BuiltinName = getBuiltinBaseName(IGF.IGM.Context,
                                               FnId.str(), Types);
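    // The base name is "atomicload" or "atomicstore" followed by the ordering
    // and optional "_volatile"/"_singlethread" flags; decode it piecewise.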
    auto underscore = BuiltinName.find('_');
    BuiltinName = BuiltinName.substr(underscore+1);

    underscore = BuiltinName.find('_');
    auto ordering = decodeLLVMAtomicOrdering(BuiltinName.substr(0, underscore));
    assert(ordering != llvm::AtomicOrdering::NotAtomic);
    BuiltinName = BuiltinName.substr(underscore);

    // Accept volatile and singlethread if present.
    bool isVolatile = BuiltinName.startswith("_volatile");
    if (isVolatile) BuiltinName = BuiltinName.drop_front(strlen("_volatile"));

    bool isSingleThread = BuiltinName.startswith("_singlethread");
    if (isSingleThread)
      BuiltinName = BuiltinName.drop_front(strlen("_singlethread"));
    assert(BuiltinName.empty() && "Mismatch with sema");

    auto pointer = args.claimNext();
    auto &valueTI = IGF.getTypeInfoForUnlowered(Types[0]);
    auto schema = valueTI.getSchema();
    assert(schema.size() == 1 && "not a scalar type?!");
    auto origValueTy = schema[0].getScalarType();

    // If the type is floating-point, then we need to bitcast to integer.
    auto valueTy = origValueTy;
    if (valueTy->isFloatingPointTy()) {
      valueTy = llvm::IntegerType::get(IGF.IGM.LLVMContext,
                                       valueTy->getPrimitiveSizeInBits());
    }

    pointer = IGF.Builder.CreateBitCast(pointer, valueTy->getPointerTo());

    if (Builtin.ID == BuiltinValueKind::AtomicLoad) {
      auto load = IGF.Builder.CreateLoad(pointer,
                                         valueTI.getBestKnownAlignment());
      load->setAtomic(ordering, isSingleThread ? llvm::SyncScope::SingleThread
                                               : llvm::SyncScope::System);
      load->setVolatile(isVolatile);

      llvm::Value *value = load;
      if (valueTy != origValueTy)
        value = IGF.Builder.CreateBitCast(value, origValueTy);
      out.add(value);
      return;
    } else if (Builtin.ID == BuiltinValueKind::AtomicStore) {
      llvm::Value *value = args.claimNext();
      if (valueTy != origValueTy)
        value = IGF.Builder.CreateBitCast(value, valueTy);
      auto store = IGF.Builder.CreateStore(value, pointer,
                                           valueTI.getBestKnownAlignment());
      store->setAtomic(ordering, isSingleThread ? llvm::SyncScope::SingleThread
                                                : llvm::SyncScope::System);
      store->setVolatile(isVolatile);
      return;
    } else {
      llvm_unreachable("out of sync with outer conditional");
    }
  }

  if (Builtin.ID == BuiltinValueKind::ExtractElement) {
    using namespace llvm;

    auto vector = args.claimNext();
    auto index = args.claimNext();
    out.add(IGF.Builder.CreateExtractElement(vector, index));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::InsertElement) {
    using namespace llvm;

    auto vector = args.claimNext();
    auto newValue = args.claimNext();
    auto index = args.claimNext();
    out.add(IGF.Builder.CreateInsertElement(vector, newValue, index));
    return;
  }

  if (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
      Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
      Builtin.ID == BuiltinValueKind::SToUCheckedTrunc) {
    bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);

    auto FromType = Builtin.Types[0]->getCanonicalType();
    auto ToTy = cast<llvm::IntegerType>(
        IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType()));

    // Handle the arbitrary-precision truncate specially.
    if (isa<BuiltinIntegerLiteralType>(FromType)) {
      emitIntegerLiteralCheckedTrunc(IGF, args, ToTy, Signed, out);
      return;
    }

    auto FromTy =
      IGF.IGM.getStorageTypeForLowered(FromType);

    // Compute the result for SToSCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Ext = sext_IntFrom(Res)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
    //
    // Compute the result for UToUCheckedTrunc_IntFrom_IntTo(Arg)
    // and SToUCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Ext = zext_IntFrom(Res)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
    llvm::Value *Arg = args.claimNext();
    llvm::Value *Res = IGF.Builder.CreateTrunc(Arg, ToTy);
    llvm::Value *Ext = Signed ? IGF.Builder.CreateSExt(Res, FromTy)
                              : IGF.Builder.CreateZExt(Res, FromTy);
    llvm::Value *OverflowCond = IGF.Builder.CreateICmpEQ(Arg, Ext);
    llvm::Value *OverflowFlag = IGF.Builder.CreateSelect(
        OverflowCond,
        llvm::ConstantInt::get(IGF.IGM.Int1Ty, 0),
        llvm::ConstantInt::get(IGF.IGM.Int1Ty, 1));
    // Return the tuple - the result + the overflow flag.
    out.add(Res);
    return out.add(OverflowFlag);
  }

  if (Builtin.ID == BuiltinValueKind::UToSCheckedTrunc) {
    auto FromTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[0]->getCanonicalType());
    auto ToTy =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());
    llvm::Type *ToMinusOneTy =
      llvm::Type::getIntNTy(ToTy->getContext(), ToTy->getIntegerBitWidth() - 1);

    // Compute the result for UToSCheckedTrunc_IntFrom_IntTo(Arg):
    //   Res = trunc_IntTo(Arg)
    //   Trunc = trunc_'IntTo-1bit'(Arg)
    //   Ext = zext_IntFrom(Trunc)
    //   OverflowFlag = (Arg == Ext) ? 0 : 1
    //   return (Res, OverflowFlag)
    llvm::Value *Arg = args.claimNext();
    llvm::Value *Res = IGF.Builder.CreateTrunc(Arg, ToTy);
    llvm::Value *Trunc = IGF.Builder.CreateTrunc(Arg, ToMinusOneTy);
    llvm::Value *Ext = IGF.Builder.CreateZExt(Trunc, FromTy);
    llvm::Value *OverflowCond = IGF.Builder.CreateICmpEQ(Arg, Ext);
    llvm::Value *OverflowFlag = IGF.Builder.CreateSelect(
        OverflowCond,
        llvm::ConstantInt::get(IGF.IGM.Int1Ty, 0),
        llvm::ConstantInt::get(IGF.IGM.Int1Ty, 1));
    // Return the tuple: (the result, the overflow flag).
    out.add(Res);
    return out.add(OverflowFlag);
  }

  // We are currently emitting code for '_convertFromBuiltinIntegerLiteral',
  // which will call the builtin and pass it a non-compile-time-const parameter.
  if (Builtin.ID == BuiltinValueKind::IntToFPWithOverflow) {
    assert(Builtin.Types[0]->is<BuiltinIntegerLiteralType>());
    auto toType =
      IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());
    auto result = emitIntegerLiteralToFP(IGF, args, toType);
    out.add(result);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Once
      || Builtin.ID == BuiltinValueKind::OnceWithContext) {
    // The input type is statically (Builtin.RawPointer, @convention(thin) () -> ()).
    llvm::Value *PredPtr = args.claimNext();
    // Cast the predicate to a OnceTy pointer.
    PredPtr = IGF.Builder.CreateBitCast(PredPtr, IGF.IGM.OnceTy->getPointerTo());
    llvm::Value *FnCode = args.claimNext();
    // Get the context if any.
    llvm::Value *Context;
    if (Builtin.ID == BuiltinValueKind::OnceWithContext) {
      Context = args.claimNext();
    } else {
      Context = llvm::UndefValue::get(IGF.IGM.Int8PtrTy);
    }

    // If we know the platform runtime's "done" value, emit the check inline.
    llvm::BasicBlock *doneBB = nullptr;

    llvm::BasicBlock *beforeBB = IGF.Builder.GetInsertBlock();

    if (auto ExpectedPred = IGF.IGM.TargetInfo.OnceDonePredicateValue) {
      auto PredValue = IGF.Builder.CreateLoad(PredPtr,
                                              IGF.IGM.getPointerAlignment());
      auto ExpectedPredValue = llvm::ConstantInt::getSigned(IGF.IGM.OnceTy,
                                                            *ExpectedPred);
      auto PredIsDone = IGF.Builder.CreateICmpEQ(PredValue, ExpectedPredValue);
      PredIsDone = IGF.Builder.CreateExpect(
          PredIsDone, llvm::ConstantInt::get(IGF.IGM.Int1Ty, 1));

      auto notDoneBB = IGF.createBasicBlock("once_not_done");
      doneBB = IGF.createBasicBlock("once_done");

      IGF.Builder.CreateCondBr(PredIsDone, doneBB, notDoneBB);

      IGF.Builder.SetInsertPoint(&IGF.CurFn->back());
      IGF.Builder.emitBlock(notDoneBB);
    }

    // Emit the runtime "once" call.
    auto call
      = IGF.Builder.CreateCall(IGF.IGM.getOnceFn(), {PredPtr, FnCode, Context});
    call->setCallingConv(IGF.IGM.DefaultCC);

    // If we emitted the "done" check inline, join the branches.
    if (auto ExpectedPred = IGF.IGM.TargetInfo.OnceDonePredicateValue) {
      IGF.Builder.CreateBr(doneBB);
      IGF.Builder.SetInsertPoint(beforeBB);
      IGF.Builder.emitBlock(doneBB);
      // We can assume the once predicate is in the "done" state now.
      auto PredValue = IGF.Builder.CreateLoad(PredPtr,
                                              IGF.IGM.getPointerAlignment());
      auto ExpectedPredValue = llvm::ConstantInt::getSigned(IGF.IGM.OnceTy,
                                                            *ExpectedPred);
      auto PredIsDone = IGF.Builder.CreateICmpEQ(PredValue, ExpectedPredValue);

      IGF.Builder.CreateAssumption(PredIsDone);
    }

    // No return value.
    return;
  }

  if (Builtin.ID == BuiltinValueKind::AssertConf) {
    // Replace the call to assert_configuration by the Debug configuration
    // value.
    // TODO: assert(IGF.IGM.getOptions().AssertConfig ==
    //              SILOptions::DisableReplacement);
    // Make sure this only happens in a mode where we build a library dylib.

    llvm::Value *DebugAssert = IGF.Builder.getInt32(SILOptions::Debug);
    out.add(DebugAssert);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::DestroyArray) {
    // The input type is (T.Type, Builtin.RawPointer, Builtin.Word).
    /* metatype (which may be thin) */
    if (args.size() == 3)
      args.claimNext();
    llvm::Value *ptr = args.claimNext();
    llvm::Value *count = args.claimNext();

    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);

    ptr = IGF.Builder.CreateBitCast(
        ptr, valueTy.second.getStorageType()->getPointerTo());
    Address array = valueTy.second.getAddressForPointer(ptr);
    valueTy.second.destroyArray(IGF, array, count, valueTy.first);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::CopyArray ||
      Builtin.ID == BuiltinValueKind::TakeArrayNoAlias ||
      Builtin.ID == BuiltinValueKind::TakeArrayFrontToBack ||
      Builtin.ID == BuiltinValueKind::TakeArrayBackToFront ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayNoAlias ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayFrontToBack ||
      Builtin.ID == BuiltinValueKind::AssignCopyArrayBackToFront ||
      Builtin.ID == BuiltinValueKind::AssignTakeArray) {
    // The input type is (T.Type, Builtin.RawPointer, Builtin.RawPointer, Builtin.Word).
    /* metatype (which may be thin) */
    if (args.size() == 4)
      args.claimNext();
    llvm::Value *dest = args.claimNext();
    llvm::Value *src = args.claimNext();
    llvm::Value *count = args.claimNext();

    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);

    dest = IGF.Builder.CreateBitCast(
        dest, valueTy.second.getStorageType()->getPointerTo());
    src = IGF.Builder.CreateBitCast(
        src, valueTy.second.getStorageType()->getPointerTo());
    Address destArray = valueTy.second.getAddressForPointer(dest);
    Address srcArray = valueTy.second.getAddressForPointer(src);

    switch (Builtin.ID) {
    case BuiltinValueKind::CopyArray:
      valueTy.second.initializeArrayWithCopy(IGF, destArray, srcArray, count,
                                             valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayNoAlias:
      valueTy.second.initializeArrayWithTakeNoAlias(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayFrontToBack:
      valueTy.second.initializeArrayWithTakeFrontToBack(IGF, destArray,
                                                        srcArray, count,
                                                        valueTy.first);
      break;
    case BuiltinValueKind::TakeArrayBackToFront:
      valueTy.second.initializeArrayWithTakeBackToFront(IGF, destArray,
                                                        srcArray, count,
                                                        valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayNoAlias:
      valueTy.second.assignArrayWithCopyNoAlias(IGF, destArray, srcArray,
                                                count, valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayFrontToBack:
      valueTy.second.assignArrayWithCopyFrontToBack(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::AssignCopyArrayBackToFront:
      valueTy.second.assignArrayWithCopyBackToFront(IGF, destArray, srcArray,
                                                    count, valueTy.first);
      break;
    case BuiltinValueKind::AssignTakeArray:
      valueTy.second.assignArrayWithTake(IGF, destArray, srcArray, count,
                                         valueTy.first);
      break;
    default:
      llvm_unreachable("out of sync with if condition");
    }
    return;
  }

  if (Builtin.ID == BuiltinValueKind::CondUnreachable) {
    // conditionallyUnreachable is a no-op by itself. Since it's noreturn, there
    // should be a true unreachable terminator right after.
    return;
  }

  if (Builtin.ID == BuiltinValueKind::ZeroInitializer) {
    // Build a zero initializer of the result type.
    auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                             substitutions.getReplacementTypes()[0]);
    auto schema = valueTy.second.getSchema();
    for (auto &elt : schema) {
      out.add(llvm::Constant::getNullValue(elt.getScalarType()));
    }
    return;
  }

  if (Builtin.ID == BuiltinValueKind::GetObjCTypeEncoding) {
    (void)args.claimAll();
    Type valueTy = substitutions.getReplacementTypes()[0];
    // Get the type encoding for the associated clang type.
    auto clangTy = IGF.IGM.getClangType(valueTy->getCanonicalType());
    std::string encoding;
    IGF.IGM.getClangASTContext().getObjCEncodingForType(clangTy, encoding);

    auto globalString = IGF.IGM.getAddrOfGlobalString(encoding);
    out.add(globalString);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::TSanInoutAccess) {
    auto address = args.claimNext();
    IGF.emitTSanInoutAccessCall(address);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::Swift3ImplicitObjCEntrypoint) {
    llvm::Value *entrypointArgs[7];
    auto argIter = IGF.CurFn->arg_begin();

    // self
    entrypointArgs[0] = &*argIter++;
    if (entrypointArgs[0]->getType() != IGF.IGM.ObjCPtrTy)
      entrypointArgs[0] = IGF.Builder.CreateBitCast(entrypointArgs[0],
                                                    IGF.IGM.ObjCPtrTy);

    // _cmd
    entrypointArgs[1] = &*argIter;
    if (entrypointArgs[1]->getType() != IGF.IGM.ObjCSELTy)
      entrypointArgs[1] = IGF.Builder.CreateBitCast(entrypointArgs[1],
                                                    IGF.IGM.ObjCSELTy);

    // Filename pointer
    entrypointArgs[2] = args.claimNext();
    // Filename length
    entrypointArgs[3] = args.claimNext();
    // Line
    entrypointArgs[4] = args.claimNext();
    // Column
    entrypointArgs[5] = args.claimNext();

    // Create a flag variable so that this invocation logs only once.
    auto flagStorageTy = llvm::ArrayType::get(IGF.IGM.Int8Ty,
                                              IGF.IGM.getAtomicBoolSize().getValue());
    auto flag = new llvm::GlobalVariable(
        IGF.IGM.Module, flagStorageTy,
        /*constant*/ false, llvm::GlobalValue::PrivateLinkage,
        llvm::ConstantAggregateZero::get(flagStorageTy));
    flag->setAlignment(IGF.IGM.getAtomicBoolAlignment().getValue());
    entrypointArgs[6] = llvm::ConstantExpr::getBitCast(flag, IGF.IGM.Int8PtrTy);

    IGF.Builder.CreateCall(IGF.IGM.getSwift3ImplicitObjCEntrypointFn(),
                           entrypointArgs);
    return;
  }

  if (Builtin.ID == BuiltinValueKind::IsSameMetatype) {
    auto metatypeLHS = args.claimNext();
    auto metatypeRHS = args.claimNext();
    (void)args.claimAll();
    llvm::Value *metatypeLHSCasted =
        IGF.Builder.CreateBitCast(metatypeLHS, IGF.IGM.Int8PtrTy);
    llvm::Value *metatypeRHSCasted =
        IGF.Builder.CreateBitCast(metatypeRHS, IGF.IGM.Int8PtrTy);

    out.add(IGF.Builder.CreateICmpEQ(metatypeLHSCasted, metatypeRHSCasted));
    return;
  }

  llvm_unreachable("IRGen unimplemented for this builtin!");
}
|