mirror of
https://github.com/apple/swift.git
synced 2025-12-14 20:36:38 +01:00
Tracking this as a single bit is actually largely uninteresting to the runtime. To handle priority escalation properly, we really need to track this at a finer grain of detail: recording that the task is running on a specific thread, enqueued on a specific actor, or so on. But starting by tracking a single bit is important for two reasons: - First, it's more realistic about the performance overheads of tasks: we're going to be doing this tracking eventually, and the cost of that tracking will be dominated by the atomic access, so doing that access now sets the baseline about right. - Second, it ensures that we've actually got runtime involvement in all the right places to do this tracking. A propos of the latter: there was no runtime involvement with awaiting a continuation, which is a point at which the task potentially transitions from running to suspended. We must do the tracking as part of this transition, rather than recognizing in the run-loops that a task is still active and treating it as having suspended, because the latter point potentially races with the resumption of the task. To do this, I've had to introduce a runtime function, swift_continuation_await, to do this awaiting rather than inlining the atomic operation on the continuation. As part of doing this work, I've also fixed a bug where we failed to load-acquire in swift_task_escalate before walking the task status records to invoke escalation actions. I've also fixed several places where the handling of task statuses may have accidentally allowed the task to revert to uncancelled.
759 lines
30 KiB
C++
759 lines
30 KiB
C++
//===--- IRGenFunction.cpp - Swift Per-Function IR Generation -------------===//
|
|
//
|
|
// This source file is part of the Swift.org open source project
|
|
//
|
|
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
|
|
// Licensed under Apache License v2.0 with Runtime Library Exception
|
|
//
|
|
// See https://swift.org/LICENSE.txt for license information
|
|
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
//
|
|
// This file implements basic setup and teardown for the class which
|
|
// performs IR generation for function bodies.
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
#include "swift/ABI/MetadataValues.h"
|
|
#include "swift/AST/IRGenOptions.h"
|
|
#include "swift/Basic/SourceLoc.h"
|
|
#include "swift/IRGen/Linking.h"
|
|
#include "llvm/IR/Instructions.h"
|
|
#include "llvm/IR/Function.h"
|
|
#include "llvm/Support/CommandLine.h"
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
#include "Callee.h"
|
|
#include "Explosion.h"
|
|
#include "GenPointerAuth.h"
|
|
#include "IRGenDebugInfo.h"
|
|
#include "IRGenFunction.h"
|
|
#include "IRGenModule.h"
|
|
#include "LoadableTypeInfo.h"
|
|
|
|
using namespace swift;
|
|
using namespace irgen;
|
|
|
|
// Hidden command-line switch (default: on) controlling whether
// CreateNonMergeableTrap attaches a failure-message function to the
// debug info for each trap it emits.
static llvm::cl::opt<bool> EnableTrapDebugInfo(
    "enable-trap-debug-info", llvm::cl::init(true), llvm::cl::Hidden,
    llvm::cl::desc("Generate failure-message functions in the debug info"));
|
|
|
|
/// Set up per-function IR generation state for \p Fn.
///
/// The IRBuilder tracks debug locations only when debug info is requested
/// and we are not in debugger-support mode.  The constructor also emits
/// the function prologue.
IRGenFunction::IRGenFunction(IRGenModule &IGM, llvm::Function *Fn,
                             OptimizationMode OptMode,
                             const SILDebugScope *DbgScope,
                             Optional<SILLocation> DbgLoc)
    : IGM(IGM), Builder(IGM.getLLVMContext(),
                        IGM.DebugInfo && !IGM.Context.LangOpts.DebuggerSupport),
      OptMode(OptMode), CurFn(Fn), DbgScope(DbgScope) {

  // Make sure the instructions in this function are attached to its debug
  // scope.
  if (IGM.DebugInfo) {
    // Functions, especially artificial thunks and closures, are often
    // generated on-the-fly while we are in the middle of another
    // function. Be nice and preserve the current debug location until
    // after we're done with this function.
    IGM.DebugInfo->pushLoc();
  }

  emitPrologue();
}
|
|
|
|
/// Finish the function: emit the epilogue, then undo the constructor's
/// side effects in reverse order.
IRGenFunction::~IRGenFunction() {
  emitEpilogue();

  // Restore the debug location that the constructor pushed.
  if (IGM.DebugInfo) IGM.DebugInfo->popLoc();

  // Tear down any side-table data structures.
  if (LocalTypeData) destroyLocalTypeData();
}
|
|
|
|
/// Return the optimization mode that actually applies to this function:
/// the per-function override when one was set, otherwise the module-wide
/// setting.
OptimizationMode IRGenFunction::getEffectiveOptimizationMode() const {
  return (OptMode == OptimizationMode::NotSet) ? IGM.getOptions().OptMode
                                               : OptMode;
}
|
|
|
|
/// The Swift module this function is being generated for.
ModuleDecl *IRGenFunction::getSwiftModule() const {
  return IGM.getSwiftModule();
}
|
|
|
|
/// The SIL module that owns the function being lowered.
SILModule &IRGenFunction::getSILModule() const {
  return IGM.getSILModule();
}
|
|
|
|
/// The SIL type converter shared by the whole module.
Lowering::TypeConverter &IRGenFunction::getSILTypes() const {
  return IGM.getSILTypes();
}
|
|
|
|
/// The IRGen options in effect for this module.
const IRGenOptions &IRGenFunction::getOptions() const {
  return IGM.getOptions();
}
|
|
|
|
// Returns the default atomicity of the module.
|
|
Atomicity IRGenFunction::getDefaultAtomicity() {
|
|
return getSILModule().isDefaultAtomic() ? Atomicity::Atomic : Atomicity::NonAtomic;
|
|
}
|
|
|
|
/// Call the llvm.memcpy intrinsic. The arguments need not already
|
|
/// be of i8* type.
|
|
void IRGenFunction::emitMemCpy(llvm::Value *dest, llvm::Value *src,
|
|
Size size, Alignment align) {
|
|
emitMemCpy(dest, src, IGM.getSize(size), align);
|
|
}
|
|
|
|
/// Call the llvm.memcpy intrinsic with a dynamic size, using the same
/// alignment for both source and destination.
void IRGenFunction::emitMemCpy(llvm::Value *dest, llvm::Value *src,
                               llvm::Value *size, Alignment align) {
  auto llvmAlign = llvm::MaybeAlign(align.getValue());
  Builder.CreateMemCpy(dest, llvmAlign, src, llvmAlign, size);
}
|
|
|
|
/// Memcpy between two typed addresses with a statically-known size.
void IRGenFunction::emitMemCpy(Address dest, Address src, Size size) {
  llvm::Value *sizeValue = IGM.getSize(size);
  emitMemCpy(dest, src, sizeValue);
}
|
|
|
|
/// Memcpy between two typed addresses with a dynamic size.
void IRGenFunction::emitMemCpy(Address dest, Address src, llvm::Value *size) {
  // The LLVM intrinsic takes raw pointers and a single alignment, so use
  // the weaker of the two known alignments.
  Alignment commonAlign = std::min(dest.getAlignment(), src.getAlignment());
  emitMemCpy(dest.getAddress(), src.getAddress(), size, commonAlign);
}
|
|
|
|
/// Emit a call to an allocation entry point and decorate it with the
/// standard allocation attributes.  (\p name is currently unused.)
static llvm::Value *emitAllocatingCall(IRGenFunction &IGF,
                                       llvm::Constant *fn,
                                       ArrayRef<llvm::Value*> args,
                                       const llvm::Twine &name) {
  llvm::CallInst *allocation =
      IGF.Builder.CreateCall(fn, makeArrayRef(args.begin(), args.size()));
  allocation->setAttributes(IGF.IGM.getAllocAttrs());
  return allocation;
}
|
|
|
|
/// Emit a 'raw' allocation, which has no heap pointer and is
|
|
/// not guaranteed to be zero-initialized.
|
|
llvm::Value *IRGenFunction::emitAllocRawCall(llvm::Value *size,
|
|
llvm::Value *alignMask,
|
|
const llvm::Twine &name) {
|
|
// For now, all we have is swift_slowAlloc.
|
|
return emitAllocatingCall(*this, IGM.getSlowAllocFn(),
|
|
{size, alignMask},
|
|
name);
|
|
}
|
|
|
|
/// Emit a heap allocation.
|
|
llvm::Value *IRGenFunction::emitAllocObjectCall(llvm::Value *metadata,
|
|
llvm::Value *size,
|
|
llvm::Value *alignMask,
|
|
const llvm::Twine &name) {
|
|
// For now, all we have is swift_allocObject.
|
|
return emitAllocatingCall(*this, IGM.getAllocObjectFn(),
|
|
{ metadata, size, alignMask }, name);
|
|
}
|
|
|
|
/// Call the swift_initStackObject runtime function on \p object with the
/// given metadata.
llvm::Value *IRGenFunction::emitInitStackObjectCall(llvm::Value *metadata,
                                                    llvm::Value *object,
                                                    const llvm::Twine &name) {
  auto *initCall =
      Builder.CreateCall(IGM.getInitStackObjectFn(), {metadata, object}, name);
  initCall->setDoesNotThrow();
  return initCall;
}
|
|
|
|
/// Call the swift_initStaticObject runtime function on \p object with the
/// given metadata.
llvm::Value *IRGenFunction::emitInitStaticObjectCall(llvm::Value *metadata,
                                                     llvm::Value *object,
                                                     const llvm::Twine &name) {
  auto *initCall =
      Builder.CreateCall(IGM.getInitStaticObjectFn(), {metadata, object}, name);
  initCall->setDoesNotThrow();
  return initCall;
}
|
|
|
|
/// Call the swift_verifyEndOfLifetime runtime function on \p object.
llvm::Value *IRGenFunction::emitVerifyEndOfLifetimeCall(llvm::Value *object,
                                                        const llvm::Twine &name) {
  auto *verifyCall =
      Builder.CreateCall(IGM.getVerifyEndOfLifetimeFn(), {object}, name);
  verifyCall->setDoesNotThrow();
  return verifyCall;
}
|
|
|
|
/// Call swift_allocBox, returning the box reference and the address of
/// its value slot through the out-parameters.
void IRGenFunction::emitAllocBoxCall(llvm::Value *typeMetadata,
                                     llvm::Value *&box,
                                     llvm::Value *&valueAddress) {
  llvm::CallInst *allocCall =
      Builder.CreateCall(IGM.getAllocBoxFn(), typeMetadata);
  allocCall->setAttributes(
      llvm::AttributeList::get(IGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));

  // The runtime returns a (box, value-address) pair.
  box = Builder.CreateExtractValue(allocCall, 0);
  valueAddress = Builder.CreateExtractValue(allocCall, 1);
}
|
|
|
|
/// Call swift_makeBoxUnique, returning the (possibly new) box and the
/// address of its value slot through the out-parameters.
void IRGenFunction::emitMakeBoxUniqueCall(llvm::Value *box,
                                          llvm::Value *typeMetadata,
                                          llvm::Value *alignMask,
                                          llvm::Value *&outBox,
                                          llvm::Value *&outValueAddress) {
  llvm::CallInst *uniqueCall =
      Builder.CreateCall(IGM.getMakeBoxUniqueFn(),
                         {box, typeMetadata, alignMask});
  uniqueCall->setAttributes(
      llvm::AttributeList::get(IGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));

  // The runtime returns a (box, value-address) pair.
  outBox = Builder.CreateExtractValue(uniqueCall, 0);
  outValueAddress = Builder.CreateExtractValue(uniqueCall, 1);
}
|
|
|
|
|
|
/// Call swift_deallocBox on \p box.  (\p typeMetadata is currently unused.)
void IRGenFunction::emitDeallocBoxCall(llvm::Value *box,
                                       llvm::Value *typeMetadata) {
  llvm::CallInst *deallocCall = Builder.CreateCall(IGM.getDeallocBoxFn(), box);
  deallocCall->setCallingConv(IGM.DefaultCC);
  deallocCall->setAttributes(
      llvm::AttributeList::get(IGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}
|
|
|
|
/// Call swift_projectBox to get the address of the value inside \p box.
/// (\p typeMetadata is currently unused.)
llvm::Value *IRGenFunction::emitProjectBoxCall(llvm::Value *box,
                                               llvm::Value *typeMetadata) {
  // The projection neither unwinds nor has visible memory effects.
  llvm::Attribute::AttrKind projectAttrs[] = {
      llvm::Attribute::NoUnwind,
      llvm::Attribute::ReadNone,
  };
  llvm::CallInst *projectCall = Builder.CreateCall(IGM.getProjectBoxFn(), box);
  projectCall->setCallingConv(IGM.DefaultCC);
  projectCall->setAttributes(llvm::AttributeList::get(
      IGM.getLLVMContext(), llvm::AttributeList::FunctionIndex, projectAttrs));
  return projectCall;
}
|
|
|
|
/// Call the swift_allocEmptyBox runtime function.
llvm::Value *IRGenFunction::emitAllocEmptyBoxCall() {
  llvm::CallInst *allocCall = Builder.CreateCall(IGM.getAllocEmptyBoxFn(), {});
  allocCall->setCallingConv(IGM.DefaultCC);
  allocCall->setAttributes(
      llvm::AttributeList::get(IGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
  return allocCall;
}
|
|
|
|
/// Emit a call to a deallocation entry point, propagating the callee's
/// own calling convention when the callee is a known function.
static void emitDeallocatingCall(IRGenFunction &IGF, llvm::Constant *fn,
                                 std::initializer_list<llvm::Value *> args) {
  // Fall back to the module's default convention for opaque callees.
  auto convention = IGF.IGM.DefaultCC;
  if (auto calleeFn = dyn_cast<llvm::Function>(fn))
    convention = calleeFn->getCallingConv();

  llvm::CallInst *deallocCall =
      IGF.Builder.CreateCall(fn, makeArrayRef(args.begin(), args.size()));
  deallocCall->setCallingConv(convention);
  deallocCall->setDoesNotThrow();
}
|
|
|
|
/// Emit a 'raw' deallocation, which has no heap pointer and is not
|
|
/// guaranteed to be zero-initialized.
|
|
void IRGenFunction::emitDeallocRawCall(llvm::Value *pointer,
|
|
llvm::Value *size,
|
|
llvm::Value *alignMask) {
|
|
// For now, all we have is swift_slowDealloc.
|
|
return emitDeallocatingCall(*this, IGM.getSlowDeallocFn(),
|
|
{pointer, size, alignMask});
|
|
}
|
|
|
|
/// Notify ThreadSanitizer of a modifying ("inout") access to \p address.
void IRGenFunction::emitTSanInoutAccessCall(llvm::Value *address) {
  llvm::Function *accessFn = cast<llvm::Function>(IGM.getTSanInoutAccessFn());

  llvm::Value *addrArg = Builder.CreateBitCast(address, IGM.Int8PtrTy);

  // Passing 0 as the caller PC causes compiler-rt to get our PC.
  llvm::Value *callerPC = llvm::ConstantPointerNull::get(IGM.Int8PtrTy);

  // A magic number agreed upon with compiler-rt to indicate a modifying
  // access.
  const unsigned kExternalTagSwiftModifyingAccess = 0x1;
  llvm::Value *tagValue =
      llvm::ConstantInt::get(IGM.SizeTy, kExternalTagSwiftModifyingAccess);
  llvm::Value *tagArg = Builder.CreateIntToPtr(tagValue, IGM.Int8PtrTy);

  Builder.CreateCall(accessFn, {addrArg, callerPC, tagArg});
}
|
|
|
|
|
|
/// Initialize a relative indirectable pointer to the given value.
|
|
/// This always leaves the value in the direct state; if it's not a
|
|
/// far reference, it's the caller's responsibility to ensure that the
|
|
/// pointer ranges are sufficient.
|
|
void IRGenFunction::emitStoreOfRelativeIndirectablePointer(llvm::Value *value,
|
|
Address addr,
|
|
bool isFar) {
|
|
value = Builder.CreatePtrToInt(value, IGM.IntPtrTy);
|
|
auto addrAsInt =
|
|
Builder.CreatePtrToInt(addr.getAddress(), IGM.IntPtrTy);
|
|
|
|
auto difference = Builder.CreateSub(value, addrAsInt);
|
|
if (!isFar) {
|
|
difference = Builder.CreateTrunc(difference, IGM.RelativeAddressTy);
|
|
}
|
|
|
|
Builder.CreateStore(difference, addr);
|
|
}
|
|
|
|
/// Load a (possibly far) relative pointer from \p addr and resolve it to
/// an absolute pointer of type \p expectedType.
llvm::Value *
IRGenFunction::emitLoadOfRelativePointer(Address addr, bool isFar,
                                         llvm::PointerType *expectedType,
                                         const llvm::Twine &name) {
  llvm::Value *offset = Builder.CreateLoad(addr);
  assert(offset->getType() ==
         (isFar ? IGM.FarRelativeAddressTy : IGM.RelativeAddressTy));
  // Near offsets are narrower than a pointer; sign-extend before adding.
  if (!isFar) {
    offset = Builder.CreateSExt(offset, IGM.IntPtrTy);
  }

  // absolute = slot address + stored offset.
  auto *slotInt = Builder.CreatePtrToInt(addr.getAddress(), IGM.IntPtrTy);
  auto *absoluteInt = Builder.CreateAdd(slotInt, offset);
  auto *rawPtr = Builder.CreateIntToPtr(absoluteInt, IGM.Int8PtrTy);
  auto rawAddr = Address(rawPtr, IGM.getPointerAlignment());
  return Builder.CreateBitCast(rawAddr, expectedType).getAddress();
}
|
|
|
|
/// Load a relative indirectable pointer: the low bit of the stored value
/// selects between a direct relative reference and an indirect one that
/// requires an extra load.  Returns a value of \p expectedType.
llvm::Value *
IRGenFunction::emitLoadOfRelativeIndirectablePointer(Address addr,
                                                     bool isFar,
                                                     llvm::PointerType *expectedType,
                                                     const llvm::Twine &name) {
  // Load the pointer and turn it back into a pointer.
  llvm::Value *value = Builder.CreateLoad(addr);
  assert(value->getType() == (isFar ? IGM.FarRelativeAddressTy
                                    : IGM.RelativeAddressTy));
  if (!isFar) {
    // Near offsets are narrower than a pointer; sign-extend first.
    value = Builder.CreateSExt(value, IGM.IntPtrTy);
  }
  assert(value->getType() == IGM.IntPtrTy);

  // Remember where we are so the phi below can name this block.
  llvm::BasicBlock *origBB = Builder.GetInsertBlock();
  llvm::Value *directResult = Builder.CreateIntToPtr(value, expectedType);

  // Check whether the low bit is set.
  llvm::Constant *one = llvm::ConstantInt::get(IGM.IntPtrTy, 1);
  llvm::BasicBlock *indirectBB = createBasicBlock("relptr.indirect");
  llvm::BasicBlock *contBB = createBasicBlock("relptr.cont");
  llvm::Value *isIndirect = Builder.CreateAnd(value, one);
  isIndirect = Builder.CreateIsNotNull(isIndirect);
  Builder.CreateCondBr(isIndirect, indirectBB, contBB);

  // In the indirect block, clear the low bit and perform an additional load.
  llvm::Value *indirectResult; {
    Builder.emitBlock(indirectBB);

    // Clear the low bit.
    llvm::Value *ptr = Builder.CreateSub(value, one);
    ptr = Builder.CreateIntToPtr(ptr, expectedType->getPointerTo());

    // Load.
    Address indirectAddr(ptr, IGM.getPointerAlignment());
    indirectResult = Builder.CreateLoad(indirectAddr);

    Builder.CreateBr(contBB);
  }

  // Merge the direct and indirect results.
  Builder.emitBlock(contBB);
  auto phi = Builder.CreatePHI(expectedType, 2, name);
  phi->addIncoming(directResult, origBB);
  phi->addIncoming(indirectResult, indirectBB);

  return phi;
}
|
|
|
|
void IRGenFunction::emitFakeExplosion(const TypeInfo &type,
|
|
Explosion &explosion) {
|
|
if (!isa<LoadableTypeInfo>(type)) {
|
|
explosion.add(llvm::UndefValue::get(type.getStorageType()->getPointerTo()));
|
|
return;
|
|
}
|
|
|
|
ExplosionSchema schema = cast<LoadableTypeInfo>(type).getSchema();
|
|
for (auto &element : schema) {
|
|
llvm::Type *elementType;
|
|
if (element.isAggregate()) {
|
|
elementType = element.getAggregateType()->getPointerTo();
|
|
} else {
|
|
elementType = element.getScalarType();
|
|
}
|
|
|
|
explosion.add(llvm::UndefValue::get(elementType));
|
|
}
|
|
}
|
|
|
|
/// Report an unimplemented feature by forwarding to the module-level
/// diagnostic helper.
void IRGenFunction::unimplemented(SourceLoc Loc, StringRef Message) {
  IGM.unimplemented(Loc, Message);
}
|
|
|
|
// Debug output for Explosions.
|
|
|
|
void Explosion::print(llvm::raw_ostream &OS) {
|
|
for (auto value : makeArrayRef(Values).slice(NextValue)) {
|
|
value->print(OS);
|
|
OS << '\n';
|
|
}
|
|
}
|
|
|
|
/// Dump the unclaimed values to stderr.
void Explosion::dump() {
  print(llvm::errs());
}
|
|
|
|
/// Materialize this offset as an llvm::Value: static offsets become
/// constants, dynamic offsets are returned as-is.
llvm::Value *Offset::getAsValue(IRGenFunction &IGF) const {
  return isStatic() ? IGF.IGM.getSize(getStatic()) : getDynamic();
}
|
|
|
|
/// Return this offset advanced by \p other.
Offset Offset::offsetBy(IRGenFunction &IGF, Size other) const {
  // Two static quantities fold at compile time.
  if (isStatic())
    return Offset(getStatic() + other);

  // Otherwise emit an add of the dynamic value and the constant.
  auto *constant = llvm::ConstantInt::get(IGF.IGM.SizeTy, other.getValue());
  return Offset(IGF.Builder.CreateAdd(getDynamic(), constant));
}
|
|
|
|
/// Compute the address \p offset bytes past \p base and return it as an
/// Address of type \p objectTy with alignment \p objectAlignment.
/// NOTE(review): \p name is currently unused in this body.
Address IRGenFunction::emitAddressAtOffset(llvm::Value *base, Offset offset,
                                           llvm::Type *objectTy,
                                           Alignment objectAlignment,
                                           const llvm::Twine &name) {
  // Use a slightly more obvious IR pattern if it's a multiple of the type
  // size. I'll confess that this is partly just to avoid updating tests.
  if (offset.isStatic()) {
    auto byteOffset = offset.getStatic();
    Size objectSize(IGM.DataLayout.getTypeAllocSize(objectTy));
    if (byteOffset.isMultipleOf(objectSize)) {
      // Cast to T*.
      auto objectPtrTy = objectTy->getPointerTo();
      base = Builder.CreateBitCast(base, objectPtrTy);

      // GEP to the slot, computing the index as a signed number.
      auto scaledIndex =
        int64_t(byteOffset.getValue()) / int64_t(objectSize.getValue());
      auto indexValue = IGM.getSize(Size(scaledIndex));
      auto slotPtr = Builder.CreateInBoundsGEP(base, indexValue);

      return Address(slotPtr, objectAlignment);
    }
  }

  // GEP to the slot.
  auto offsetValue = offset.getAsValue(*this);
  auto slotPtr = emitByteOffsetGEP(base, offsetValue, objectTy);
  return Address(slotPtr, objectAlignment);
}
|
|
|
|
/// Emit a trap call that optimization passes cannot merge with other
/// traps, so each failure site keeps a distinct location (and optionally
/// a failure message in the debug info).
llvm::CallInst *IRBuilder::CreateNonMergeableTrap(IRGenModule &IGM,
                                                  StringRef failureMsg) {
  if (IGM.IRGen.Opts.shouldOptimize()) {
    // Emit unique side-effecting inline asm calls in order to eliminate
    // the possibility that an LLVM optimization or code generation pass
    // will merge these blocks back together again. We emit an empty asm
    // string with the side-effect flag set, and with a unique integer
    // argument for each cond_fail we see in the function.
    llvm::IntegerType *asmArgTy = IGM.Int32Ty;
    llvm::Type *argTys = {asmArgTy};
    llvm::FunctionType *asmFnTy =
      llvm::FunctionType::get(IGM.VoidTy, argTys, false /* = isVarArg */);
    llvm::InlineAsm *inlineAsm =
      llvm::InlineAsm::get(asmFnTy, "", "n", true /* = SideEffects */);
    CreateAsmCall(inlineAsm,
                  llvm::ConstantInt::get(asmArgTy, NumTrapBarriers++));
  }

  // Emit the trap instruction.
  llvm::Function *trapIntrinsic =
    llvm::Intrinsic::getDeclaration(&IGM.Module, llvm::Intrinsic::trap);
  if (EnableTrapDebugInfo && IGM.DebugInfo && !failureMsg.empty()) {
    // Attach the failure message to the current debug location.
    IGM.DebugInfo->addFailureMessageToCurrentLoc(*this, failureMsg);
  }
  auto Call = IRBuilderBase::CreateCall(trapIntrinsic, {});
  setCallingConvUsingCallee(Call);
  return Call;
}
|
|
|
|
/// Emit a non-mergeable trap, optionally followed by an unreachable
/// terminator.
void IRGenFunction::emitTrap(StringRef failureMessage, bool EmitUnreachable) {
  Builder.CreateNonMergeableTrap(IGM, failureMessage);
  if (EmitUnreachable) {
    Builder.CreateUnreachable();
  }
}
|
|
|
|
/// Allocate \p size bytes from the current task's allocator via
/// swift_task_alloc.
Address IRGenFunction::emitTaskAlloc(llvm::Value *size, Alignment alignment) {
  auto *allocCall = Builder.CreateCall(IGM.getTaskAllocFn(), {size});
  allocCall->setDoesNotThrow();
  allocCall->setCallingConv(IGM.SwiftCC);
  return Address(allocCall, alignment);
}
|
|
|
|
/// Return task-local memory obtained from emitTaskAlloc via
/// swift_task_dealloc.
void IRGenFunction::emitTaskDealloc(Address address) {
  auto *deallocCall =
      Builder.CreateCall(IGM.getTaskDeallocFn(), {address.getAddress()});
  deallocCall->setDoesNotThrow();
  deallocCall->setCallingConv(IGM.SwiftCC);
}
|
|
|
|
/// Round \p val up to the next multiple of MaximumAlignment:
/// (val + (align - 1)) & ~(align - 1).
llvm::Value *IRGenFunction::alignUpToMaximumAlignment(llvm::Type *sizeTy, llvm::Value *val) {
  auto *mask = llvm::ConstantInt::get(sizeTy, MaximumAlignment - 1);
  auto *invertedMask = Builder.CreateNot(mask);
  auto *bumped = Builder.CreateAdd(val, mask);
  return Builder.CreateAnd(bumped, invertedMask);
}
|
|
|
|
/// Returns the current task \p currTask as a Builtin.RawUnsafeContinuation at +1.
|
|
static llvm::Value *unsafeContinuationFromTask(IRGenFunction &IGF,
|
|
llvm::Value *currTask) {
|
|
auto &IGM = IGF.IGM;
|
|
auto &Builder = IGF.Builder;
|
|
|
|
auto &rawPointerTI = IGM.getRawUnsafeContinuationTypeInfo();
|
|
return Builder.CreateBitOrPointerCast(currTask, rawPointerTI.getStorageType());
|
|
}
|
|
|
|
/// Load the ResumeContext pointer out of a Swift task object,
/// authenticating it if pointer authentication is enabled.
static llvm::Value *emitLoadOfResumeContextFromTask(IRGenFunction &IGF,
                                                    llvm::Value *task) {
  // Task.ResumeContext is at field index 8 within SwiftTaskTy. The offset comes
  // from 7 pointers (two within the single RefCountedStructTy) and 2 Int32
  // fields.
  const unsigned taskResumeContextIndex = 8;
  const Size taskResumeContextOffset = (7 * IGF.IGM.getPointerSize()) + Size(8);

  auto addr = Address(task, IGF.IGM.getPointerAlignment());
  auto resumeContextAddr = IGF.Builder.CreateStructGEP(
      addr, taskResumeContextIndex, taskResumeContextOffset);
  llvm::Value *resumeContext = IGF.Builder.CreateLoad(resumeContextAddr);
  // The stored pointer is signed when the TaskResumeContext schema is
  // active; strip the signature before handing it back.
  if (auto &schema = IGF.getOptions().PointerAuth.TaskResumeContext) {
    auto info = PointerAuthInfo::emit(IGF, schema,
                                      resumeContextAddr.getAddress(),
                                      PointerAuthEntity());
    resumeContext = emitPointerAuthAuth(IGF, resumeContext, info);
  }
  return resumeContext;
}
|
|
|
|
/// Load the continuation's resume context from \p continuation and view
/// it as a ContinuationAsyncContext.
static Address emitLoadOfContinuationContext(IRGenFunction &IGF,
                                             llvm::Value *continuation) {
  auto rawContext = emitLoadOfResumeContextFromTask(IGF, continuation);
  auto typedContext = IGF.Builder.CreateBitCast(
      rawContext, IGF.IGM.ContinuationAsyncContextPtrTy);
  return Address(typedContext, IGF.IGM.getAsyncContextAlignment());
}
|
|
|
|
/// Compute the address of the normal-result pointer field of a
/// ContinuationAsyncContext (field index 3, five pointers in).
static Address emitAddrOfContinuationNormalResultPointer(IRGenFunction &IGF,
                                                         Address context) {
  assert(context.getType() == IGF.IGM.ContinuationAsyncContextPtrTy);
  auto byteOffset = 5 * IGF.IGM.getPointerSize();
  return IGF.Builder.CreateStructGEP(context, 3, byteOffset);
}
|
|
|
|
/// Emit a get_async_continuation[_addr]: allocate and initialize a
/// ContinuationAsyncContext for the current task and push the resulting
/// unsafe continuation value into \p out.
void IRGenFunction::emitGetAsyncContinuation(SILType resumeTy,
                                             StackAddress resultAddr,
                                             Explosion &out,
                                             bool canThrow) {
  // A continuation is just a reference to the current async task,
  // parked with a special context:
  //
  //   struct ContinuationAsyncContext : AsyncContext {
  //     std::atomic<size_t> awaitSynchronization;
  //     SwiftError *errResult;
  //     Result *result;
  //     ExecutorRef resumeExecutor;
  //   };
  //
  // We need to fill out this context essentially as if we were calling
  // something.

  // Create and setup the continuation context.
  auto continuationContext =
    createAlloca(IGM.ContinuationAsyncContextTy,
                 IGM.getAsyncContextAlignment());
  AsyncCoroutineCurrentContinuationContext = continuationContext.getAddress();
  // TODO: add lifetime with matching lifetime in await_async_continuation

  // We're required to initialize three fields in the continuation
  // context before calling swift_continuation_init:

  // - Parent, the parent context pointer, which we initialize to
  //   the current context.
  auto contextBase = Builder.CreateStructGEP(continuationContext, 0, Size(0));
  auto parentContextAddr = Builder.CreateStructGEP(contextBase, 0, Size(0));
  llvm::Value *asyncContextValue =
    Builder.CreateBitCast(getAsyncContext(), IGM.SwiftContextPtrTy);
  // Sign the parent pointer if the AsyncContextParent schema is active.
  if (auto schema = IGM.getOptions().PointerAuth.AsyncContextParent) {
    auto authInfo = PointerAuthInfo::emit(*this, schema,
                                          parentContextAddr.getAddress(),
                                          PointerAuthEntity());
    asyncContextValue = emitPointerAuthSign(*this, asyncContextValue, authInfo);
  }
  Builder.CreateStore(asyncContextValue, parentContextAddr);

  // - NormalResult, the pointer to the normal result, which we initialize
  //   to the result address that we were given, or else a temporary slot.
  //   TODO: emit lifetime.start for this temporary, paired with a
  //   lifetime.end within the await after we take from the slot.
  auto normalResultAddr =
    emitAddrOfContinuationNormalResultPointer(*this, continuationContext);
  if (!resultAddr.getAddress().isValid()) {
    auto &resumeTI = getTypeInfo(resumeTy);
    resultAddr =
      resumeTI.allocateStack(*this, resumeTy, "async.continuation.result");
  }
  Builder.CreateStore(Builder.CreateBitOrPointerCast(
                        resultAddr.getAddress().getAddress(),
                        IGM.OpaquePtrTy),
                      normalResultAddr);

  // - ResumeParent, the continuation function pointer, which we initialize
  //   with the result of a new call to @llvm.coro.async.resume; we'll pair
  //   this with a suspend point when we emit the corresponding
  //   await_async_continuation.
  auto coroResume =
    Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_async_resume, {});
  auto resumeFunctionAddr =
    Builder.CreateStructGEP(contextBase, 1, IGM.getPointerSize());
  llvm::Value *coroResumeValue =
    Builder.CreateBitOrPointerCast(coroResume,
                                   IGM.TaskContinuationFunctionPtrTy);
  // Sign the resume pointer if the AsyncContextResume schema is active.
  if (auto schema = IGM.getOptions().PointerAuth.AsyncContextResume) {
    auto authInfo = PointerAuthInfo::emit(*this, schema,
                                          resumeFunctionAddr.getAddress(),
                                          PointerAuthEntity());
    coroResumeValue = emitPointerAuthSign(*this, coroResumeValue, authInfo);
  }
  Builder.CreateStore(coroResumeValue, resumeFunctionAddr);

  // Save the resume intrinsic call for await_async_continuation.
  assert(AsyncCoroutineCurrentResume == nullptr &&
         "Don't support nested get_async_continuation");
  AsyncCoroutineCurrentResume = coroResume;

  AsyncContinuationFlags flags;
  if (canThrow) flags.setCanThrow(true);

  // Call the swift_continuation_init runtime function to initialize
  // the rest of the continuation and return the task pointer back to us.
  auto task = Builder.CreateCall(IGM.getContinuationInitFn(), {
    continuationContext.getAddress(),
    IGM.getSize(Size(flags.getOpaqueValue()))
  });
  task->setCallingConv(IGM.SwiftCC);

  // TODO: if we have a better idea of what executor to return to than
  // the current executor, overwrite the ResumeToExecutor field.

  auto unsafeContinuation = unsafeContinuationFromTask(*this, task);
  out.add(unsafeContinuation);
}
|
|
|
|
/// Emit an await_async_continuation: suspend through
/// swift_continuation_await, then branch on the error slot (when the
/// continuation is throwing) and load the normal result (when direct).
void IRGenFunction::emitAwaitAsyncContinuation(
    SILType resumeTy, bool isIndirectResult,
    Explosion &outDirectResult, llvm::BasicBlock *&normalBB,
    llvm::PHINode *&optionalErrorResult, llvm::BasicBlock *&optionalErrorBB) {
  assert(AsyncCoroutineCurrentContinuationContext && "no active continuation");
  auto pointerAlignment = IGM.getPointerAlignment();

  // Call swift_continuation_await to check whether the continuation
  // has already been resumed.
  {
    // Set up the suspend point.
    SmallVector<llvm::Value *, 8> arguments;
    unsigned swiftAsyncContextIndex = 0;
    arguments.push_back(IGM.getInt32(swiftAsyncContextIndex)); // context index
    arguments.push_back(AsyncCoroutineCurrentResume);
    auto resumeProjFn = getOrCreateResumePrjFn();
    arguments.push_back(
        Builder.CreateBitOrPointerCast(resumeProjFn, IGM.Int8PtrTy));
    arguments.push_back(Builder.CreateBitOrPointerCast(
        IGM.getAwaitAsyncContinuationFn(),
        IGM.Int8PtrTy));
    arguments.push_back(AsyncCoroutineCurrentContinuationContext);
    auto resultTy =
        llvm::StructType::get(IGM.getLLVMContext(), {IGM.Int8PtrTy}, false /*packed*/);
    emitSuspendAsyncCall(swiftAsyncContextIndex, resultTy, arguments);
  }

  // If there's an error destination (i.e. if the continuation is throwing),
  // load the error value out and check whether it's non-null; if so, branch
  // to the error destination.
  if (optionalErrorBB) {
    auto normalContBB = createBasicBlock("await.async.normal");
    auto contErrResultAddr = Address(
        Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 2),
        pointerAlignment);
    auto errorRes = Builder.CreateLoad(contErrResultAddr);
    auto nullError = llvm::Constant::getNullValue(errorRes->getType());
    auto hasError = Builder.CreateICmpNE(errorRes, nullError);
    optionalErrorResult->addIncoming(errorRes, Builder.GetInsertBlock());
    Builder.CreateCondBr(hasError, optionalErrorBB, normalContBB);
    Builder.emitBlock(normalContBB);
  }

  // We're now on the normal-result path. If we didn't have an indirect
  // result slot, load from the temporary we created during
  // get_async_continuation.
  if (!isIndirectResult) {
    auto contResultAddrAddr =
        Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 3);
    auto resultAddrVal =
        Builder.CreateLoad(Address(contResultAddrAddr, pointerAlignment));
    // Take the result.
    auto &resumeTI = cast<LoadableTypeInfo>(getTypeInfo(resumeTy));
    auto resultStorageTy = resumeTI.getStorageType();
    auto resultAddr =
        Address(Builder.CreateBitOrPointerCast(resultAddrVal,
                                               resultStorageTy->getPointerTo()),
                resumeTI.getFixedAlignment());
    resumeTI.loadAsTake(*this, resultAddr, outDirectResult);
  }

  Builder.CreateBr(normalBB);
  // Clear the per-continuation state so a later get_async_continuation
  // can assert that none is active.
  AsyncCoroutineCurrentResume = nullptr;
  AsyncCoroutineCurrentContinuationContext = nullptr;
}
|
|
|
|
/// Resume a continuation with a normal result: move the value from
/// \p srcPtr into the continuation's result slot, then call the
/// appropriate resume entry point.
void IRGenFunction::emitResumeAsyncContinuationReturning(
    llvm::Value *continuation, llvm::Value *srcPtr,
    SILType valueTy, bool throwing) {
  continuation = Builder.CreateBitCast(continuation, IGM.SwiftTaskPtrTy);
  auto &valueTI = getTypeInfo(valueTy);
  Address srcAddr = valueTI.getAddressForPointer(srcPtr);

  // Extract the destination value pointer and cast it from an opaque
  // pointer type.
  Address context = emitLoadOfContinuationContext(*this, continuation);
  auto destPtrAddr = emitAddrOfContinuationNormalResultPointer(*this, context);
  auto destPtr = Builder.CreateBitCast(Builder.CreateLoad(destPtrAddr),
                                       valueTI.getStorageType()->getPointerTo());
  Address destAddr = valueTI.getAddressForPointer(destPtr);

  // Move the value into the result slot.
  valueTI.initializeWithTake(*this, destAddr, srcAddr, valueTy,
                             /*outlined*/ false);

  // Throwing continuations resume through a different entry point.
  auto call = Builder.CreateCall(throwing
                                   ? IGM.getContinuationThrowingResumeFn()
                                   : IGM.getContinuationResumeFn(),
                                 { continuation });
  call->setCallingConv(IGM.SwiftCC);
}
|
|
|
|
/// Resume a continuation by throwing \p error into it.
void IRGenFunction::emitResumeAsyncContinuationThrowing(
    llvm::Value *continuation, llvm::Value *error) {
  auto *task = Builder.CreateBitCast(continuation, IGM.SwiftTaskPtrTy);
  auto *resumeCall =
      Builder.CreateCall(IGM.getContinuationThrowingResumeWithErrorFn(),
                         {task, error});
  resumeCall->setCallingConv(IGM.SwiftCC);
}
|