Merge main into next - 2021-07-16

Conflicts:
	lib/IRGen/IRGenFunction.cpp
    Take the changes made in #38386
This commit is contained in:
Eric Miotto
2021-07-16 09:15:42 -07:00
32 changed files with 702 additions and 296 deletions

View File

@@ -3,6 +3,22 @@ CHANGELOG
_**Note:** This is in reverse chronological order, so newer entries are added to the top._
Swift Next
----------
* [SE-0290][]:
It is now possible to write inverted availability conditions by using the new `#unavailable` keyword:
```swift
if #unavailable(iOS 15.0) {
// Old functionality
} else {
// iOS 15 functionality
}
```
**Add new entries to the top of this section, not here!**
Swift 5.5
---------
@@ -8627,6 +8643,7 @@ Swift 1.0
[SE-0284]: <https://github.com/apple/swift-evolution/blob/main/proposals/0284-multiple-variadic-parameters.md>
[SE-0286]: <https://github.com/apple/swift-evolution/blob/main/proposals/0286-forward-scan-trailing-closures.md>
[SE-0287]: <https://github.com/apple/swift-evolution/blob/main/proposals/0287-implicit-member-chains.md>
[SE-0290]: <https://github.com/apple/swift-evolution/blob/main/proposals/0290-negative-availability.md>
[SE-0293]: <https://github.com/apple/swift-evolution/blob/main/proposals/0293-extend-property-wrappers-to-function-and-closure-parameters.md>
[SE-0296]: <https://github.com/apple/swift-evolution/blob/main/proposals/0296-async-await.md>
[SE-0297]: <https://github.com/apple/swift-evolution/blob/main/proposals/0297-concurrency-objc.md>

View File

@@ -20,6 +20,10 @@ set(LLVM_ENABLE_ASSERTIONS YES CACHE BOOL "")
set(ENABLE_X86_RELAX_RELOCATIONS YES CACHE BOOL "")
# NOTE(compnerd) we can hardcode the default target triple since the cache files
# are target dependent.
set(LLVM_DEFAULT_TARGET_TRIPLE x86_64-unknown-windows-msvc CACHE STRING "")
set(LLVM_APPEND_VC_REV NO CACHE BOOL "")
set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR YES CACHE BOOL "")
set(LLVM_ENABLE_PYTHON YES CACHE BOOL "")
@@ -39,6 +43,7 @@ foreach(target ${LLVM_RUNTIME_TARGETS})
set(RUNTIMES_${target}_COMPILER_RT_BUILD_SANITIZERS NO CACHE BOOL "")
set(RUNTIMES_${target}_COMPILER_RT_BUILD_XRAY NO CACHE BOOL "")
endforeach()
set(LLVM_TARGETS_TO_BUILD AArch64 ARM WebAssembly X86 CACHE STRING "")
# Disable certain targets to reduce the configure time or to avoid configuration

View File

@@ -48,7 +48,7 @@ enum {
NumWords_DefaultActor = 12,
/// The number of words in a task.
NumWords_AsyncTask = 16,
NumWords_AsyncTask = 24,
/// The number of words in a task group.
NumWords_TaskGroup = 32,
@@ -2211,7 +2211,10 @@ public:
Kind_width = 8,
CanThrow = 8,
ShouldNotDeallocate = 9
// Kind-specific flags should grow down from 31.
Continuation_IsExecutorSwitchForced = 31,
};
explicit AsyncContextFlags(uint32_t bits) : FlagSet(bits) {}
@@ -2227,17 +2230,10 @@ public:
/// Whether this context is permitted to throw.
FLAGSET_DEFINE_FLAG_ACCESSORS(CanThrow, canThrow, setCanThrow)
/// Whether a function should avoid deallocating its context before
/// returning. It should still pass its caller's context to its
/// return continuation.
///
/// This flag can be set in the caller to optimize context allocation,
/// e.g. if the callee's context size is known statically and simply
/// allocated as part of the caller's context, or if the callee will
/// be called multiple times.
FLAGSET_DEFINE_FLAG_ACCESSORS(ShouldNotDeallocate,
shouldNotDeallocateInCallee,
setShouldNotDeallocateInCallee)
/// See AsyncContinuationFlags::isExecutorSwitchForced.
FLAGSET_DEFINE_FLAG_ACCESSORS(Continuation_IsExecutorSwitchForced,
continuation_isExecutorSwitchForced,
continuation_setIsExecutorSwitchForced)
};
/// Flags passed to swift_continuation_init.
@@ -2247,6 +2243,7 @@ public:
CanThrow = 0,
HasExecutorOverride = 1,
IsPreawaited = 2,
IsExecutorSwitchForced = 3,
};
explicit AsyncContinuationFlags(size_t bits) : FlagSet(bits) {}
@@ -2262,10 +2259,27 @@ public:
hasExecutorOverride,
setHasExecutorOverride)
/// Whether the switch to the target executor should be forced
/// by swift_continuation_await. If this is not set, and
/// swift_continuation_await finds that the continuation has
/// already been resumed, then execution will continue on the
/// current executor. This has no effect in combination with
/// pre-awaiting.
///
/// Setting this flag when you know statically that you're
/// already on the right executor is suboptimal. In particular,
/// there's no good reason to set this if you're not also using
/// an executor override.
FLAGSET_DEFINE_FLAG_ACCESSORS(IsExecutorSwitchForced,
isExecutorSwitchForced,
setIsExecutorSwitchForced)
/// Whether the continuation is "pre-awaited". If so, it should
/// be set up in the already-awaited state, and so resumptions
/// will immediately schedule the continuation to begin
/// asynchronously.
/// asynchronously. The continuation must not be subsequently
/// awaited if this is set. The task is immediately treated as
/// suspended.
FLAGSET_DEFINE_FLAG_ACCESSORS(IsPreawaited,
isPreawaited,
setIsPreawaited)
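
A minimal sketch (not part of this commit) of composing the flags declared above before calling `swift_continuation_init`; the helper function name is hypothetical, while the accessors are the ones defined by `FLAGSET_DEFINE_FLAG_ACCESSORS` above:

```cpp
#include "swift/ABI/MetadataValues.h"

// Hypothetical helper: build flags for a throwing continuation that
// forces an executor switch on resume.
swift::AsyncContinuationFlags makeThrowingForcedSwitchFlags() {
  swift::AsyncContinuationFlags flags(0);
  flags.setCanThrow(true);
  // Ask swift_continuation_await to switch to ResumeToExecutor even
  // if the continuation was already resumed by the time we await.
  flags.setIsExecutorSwitchForced(true);
  return flags;
}
```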

View File

@@ -203,7 +203,7 @@ public:
/// Private storage for the use of the runtime.
struct alignas(2 * alignof(void*)) OpaquePrivateStorage {
void *Storage[6];
void *Storage[14];
/// Initialize this storage during the creation of a task.
void initialize(AsyncTask *task);
@@ -260,7 +260,25 @@ public:
void runInFullyEstablishedContext() {
return ResumeTask(ResumeContext); // 'return' forces tail call
}
/// Flag that this task is now running. This can update
/// the priority stored in the job flags if the priority has been
/// escalated.
///
/// Generally this should be done immediately after updating
/// ActiveTask.
void flagAsRunning();
void flagAsRunning_slow();
/// Flag that this task is now suspended. This can update the
/// priority stored in the job flags if the priority has been
/// escalated. Generally this should be done immediately after
/// clearing ActiveTask and immediately before enqueuing the task
/// somewhere. TODO: record where the task is enqueued if
/// possible.
void flagAsSuspended();
void flagAsSuspended_slow();
/// Check whether this task has been cancelled.
/// Checking this is, of course, inherently race-prone on its own.
bool isCancelled() const;
@@ -632,6 +650,10 @@ public:
ErrorResult = error;
}
bool isExecutorSwitchForced() const {
return Flags.continuation_isExecutorSwitchForced();
}
static bool classof(const AsyncContext *context) {
return context->Flags.getKind() == AsyncContextKind::Continuation;
}

View File

@@ -20,8 +20,8 @@
#ifndef SWIFT_ABI_TASKSTATUS_H
#define SWIFT_ABI_TASKSTATUS_H
#include "swift/ABI/Task.h"
#include "swift/ABI/MetadataValues.h"
#include "swift/ABI/Task.h"
namespace swift {
@@ -30,7 +30,7 @@ namespace swift {
/// TaskStatusRecords are typically allocated on the stack (possibly
/// in the task context), partially initialized, and then atomically
/// added to the task with `swift_task_addTaskStatusRecord`. While
/// registered with the task, a status record should only be
/// registered with the task, a status record should only be
/// modified in ways that respect the possibility of asynchronous
/// access by a cancelling thread. In particular, the chain of
/// status records must not be disturbed. When the task leaves
@@ -51,13 +51,9 @@ public:
TaskStatusRecord(const TaskStatusRecord &) = delete;
TaskStatusRecord &operator=(const TaskStatusRecord &) = delete;
TaskStatusRecordKind getKind() const {
return Flags.getKind();
}
TaskStatusRecordKind getKind() const { return Flags.getKind(); }
TaskStatusRecord *getParent() const {
return Parent;
}
TaskStatusRecord *getParent() const { return Parent; }
/// Change the parent of this unregistered status record to the
/// given record.
@@ -77,9 +73,7 @@ public:
/// Unlike resetParent, this assumes that it's just removing one or
/// more records from the chain and that there's no need to do any
/// extra cache manipulation.
void spliceParent(TaskStatusRecord *newParent) {
Parent = newParent;
}
void spliceParent(TaskStatusRecord *newParent) { Parent = newParent; }
};
/// A deadline for the task. If this is reached, the task will be
@@ -102,14 +96,12 @@ struct TaskDeadline {
/// within the task.
class DeadlineStatusRecord : public TaskStatusRecord {
TaskDeadline Deadline;
public:
DeadlineStatusRecord(TaskDeadline deadline)
: TaskStatusRecord(TaskStatusRecordKind::Deadline),
Deadline(deadline) {}
: TaskStatusRecord(TaskStatusRecordKind::Deadline), Deadline(deadline) {}
TaskDeadline getDeadline() const {
return Deadline;
}
TaskDeadline getDeadline() const { return Deadline; }
static bool classof(const TaskStatusRecord *record) {
return record->getKind() == TaskStatusRecordKind::Deadline;
@@ -123,25 +115,22 @@ class ChildTaskStatusRecord : public TaskStatusRecord {
public:
ChildTaskStatusRecord(AsyncTask *child)
: TaskStatusRecord(TaskStatusRecordKind::ChildTask),
FirstChild(child) {}
: TaskStatusRecord(TaskStatusRecordKind::ChildTask), FirstChild(child) {}
ChildTaskStatusRecord(AsyncTask *child, TaskStatusRecordKind kind)
: TaskStatusRecord(kind),
FirstChild(child) {
: TaskStatusRecord(kind), FirstChild(child) {
assert(kind == TaskStatusRecordKind::ChildTask);
assert(!child->hasGroupChildFragment() &&
"Group child tasks must be tracked in their respective "
"TaskGroupTaskStatusRecord, and not as independent ChildTaskStatusRecord "
"records.");
"Group child tasks must be tracked in their respective "
"TaskGroupTaskStatusRecord, and not as independent "
"ChildTaskStatusRecord "
"records.");
}
/// Return the first child linked by this record. This may be null;
/// if not, it (and all of its successors) are guaranteed to satisfy
/// `isChildTask()`.
AsyncTask *getFirstChild() const {
return FirstChild;
}
AsyncTask *getFirstChild() const { return FirstChild; }
static AsyncTask *getNextChildTask(AsyncTask *task) {
return task->childFragment()->getNextChild();
@@ -175,25 +164,21 @@ public:
/// and are only tracked by their respective `TaskGroupTaskStatusRecord`.
class TaskGroupTaskStatusRecord : public TaskStatusRecord {
AsyncTask *FirstChild;
public:
TaskGroupTaskStatusRecord()
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup),
FirstChild(nullptr) {}
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup), FirstChild(nullptr) {
}
TaskGroupTaskStatusRecord(AsyncTask *child)
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup),
FirstChild(child) {}
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup), FirstChild(child) {}
TaskGroup* getGroup() {
return reinterpret_cast<TaskGroup *>(this);
}
TaskGroup *getGroup() { return reinterpret_cast<TaskGroup *>(this); }
/// Return the first child linked by this record. This may be null;
/// if not, it (and all of its successors) are guaranteed to satisfy
/// `isChildTask()`.
AsyncTask *getFirstChild() const {
return FirstChild;
}
AsyncTask *getFirstChild() const { return FirstChild; }
/// Attach the passed in `child` task to this group.
void attachChild(AsyncTask *child) {
@@ -207,7 +192,8 @@ public:
return;
}
// We need to traverse the siblings to find the last one and add the child there.
// We need to traverse the siblings to find the last one and add the child
// there.
// FIXME: just set prepend to the current head, no need to traverse.
auto cur = FirstChild;
@@ -249,20 +235,18 @@ public:
/// subsequently used.
class CancellationNotificationStatusRecord : public TaskStatusRecord {
public:
using FunctionType = SWIFT_CC(swift) void (SWIFT_CONTEXT void *);
using FunctionType = SWIFT_CC(swift) void(SWIFT_CONTEXT void *);
private:
FunctionType * __ptrauth_swift_cancellation_notification_function Function;
FunctionType *__ptrauth_swift_cancellation_notification_function Function;
void *Argument;
public:
CancellationNotificationStatusRecord(FunctionType *fn, void *arg)
: TaskStatusRecord(TaskStatusRecordKind::CancellationNotification),
Function(fn), Argument(arg) {}
: TaskStatusRecord(TaskStatusRecordKind::CancellationNotification),
Function(fn), Argument(arg) {}
void run() {
Function(Argument);
}
void run() { Function(Argument); }
static bool classof(const TaskStatusRecord *record) {
return record->getKind() == TaskStatusRecordKind::CancellationNotification;
@@ -279,20 +263,18 @@ public:
/// subsequently used.
class EscalationNotificationStatusRecord : public TaskStatusRecord {
public:
using FunctionType = void (void *, JobPriority);
using FunctionType = void(void *, JobPriority);
private:
FunctionType * __ptrauth_swift_escalation_notification_function Function;
FunctionType *__ptrauth_swift_escalation_notification_function Function;
void *Argument;
public:
EscalationNotificationStatusRecord(FunctionType *fn, void *arg)
: TaskStatusRecord(TaskStatusRecordKind::EscalationNotification),
Function(fn), Argument(arg) {}
: TaskStatusRecord(TaskStatusRecordKind::EscalationNotification),
Function(fn), Argument(arg) {}
void run(JobPriority newPriority) {
Function(Argument, newPriority);
}
void run(JobPriority newPriority) { Function(Argument, newPriority); }
static bool classof(const TaskStatusRecord *record) {
return record->getKind() == TaskStatusRecordKind::EscalationNotification;

View File

@@ -53,10 +53,15 @@ public:
constexpr atomic_impl(Value value) : value(value) {}
/// Force clients to always pass an order.
Value load(std::memory_order order) {
Value load(std::memory_order order) const {
return value.load(order);
}
/// Force clients to always pass an order.
void store(Value newValue, std::memory_order order) {
return value.store(newValue, order);
}
/// Force clients to always pass an order.
bool compare_exchange_weak(Value &oldValue, Value newValue,
std::memory_order successOrder,
@@ -75,14 +80,14 @@ public:
/// AMD processors that lack cmpxchg16b, so we just use the intrinsic.
template <class Value>
class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
volatile Value atomicValue;
mutable volatile Value atomicValue;
public:
constexpr atomic_impl(Value initialValue) : atomicValue(initialValue) {}
atomic_impl(const atomic_impl &) = delete;
atomic_impl &operator=(const atomic_impl &) = delete;
Value load(std::memory_order order) {
Value load(std::memory_order order) const {
assert(order == std::memory_order_relaxed ||
order == std::memory_order_acquire ||
order == std::memory_order_consume);
@@ -107,6 +112,17 @@ public:
return reinterpret_cast<Value &>(resultArray);
}
void store(Value newValue, std::memory_order order) {
assert(order == std::memory_order_relaxed ||
order == std::memory_order_release);
Value oldValue = load(std::memory_order_relaxed);
while (!compare_exchange_weak(oldValue, newValue,
/*success*/ order,
/*failure*/ std::memory_order_relaxed)) {
// try again
}
}
bool compare_exchange_weak(Value &oldValue, Value newValue,
std::memory_order successOrder,
std::memory_order failureOrder) {
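
A hypothetical usage sketch for the double-word specialization in this hunk, assuming the `swift::atomic` alias that TaskPrivate.h uses further down; orders are mandatory, and on this specialization `store()` degrades to the compare-exchange loop added above:

```cpp
#include <atomic>
#include "swift/Runtime/Atomic.h"

// A two-pointer payload, sized to select the 2*sizeof(void*)
// specialization shown above.
struct TwoWords {
  void *first;
  void *second;
};

static swift::atomic<TwoWords> gState{TwoWords{nullptr, nullptr}};

void publish(void *a, void *b) {
  // Release store: implemented as a cmpxchg16b retry loop internally.
  gState.store(TwoWords{a, b}, std::memory_order_release);
}

TwoWords snapshot() {
  // Acquire load, now permitted on const objects as well.
  return gState.load(std::memory_order_acquire);
}
```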

View File

@@ -605,20 +605,28 @@ void swift_defaultActor_enqueue(Job *job, DefaultActor *actor);
SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
bool swift_distributed_actor_is_remote(DefaultActor *actor);
/// Do a primitive suspension of the current task, as if part of
/// a continuation, although this does not provide any of the
/// higher-level continuation semantics. The current task is returned;
/// its ResumeFunction and ResumeContext will need to be initialized,
/// and then it will need to be enqueued or run as a job later.
SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
AsyncTask *swift_task_suspend();
/// Prepare a continuation in the current task.
///
/// The caller should initialize the Parent, ResumeParent,
/// and NormalResult fields. This function will initialize the other
/// fields with appropriate defaaults; the caller may then overwrite
/// fields with appropriate defaults; the caller may then overwrite
/// them if desired.
///
/// This function is provided as a code-size and runtime-usage
/// optimization; calling it is not required if code is willing to
/// do all its work inline.
SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
AsyncTask *swift_continuation_init(ContinuationAsyncContext *context,
AsyncContinuationFlags flags);
/// Await an initialized continuation.
SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swiftasync)
void swift_continuation_await(ContinuationAsyncContext *continuationContext);
/// Resume a task from a non-throwing continuation, given a normal
/// result which has already been stored into the continuation.
SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
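
A minimal sketch of how a runtime-internal caller might use the primitive-suspension protocol documented above; `myResume`, `suspendAndReschedule`, and the choice of enqueue target are illustrative, not part of this change:

```cpp
#include "swift/Runtime/Concurrency.h"

// Hypothetical resume point; the task continues here when the job runs.
SWIFT_CC(swiftasync)
static void myResume(SWIFT_ASYNC_CONTEXT swift::AsyncContext *context) {
  // ... resume work would go here ...
}

void suspendAndReschedule(swift::AsyncContext *resumeContext) {
  // Detach the current task from the thread; we now own scheduling it.
  swift::AsyncTask *task = swift::swift_task_suspend();
  task->ResumeTask = &myResume;        // must be initialized by us
  task->ResumeContext = resumeContext; // likewise
  // Hand the task to the global executor to run later as a job.
  swift::swift_task_enqueueGlobal(task);
}
```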

View File

@@ -1582,6 +1582,14 @@ FUNCTION(ContinuationInit,
ARGS(ContinuationAsyncContextPtrTy, SizeTy),
ATTRS(NoUnwind))
// void swift_continuation_await(AsyncContext *continuationContext);
FUNCTION(ContinuationAwait,
swift_continuation_await, SwiftAsyncCC,
ConcurrencyAvailability,
RETURNS(VoidTy),
ARGS(ContinuationAsyncContextPtrTy),
ATTRS(NoUnwind))
// void swift_continuation_resume(AsyncTask *continuation);
FUNCTION(ContinuationResume,
swift_continuation_resume, SwiftCC,

View File

@@ -3432,7 +3432,9 @@ void PrintAST::visitAccessorDecl(AccessorDecl *decl) {
printAttributes(decl);
// Explicitly print 'mutating' and 'nonmutating' if needed.
printMutabilityModifiersIfNeeded(decl);
if (decl->isConsuming()) {
Printer.printKeyword("__consuming", Options, " ");
}
switch (auto kind = decl->getAccessorKind()) {
case AccessorKind::Get:
case AccessorKind::Address:

View File

@@ -312,6 +312,10 @@ void RewriteSystem::processMergedAssociatedTypes() {
// Add the rule X.[P1:T] => X.[P1&P2:T].
addRule(rhs, mergedTerm);
// Collect new rules here so that we're not adding rules while iterating
// over the rules list.
SmallVector<std::pair<MutableTerm, MutableTerm>, 2> inducedRules;
// Look for conformance requirements on [P1:T] and [P2:T].
for (const auto &otherRule : Rules) {
const auto &otherLHS = otherRule.getLHS();
@@ -343,10 +347,14 @@ void RewriteSystem::processMergedAssociatedTypes() {
MutableTerm newRHS;
newRHS.add(mergedAtom);
addRule(newLHS, newRHS);
inducedRules.emplace_back(newLHS, newRHS);
}
}
}
// Now add the new rules.
for (const auto &pair : inducedRules)
addRule(pair.first, pair.second);
}
MergedAssociatedTypes.clear();
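
The collect-then-apply idiom used above, in miniature (hypothetical standalone code, not RewriteSystem itself): never grow a container while range-iterating it, since growth can invalidate the iterators.

```cpp
#include <vector>

void addInducedRules(std::vector<int> &rules) {
  std::vector<int> induced;
  for (int rule : rules)        // iterate without mutating `rules`...
    if (rule % 2 == 0)
      induced.push_back(rule + 1);
  for (int rule : induced)      // ...then apply the collected additions
    rules.push_back(rule);
}
```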

View File

@@ -232,6 +232,7 @@ void ClangImporter::recordModuleDependencies(
while(It != allArgs.end()) {
StringRef arg = *It;
// Remove the -target arguments because we should use the target triple
// specified with `-clang-target` on the scanner invocation, or
// from the depending Swift modules.
if (arg == "-target") {
It += 2;
@@ -255,6 +256,16 @@ void ClangImporter::recordModuleDependencies(
swiftArgs.push_back(clangArg);
}
// If the scanner is invoked with '-clang-target', ensure this is the target
// used to build this PCM.
if (Impl.SwiftContext.LangOpts.ClangTarget.hasValue()) {
llvm::Triple triple = Impl.SwiftContext.LangOpts.ClangTarget.getValue();
swiftArgs.push_back("-Xcc");
swiftArgs.push_back("-target");
swiftArgs.push_back("-Xcc");
swiftArgs.push_back(triple.str());
}
// Swift frontend action: -emit-pcm
swiftArgs.push_back("-emit-pcm");
swiftArgs.push_back("-module-name");

View File

@@ -1049,11 +1049,16 @@ identifyMainModuleDependencies(CompilerInstance &instance) {
instance.getASTContext()
.LangOpts.EffectiveLanguageVersion.asAPINotesVersionString())
.str();
// Compute the dependencies of the main module.
auto mainDependencies = ModuleDependencies::forMainSwiftModule(
{// ExtraPCMArgs
"-Xcc", "-target", "-Xcc",
instance.getASTContext().LangOpts.Target.str(), "-Xcc", apinotesVer});
std::vector<StringRef> ExtraPCMArgs = {
"-Xcc", apinotesVer
};
if (!instance.getASTContext().LangOpts.ClangTarget.hasValue())
ExtraPCMArgs.insert(ExtraPCMArgs.begin(),
{"-Xcc", "-target", "-Xcc",
instance.getASTContext().LangOpts.Target.str()});
auto mainDependencies = ModuleDependencies::forMainSwiftModule(ExtraPCMArgs);
// Compute Implicit dependencies of the main module
{

View File

@@ -1648,15 +1648,20 @@ InterfaceSubContextDelegateImpl::runInSubCompilerInstance(StringRef moduleName,
}
info.BuildArguments = BuildArgs;
info.Hash = CacheHash;
auto target = *(std::find(BuildArgs.rbegin(), BuildArgs.rend(), "-target") - 1);
auto target = *(std::find(BuildArgs.rbegin(), BuildArgs.rend(), "-target") - 1);
auto langVersion = *(std::find(BuildArgs.rbegin(), BuildArgs.rend(),
"-swift-version") - 1);
std::array<StringRef, 6> ExtraPCMArgs = {
// PCMs should use the target triple the interface will be using to build
"-Xcc", "-target", "-Xcc", target,
std::vector<StringRef> ExtraPCMArgs = {
// PCMs should use the effective Swift language version for apinotes.
"-Xcc", ArgSaver.save((llvm::Twine("-fapinotes-swift-version=") + langVersion).str())
"-Xcc",
ArgSaver.save((llvm::Twine("-fapinotes-swift-version=") + langVersion).str())
};
if (!subInvocation.getLangOptions().ClangTarget.hasValue()) {
ExtraPCMArgs.insert(ExtraPCMArgs.begin(), {"-Xcc", "-target",
"-Xcc", target});
}
info.ExtraPCMArgs = ExtraPCMArgs;
// Run the action under the sub compiler instance.
return action(info);

View File

@@ -21,6 +21,7 @@
#include "ExtraInhabitants.h"
#include "GenProto.h"
#include "GenType.h"
#include "IRGenDebugInfo.h"
#include "IRGenFunction.h"
#include "IRGenModule.h"
#include "LoadableTypeInfo.h"
@@ -241,3 +242,34 @@ void irgen::emitDestroyTaskGroup(IRGenFunction &IGF, llvm::Value *group) {
IGF.Builder.CreateLifetimeEnd(group);
}
llvm::Function *IRGenModule::getAwaitAsyncContinuationFn() {
StringRef name = "__swift_continuation_await_point";
if (llvm::GlobalValue *F = Module.getNamedValue(name))
return cast<llvm::Function>(F);
// The parameters here match the extra arguments passed to
// @llvm.coro.suspend.async by emitAwaitAsyncContinuation.
llvm::Type *argTys[] = { ContinuationAsyncContextPtrTy };
auto *suspendFnTy =
llvm::FunctionType::get(VoidTy, argTys, false /*vaargs*/);
llvm::Function *suspendFn =
llvm::Function::Create(suspendFnTy, llvm::Function::InternalLinkage,
name, &Module);
suspendFn->setCallingConv(SwiftAsyncCC);
suspendFn->setDoesNotThrow();
IRGenFunction suspendIGF(*this, suspendFn);
if (DebugInfo)
DebugInfo->emitArtificialFunction(suspendIGF, suspendFn);
auto &Builder = suspendIGF.Builder;
llvm::Value *context = suspendFn->getArg(0);
auto *call = Builder.CreateCall(getContinuationAwaitFn(), { context });
call->setDoesNotThrow();
call->setCallingConv(SwiftAsyncCC);
call->setTailCallKind(AsyncTailCallKind);
Builder.CreateRetVoid();
return suspendFn;
}

View File

@@ -665,51 +665,8 @@ void IRGenFunction::emitAwaitAsyncContinuation(
assert(AsyncCoroutineCurrentContinuationContext && "no active continuation");
auto pointerAlignment = IGM.getPointerAlignment();
// Check whether the continuation has already been resumed.
// If so, we can just immediately continue with the control flow.
// Otherwise, we need to suspend, and resuming the continuation will
// trigger the function to resume.
//
// We do this by atomically trying to change the synchronization field
// in the continuation context from 0 (the state it was initialized
// with) to 1. If this fails, the continuation must already have been
// resumed, so we can bypass the suspension point and immediately
// start interpreting the result stored in the continuation.
// Note that we use a strong compare-exchange (the default for the LLVM
// cmpxchg instruction), so spurious failures are disallowed; we can
// therefore trust that a failure means that the continuation has
// already been resumed.
auto contAwaitSyncAddr =
Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 1);
auto pendingV = llvm::ConstantInt::get(
contAwaitSyncAddr->getType()->getPointerElementType(),
unsigned(ContinuationStatus::Pending));
auto awaitedV = llvm::ConstantInt::get(
contAwaitSyncAddr->getType()->getPointerElementType(),
unsigned(ContinuationStatus::Awaited));
auto results = Builder.CreateAtomicCmpXchg(
contAwaitSyncAddr, pendingV, awaitedV,
llvm::MaybeAlign(),
llvm::AtomicOrdering::Release /*success ordering*/,
llvm::AtomicOrdering::Acquire /* failure ordering */,
llvm::SyncScope::System);
auto firstAtAwait = Builder.CreateExtractValue(results, 1);
auto contBB = createBasicBlock("await.async.resume");
auto abortBB = createBasicBlock("await.async.abort");
Builder.CreateCondBr(firstAtAwait, abortBB, contBB);
Builder.emitBlock(abortBB);
{
// We were the first to the sync point. "Abort" (return from the
// coroutine partial function, without making a tail call to anything)
// because the continuation result is not available yet. When the
// continuation is later resumed, the task will get scheduled
// starting from the suspension point.
emitCoroutineOrAsyncExit();
}
Builder.emitBlock(contBB);
// Call swift_continuation_await to check whether the continuation
// has already been resumed.
{
// Set up the suspend point.
SmallVector<llvm::Value *, 8> arguments;
@@ -719,15 +676,10 @@ void IRGenFunction::emitAwaitAsyncContinuation(
auto resumeProjFn = getOrCreateResumePrjFn();
arguments.push_back(
Builder.CreateBitOrPointerCast(resumeProjFn, IGM.Int8PtrTy));
// The dispatch function just calls the resume point.
auto resumeFnPtr =
getFunctionPointerForResumeIntrinsic(AsyncCoroutineCurrentResume);
arguments.push_back(Builder.CreateBitOrPointerCast(
createAsyncDispatchFn(resumeFnPtr, {IGM.Int8PtrTy}),
IGM.getAwaitAsyncContinuationFn(),
IGM.Int8PtrTy));
arguments.push_back(AsyncCoroutineCurrentResume);
arguments.push_back(Builder.CreateBitOrPointerCast(
AsyncCoroutineCurrentContinuationContext, IGM.Int8PtrTy));
arguments.push_back(AsyncCoroutineCurrentContinuationContext);
auto resultTy =
llvm::StructType::get(IGM.getLLVMContext(), {IGM.Int8PtrTy}, false /*packed*/);
emitSuspendAsyncCall(swiftAsyncContextIndex, resultTy, arguments);

View File

@@ -1330,6 +1330,7 @@ public:
llvm::Constant *getFixLifetimeFn();
llvm::Constant *getFixedClassInitializationFn();
llvm::Function *getAwaitAsyncContinuationFn();
/// The constructor used when generating code.
///

View File

@@ -78,10 +78,6 @@ OVERRIDE_ACTOR(job_run, void,
swift::, (class Job *job, ExecutorRef executor),
(job, executor))
OVERRIDE_ACTOR(task_getCurrent, AsyncTask *,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
swift::, ,)
OVERRIDE_ACTOR(task_getCurrentExecutor, ExecutorRef,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
swift::, ,)
@@ -159,6 +155,21 @@ OVERRIDE_TASK(task_asyncMainDrainQueue, void,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), swift::,
, )
OVERRIDE_TASK(task_suspend, AsyncTask *,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
swift::, ,)
OVERRIDE_TASK(continuation_init, AsyncTask *,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
swift::, (ContinuationAsyncContext *context,
AsyncContinuationFlags flags),
(context, flags))
OVERRIDE_TASK(continuation_await, void,
SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swiftasync),
swift::, (ContinuationAsyncContext *context),
(context))
OVERRIDE_ASYNC_LET(asyncLet_wait, void, SWIFT_EXPORT_FROM(swift_Concurrency),
SWIFT_CC(swiftasync), swift::,
(OpaqueValue *result,

View File

@@ -231,15 +231,16 @@ void swift::runJobInEstablishedExecutorContext(Job *job) {
// Update the active task in the current thread.
ActiveTask::set(task);
// FIXME: update the task status to say that it's running
// on the current thread. If the task suspends itself to run
// on an actor, it should update the task status appropriately;
// we don't need to update it afterwards.
// Update the task status to say that it's running on the
// current thread. If the task suspends somewhere, it should
// update the task status appropriately; we don't need to update
// it afterwards.
task->flagAsRunning();
task->runInFullyEstablishedContext();
// Clear the active task.
ActiveTask::set(nullptr);
assert(ActiveTask::get() == nullptr &&
"active task wasn't cleared before susspending?");
} else {
// There's no extra bookkeeping to do for simple jobs.
job->runSimpleInFullyEstablishedContext();
@@ -253,7 +254,7 @@ void swift::runJobInEstablishedExecutorContext(Job *job) {
}
SWIFT_CC(swift)
static AsyncTask *swift_task_getCurrentImpl() {
AsyncTask *swift::swift_task_getCurrent() {
return ActiveTask::get();
}
@@ -1845,6 +1846,9 @@ SWIFT_CC(swiftasync)
static void runOnAssumedThread(AsyncTask *task, ExecutorRef executor,
ExecutorTrackingInfo *oldTracking,
RunningJobInfo runner) {
// Note that this doesn't change the active task and so doesn't
// need to either update ActiveTask or flagAsRunning/flagAsSuspended.
// If there's already tracking info set up, just change the executor
// there and tail-call the task. We don't want these frames to
// potentially accumulate linearly.
@@ -1932,6 +1936,8 @@ static void swift_task_switchImpl(SWIFT_ASYNC_CONTEXT AsyncContext *resumeContex
fprintf(stderr, "[%lu] switch failed, task %p enqueued on executor %p\n",
_swift_get_thread_id(), task, newExecutor.getIdentity());
#endif
task->flagAsSuspended();
_swift_task_clearCurrent();
swift_task_enqueue(task, newExecutor);
}

View File

@@ -93,9 +93,14 @@ func _checkExpectedExecutor(_filenameStart: Builtin.RawPointer,
_filenameStart, _filenameLength, _filenameIsASCII, _line, _executor)
}
// This must take a DispatchQueueShim, not something like AnyObject,
// or else SILGen will emit a retain/release in unoptimized builds,
// which won't work because DispatchQueues aren't actually
// Swift-retainable.
@available(SwiftStdlib 5.5, *)
@_silgen_name("swift_task_enqueueOnDispatchQueue")
internal func _enqueueOnDispatchQueue(_ job: UnownedJob, queue: AnyObject)
internal func _enqueueOnDispatchQueue(_ job: UnownedJob,
queue: DispatchQueueShim)
/// Used by the runtime solely for the witness table it produces.
/// FIXME: figure out some way to achieve that which doesn't generate
@@ -105,13 +110,11 @@ internal func _enqueueOnDispatchQueue(_ job: UnownedJob, queue: AnyObject)
/// means a dispatch_queue_t, which is not the same as DispatchQueue
/// on platforms where that is an instance of a wrapper class.
@available(SwiftStdlib 5.5, *)
internal class DispatchQueueShim: UnsafeSendable, SerialExecutor {
@inlinable
internal final class DispatchQueueShim: UnsafeSendable, SerialExecutor {
func enqueue(_ job: UnownedJob) {
_enqueueOnDispatchQueue(job, queue: self)
}
@inlinable
func asUnownedSerialExecutor() -> UnownedSerialExecutor {
return UnownedSerialExecutor(ordinary: self)
}

View File

@@ -89,6 +89,7 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask,
_swift_get_thread_id(), waitingTask, this);
#endif
_swift_tsan_acquire(static_cast<Job *>(this));
if (contextIntialized) waitingTask->flagAsRunning();
// The task is done; we don't need to wait.
return queueHead.getStatus();
@@ -98,7 +99,7 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask,
_swift_get_thread_id(), waitingTask, this);
#endif
_swift_tsan_release(static_cast<Job *>(waitingTask));
// Task is now complete. We'll need to add ourselves to the queue.
// Task is not complete. We'll need to add ourselves to the queue.
break;
}
@@ -110,6 +111,7 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask,
context->successResultPointer = result;
context->ResumeParent = resumeFn;
context->Parent = callerContext;
waitingTask->flagAsSuspended();
}
// Put the waiting task at the beginning of the wait queue.
@@ -122,6 +124,7 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask,
// Escalate the priority of this task based on the priority
// of the waiting task.
swift_task_escalate(this, waitingTask->Flags.getPriority());
_swift_task_clearCurrent();
return FutureFragment::Status::Executing;
}
}
@@ -633,7 +636,6 @@ static AsyncTaskAndContext swift_task_create_commonImpl(
// be is the final hop. Store a signed null instead.
initialContext->Parent = nullptr;
initialContext->Flags = AsyncContextKind::Ordinary;
initialContext->Flags.setShouldNotDeallocateInCallee(true);
// Attach to the group, if needed.
if (group) {
@@ -934,10 +936,20 @@ size_t swift::swift_task_getJobFlags(AsyncTask *task) {
return task->Flags.getOpaqueValue();
}
AsyncTask *swift::swift_continuation_init(ContinuationAsyncContext *context,
AsyncContinuationFlags flags) {
SWIFT_CC(swift)
static AsyncTask *swift_task_suspendImpl() {
auto task = _swift_task_clearCurrent();
task->flagAsSuspended();
return task;
}
SWIFT_CC(swift)
static AsyncTask *swift_continuation_initImpl(ContinuationAsyncContext *context,
AsyncContinuationFlags flags) {
context->Flags = AsyncContextKind::Continuation;
if (flags.canThrow()) context->Flags.setCanThrow(true);
if (flags.isExecutorSwitchForced())
context->Flags.continuation_setIsExecutorSwitchForced(true);
context->ErrorResult = nullptr;
// Set the current executor as the target executor unless there's
@@ -952,14 +964,82 @@ AsyncTask *swift::swift_continuation_init(ContinuationAsyncContext *context,
: ContinuationStatus::Pending,
std::memory_order_relaxed);
auto task = swift_task_getCurrent();
assert(task && "initializing a continuation with no current task");
AsyncTask *task;
// A preawait immediately suspends the task.
if (flags.isPreawaited()) {
task = _swift_task_clearCurrent();
assert(task && "initializing a continuation with no current task");
task->flagAsSuspended();
} else {
task = swift_task_getCurrent();
assert(task && "initializing a continuation with no current task");
}
task->ResumeContext = context;
task->ResumeTask = context->ResumeParent;
return task;
}
SWIFT_CC(swiftasync)
static void swift_continuation_awaitImpl(ContinuationAsyncContext *context) {
#ifndef NDEBUG
auto task = swift_task_getCurrent();
assert(task && "awaiting continuation without a task");
assert(task->ResumeContext == context);
assert(task->ResumeTask == context->ResumeParent);
#endif
auto &sync = context->AwaitSynchronization;
auto oldStatus = sync.load(std::memory_order_acquire);
assert((oldStatus == ContinuationStatus::Pending ||
oldStatus == ContinuationStatus::Resumed) &&
"awaiting a corrupt or already-awaited continuation");
// If the status is already Resumed, we can resume immediately.
// Comparing against Pending may be very slightly more compact.
if (oldStatus != ContinuationStatus::Pending) {
if (context->isExecutorSwitchForced())
return swift_task_switch(context, context->ResumeParent,
context->ResumeToExecutor);
return context->ResumeParent(context);
}
// Load the current task (we already did this in assertions builds).
#ifdef NDEBUG
auto task = swift_task_getCurrent();
#endif
// Flag the task as suspended.
task->flagAsSuspended();
// Try to transition to Awaited.
bool success =
sync.compare_exchange_strong(oldStatus, ContinuationStatus::Awaited,
/*success*/ std::memory_order_release,
/*failure*/ std::memory_order_acquire);
// If that succeeded, we have nothing to do.
if (success) {
_swift_task_clearCurrent();
return;
}
// If it failed, it should be because someone concurrently resumed
// (note that the compare-exchange above is strong).
assert(oldStatus == ContinuationStatus::Resumed &&
"continuation was concurrently corrupted or awaited");
// Restore the running state of the task and resume it.
task->flagAsRunning();
if (context->isExecutorSwitchForced())
return swift_task_switch(context, context->ResumeParent,
context->ResumeToExecutor);
return context->ResumeParent(context);
}
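
The synchronization protocol above, reduced to its three-state machine (a standalone sketch; the state names match the runtime's `ContinuationStatus`):

```cpp
enum class Status { Pending, Awaited, Resumed };
// init:   the status starts Pending (or Awaited when pre-awaited).
// await:  Pending -> Awaited suspends the task; observing Resumed
//         instead means the result is already available, so continue
//         inline (switching executors first if the flag forces it).
// resume: Pending -> Resumed wins the race and simply returns; seeing
//         Awaited means the task already suspended, so schedule it.
```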
static void resumeTaskAfterContinuation(AsyncTask *task,
ContinuationAsyncContext *context) {
auto &sync = context->AwaitSynchronization;
@@ -1030,7 +1110,12 @@ swift_task_addCancellationHandlerImpl(
auto *record = new (allocation)
CancellationNotificationStatusRecord(unsigned_handler, context);
swift_task_addStatusRecord(record);
if (swift_task_addStatusRecord(record))
return record;
// else, the task was already cancelled, so while the record was added,
// we must run it immediately here since no other task will trigger it.
record->run();
return record;
}
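
The "register, and if cancellation already fired run it ourselves" pattern above, in a standalone miniature (all names hypothetical):

```cpp
#include <functional>
#include <utility>
#include <vector>

struct CancellationRegistry {
  bool alreadyCancelled = false;
  std::vector<std::function<void()>> handlers;

  // Mirrors swift_task_addStatusRecord: the record is attached either
  // way, but `false` means cancellation already happened and nothing
  // else will ever invoke the handler.
  bool tryAdd(std::function<void()> fn) {
    handlers.push_back(std::move(fn));
    return !alreadyCancelled;
  }
};

void addHandler(CancellationRegistry &reg, std::function<void()> fn) {
  if (!reg.tryAdd(fn))
    fn(); // missed the cancellation: run the handler here, exactly once
}
```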

View File

@@ -34,12 +34,8 @@ public func withTaskCancellationHandler<T>(
) async rethrows -> T {
let task = Builtin.getCurrentAsyncTask()
guard !_taskIsCancelled(task) else {
// If the current task is already cancelled, run the handler immediately.
handler()
return try await operation()
}
// unconditionally add the cancellation record to the task.
// if the task was already cancelled, it will be executed right away.
let record = _taskAddCancellationHandler(handler: handler)
defer { _taskRemoveCancellationHandler(record: record) }

View File

@@ -728,6 +728,9 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) {
return result;
}
// Have we suspended the task?
bool hasSuspended = false;
auto waitHead = waitQueue.load(std::memory_order_acquire);
// ==== 2) Ready task was polled, return with it immediately -----------------
@@ -742,13 +745,12 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) {
// Success! We are allowed to poll.
ReadyQueueItem item;
bool taskDequeued = readyQueue.dequeue(item);
if (!taskDequeued) {
result.status = PollStatus::MustWait;
result.storage = nullptr;
result.successType = nullptr;
result.retainedTask = nullptr;
mutex.unlock(); // TODO: remove group lock, and use status for synchronization
return result;
assert(taskDequeued); (void) taskDequeued;
// We're going back to running the task, so if we suspended before,
// we need to flag it as running again.
if (hasSuspended) {
waitingTask->flagAsRunning();
}
assert(item.getTask()->isFuture());
@@ -796,6 +798,10 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) {
assert(assumed.readyTasks() == 0);
_swift_tsan_release(static_cast<Job *>(waitingTask));
while (true) {
if (!hasSuspended) {
hasSuspended = true;
waitingTask->flagAsSuspended();
}
// Put the waiting task at the beginning of the wait queue.
if (waitQueue.compare_exchange_weak(
waitHead, waitingTask,
@@ -804,10 +810,10 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) {
mutex.unlock(); // TODO: remove fragment lock, and use status for synchronization
// no ready tasks, so we must wait.
result.status = PollStatus::MustWait;
_swift_task_clearCurrent();
return result;
} // else, try again
}
assert(false && "must successfully compare exchange the waiting task.");
}
// =============================================================================

View File

@@ -20,6 +20,7 @@
#include "swift/Runtime/Concurrency.h"
#include "swift/ABI/Task.h"
#include "swift/ABI/Metadata.h"
#include "swift/Runtime/Atomic.h"
#include "swift/Runtime/HeapObject.h"
#include "swift/Runtime/Error.h"
#include "Error.h"
@@ -160,32 +161,95 @@ public:
/// The current state of a task's status records.
class ActiveTaskStatus {
enum : uintptr_t {
IsCancelled = 0x1,
IsLocked = 0x2,
RecordMask = ~uintptr_t(IsCancelled | IsLocked)
/// The current running priority of the task.
PriorityMask = 0xFF,
/// Has the task been cancelled?
IsCancelled = 0x100,
/// Whether the task status is "locked", meaning that further
/// accesses need to wait on the task status record lock
IsLocked = 0x200,
/// Whether the running priority has been escalated above the
/// priority recorded in the Job header.
IsEscalated = 0x400,
/// Whether the task is actively running.
/// We don't really need to be tracking this in the runtime right
/// now, but we will need to eventually track enough information to
/// escalate the thread that's running a task, so doing the stores
/// necessary to maintain this gives us a more realistic baseline
/// for performance.
IsRunning = 0x800,
};
uintptr_t Value;
TaskStatusRecord *Record;
uintptr_t Flags;
ActiveTaskStatus(TaskStatusRecord *record, uintptr_t flags)
: Record(record), Flags(flags) {}
public:
constexpr ActiveTaskStatus() : Value(0) {}
ActiveTaskStatus(TaskStatusRecord *innermostRecord,
bool cancelled, bool locked)
: Value(reinterpret_cast<uintptr_t>(innermostRecord)
+ (locked ? IsLocked : 0)
+ (cancelled ? IsCancelled : 0)) {}
#ifdef __GLIBCXX__
/// We really don't want to provide this constructor, but in old
/// versions of libstdc++, std::atomic<T>::load incorrectly requires
/// the type to be default-constructible.
ActiveTaskStatus() = default;
#endif
constexpr ActiveTaskStatus(JobFlags flags)
: Record(nullptr), Flags(uintptr_t(flags.getPriority())) {}
/// Is the task currently cancelled?
bool isCancelled() const { return Value & IsCancelled; }
bool isCancelled() const { return Flags & IsCancelled; }
ActiveTaskStatus withCancelled() const {
return ActiveTaskStatus(Record, Flags | IsCancelled);
}
/// Is the task currently running?
/// Eventually we'll track this with more specificity, like whether
/// it's running on a specific thread, enqueued on a specific actor,
/// etc.
bool isRunning() const { return Flags & IsRunning; }
ActiveTaskStatus withRunning(bool isRunning) const {
return ActiveTaskStatus(Record, isRunning ? (Flags | IsRunning)
: (Flags & ~IsRunning));
}
/// Is there an active lock on the cancellation information?
bool isLocked() const { return Value & IsLocked; }
bool isLocked() const { return Flags & IsLocked; }
ActiveTaskStatus withLockingRecord(TaskStatusRecord *lockRecord) const {
assert(!isLocked());
assert(lockRecord->Parent == Record);
return ActiveTaskStatus(lockRecord, Flags | IsLocked);
}
JobPriority getStoredPriority() const {
return JobPriority(Flags & PriorityMask);
}
bool isStoredPriorityEscalated() const {
return Flags & IsEscalated;
}
ActiveTaskStatus withEscalatedPriority(JobPriority priority) const {
assert(priority > getStoredPriority());
return ActiveTaskStatus(Record,
(Flags & PriorityMask)
| IsEscalated | uintptr_t(priority));
}
ActiveTaskStatus withoutStoredPriorityEscalation() const {
assert(isStoredPriorityEscalated());
return ActiveTaskStatus(Record, Flags & ~IsEscalated);
}
/// Return the innermost cancellation record. Code running
/// asynchronously with this task should not access this record
/// without having first locked it; see swift_taskCancel.
TaskStatusRecord *getInnermostRecord() const {
return reinterpret_cast<TaskStatusRecord*>(Value & RecordMask);
return Record;
}
ActiveTaskStatus withInnermostRecord(TaskStatusRecord *newRecord) {
return ActiveTaskStatus(newRecord, Flags);
}
static TaskStatusRecord *getStatusRecordParent(TaskStatusRecord *ptr);
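
A worked example of the flag layout above (0x19 is `JobPriority::UserInitiated`; everything else follows the masks in the enum):

```cpp
#include <cstdint>

constexpr uintptr_t PriorityMask = 0xFF, IsCancelled = 0x100,
                    IsLocked = 0x200, IsEscalated = 0x400,
                    IsRunning = 0x800;

// A running, cancelled, unlocked task at stored priority 0x19:
constexpr uintptr_t flags = 0x19 | IsCancelled | IsRunning;   // 0x919
static_assert((flags & PriorityMask) == 0x19, "stored priority intact");
static_assert(!(flags & IsLocked) && !(flags & IsEscalated),
              "neither locked nor escalated");
```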
@@ -207,8 +271,8 @@ using TaskAllocator = StackAllocator<SlabCapacity>;
/// Private storage in an AsyncTask object.
struct AsyncTask::PrivateStorage {
/// The currently-active information about cancellation.
/// Currently one word.
std::atomic<ActiveTaskStatus> Status;
/// Currently two words.
swift::atomic<ActiveTaskStatus> Status;
/// The allocator for the task stack.
/// Currently 2 words + 8 bytes.
@@ -218,12 +282,12 @@ struct AsyncTask::PrivateStorage {
/// Currently one word.
TaskLocal::Storage Local;
PrivateStorage()
: Status(ActiveTaskStatus()),
PrivateStorage(JobFlags flags)
: Status(ActiveTaskStatus(flags)),
Local(TaskLocal::Storage()) {}
PrivateStorage(void *slab, size_t slabCapacity)
: Status(ActiveTaskStatus()),
PrivateStorage(JobFlags flags, void *slab, size_t slabCapacity)
: Status(ActiveTaskStatus(flags)),
Allocator(slab, slabCapacity),
Local(TaskLocal::Storage()) {}
@@ -251,13 +315,13 @@ AsyncTask::OpaquePrivateStorage::get() const {
return reinterpret_cast<const PrivateStorage &>(*this);
}
inline void AsyncTask::OpaquePrivateStorage::initialize(AsyncTask *task) {
new (this) PrivateStorage();
new (this) PrivateStorage(task->Flags);
}
inline void
AsyncTask::OpaquePrivateStorage::initializeWithSlab(AsyncTask *task,
void *slab,
size_t slabCapacity) {
new (this) PrivateStorage(slab, slabCapacity);
new (this) PrivateStorage(task->Flags, slab, slabCapacity);
}
inline void AsyncTask::OpaquePrivateStorage::complete(AsyncTask *task) {
get().complete(task);
@@ -278,6 +342,48 @@ inline bool AsyncTask::isCancelled() const {
.isCancelled();
}
inline void AsyncTask::flagAsRunning() {
auto oldStatus = _private().Status.load(std::memory_order_relaxed);
while (true) {
assert(!oldStatus.isRunning());
if (oldStatus.isLocked()) {
return flagAsRunning_slow();
}
auto newStatus = oldStatus.withRunning(true);
if (newStatus.isStoredPriorityEscalated()) {
newStatus = newStatus.withoutStoredPriorityEscalation();
Flags.setPriority(oldStatus.getStoredPriority());
}
if (_private().Status.compare_exchange_weak(oldStatus, newStatus,
std::memory_order_relaxed,
std::memory_order_relaxed))
return;
}
}
inline void AsyncTask::flagAsSuspended() {
auto oldStatus = _private().Status.load(std::memory_order_relaxed);
while (true) {
assert(oldStatus.isRunning());
if (oldStatus.isLocked()) {
return flagAsSuspended_slow();
}
auto newStatus = oldStatus.withRunning(false);
if (newStatus.isStoredPriorityEscalated()) {
newStatus = newStatus.withoutStoredPriorityEscalation();
Flags.setPriority(oldStatus.getStoredPriority());
}
if (_private().Status.compare_exchange_weak(oldStatus, newStatus,
std::memory_order_relaxed,
std::memory_order_relaxed))
return;
}
}
inline void AsyncTask::localValuePush(const HeapObject *key,
/* +1 */ OpaqueValue *value,
const Metadata *valueType) {

View File

@@ -185,6 +185,19 @@ static void waitForStatusRecordUnlock(AsyncTask *task,
}
}
enum class LockContext {
/// The lock is being acquired from within the running task.
OnTask,
/// The lock is being acquired asynchronously in order to cancel the
/// task.
Cancellation,
/// The lock is being acquired asynchronously in order to read the
/// status records for some other reason.
OtherAsynchronous
};
/// Acquire a task's status record lock and return the
/// previous value of its status record state.
///
@@ -195,10 +208,11 @@ static void waitForStatusRecordUnlock(AsyncTask *task,
static ActiveTaskStatus
acquireStatusRecordLock(AsyncTask *task,
Optional<StatusRecordLockRecord> &recordLockRecord,
bool forCancellation) {
auto loadOrdering = forCancellation
LockContext lockContext) {
auto loadOrdering = lockContext != LockContext::OnTask
? std::memory_order_acquire
: std::memory_order_relaxed;
bool forCancellation = lockContext == LockContext::Cancellation;
// Load the current state. We can use relaxed loads if this isn't
// for cancellation because (1) this operation should be synchronous
@@ -224,9 +238,7 @@ acquireStatusRecordLock(AsyncTask *task,
// try to just set the cancelled bit and return.
auto oldRecord = oldStatus.getInnermostRecord();
if (!oldRecord && forCancellation) {
ActiveTaskStatus newStatus(nullptr,
/*cancelled*/ true,
/*locked*/ false);
ActiveTaskStatus newStatus = oldStatus.withCancelled();
if (task->_private().Status.compare_exchange_weak(oldStatus, newStatus,
/*success*/ std::memory_order_relaxed,
/*failure*/ loadOrdering))
@@ -245,10 +257,10 @@ acquireStatusRecordLock(AsyncTask *task,
// Install the lock record as the active cancellation info, or
// restart if that fails.
bool newIsCancelled = forCancellation || oldStatus.isCancelled();
ActiveTaskStatus newStatus(&*recordLockRecord,
/*cancelled*/ newIsCancelled,
/*locked*/ true);
ActiveTaskStatus newStatus =
oldStatus.withLockingRecord(&*recordLockRecord);
if (forCancellation)
newStatus = newStatus.withCancelled();
if (task->_private().Status.compare_exchange_weak(oldStatus, newStatus,
/*success*/ std::memory_order_release,
/*failure*/ loadOrdering))
@@ -296,9 +308,7 @@ static bool swift_task_addStatusRecordImpl(TaskStatusRecord *newRecord) {
// Set the record as the new innermost record.
// We have to use a release on success to make the initialization of
// the new record visible to the cancelling thread.
ActiveTaskStatus newStatus(newRecord,
oldStatus.isCancelled(),
/*locked*/ false);
ActiveTaskStatus newStatus = oldStatus.withInnermostRecord(newRecord);
if (task->_private().Status.compare_exchange_weak(oldStatus, newStatus,
/*success*/ std::memory_order_release,
/*failure*/ std::memory_order_relaxed))
@@ -333,9 +343,7 @@ static bool swift_task_tryAddStatusRecordImpl(TaskStatusRecord *newRecord) {
// Set the record as the new innermost record.
// We have to use a release on success to make the initialization of
// the new record visible to the cancelling thread.
ActiveTaskStatus newStatus(newRecord,
/*cancelled*/ false,
/*locked*/ false);
ActiveTaskStatus newStatus = oldStatus.withInnermostRecord(newRecord);
if (task->_private().Status.compare_exchange_weak(oldStatus, newStatus,
/*success*/ std::memory_order_release,
/*failure*/ std::memory_order_relaxed))
@@ -358,9 +366,8 @@ static bool swift_task_removeStatusRecordImpl(TaskStatusRecord *record) {
// If the record is the innermost record, try to just pop it off.
if (oldStatus.getInnermostRecord() == record) {
ActiveTaskStatus newStatus(record->getParent(),
oldStatus.isCancelled(),
/*locked*/ false);
ActiveTaskStatus newStatus =
oldStatus.withInnermostRecord(record->getParent());
if (status.compare_exchange_weak(oldStatus, newStatus,
/*success*/ std::memory_order_release,
/*failure*/ std::memory_order_relaxed)) {
@@ -380,7 +387,7 @@ static bool swift_task_removeStatusRecordImpl(TaskStatusRecord *record) {
// Acquire the status record lock.
Optional<StatusRecordLockRecord> recordLockRecord;
oldStatus = acquireStatusRecordLock(task, recordLockRecord,
/*forCancellation*/ false);
LockContext::OnTask);
assert(!oldStatus.isLocked());
// We can't observe the record to be the innermost record here because
@@ -420,7 +427,7 @@ static bool swift_task_hasTaskGroupStatusRecordImpl() {
// Acquire the status record lock.
auto oldStatus = acquireStatusRecordLock(task, recordLockRecord,
/*forCancellation*/ false);
LockContext::OnTask);
assert(!oldStatus.isLocked());
// Scan for the task group record within all the active records.
@@ -432,12 +439,7 @@ static bool swift_task_hasTaskGroupStatusRecordImpl() {
}
}
// Release the status record lock, being sure to flag that
// the task is now cancelled.
ActiveTaskStatus cancelledStatus(oldStatus.getInnermostRecord(),
/*cancelled*/ false, // FIXME: is this right, or must be the same as previous cancelled status?
/*locked*/ false);
releaseStatusRecordLock(task, cancelledStatus, recordLockRecord);
releaseStatusRecordLock(task, oldStatus, recordLockRecord);
return foundTaskGroupRecord;
}
@@ -550,11 +552,12 @@ static void swift_task_cancelImpl(AsyncTask *task) {
// Acquire the status record lock.
auto oldStatus = acquireStatusRecordLock(task, recordLockRecord,
/*forCancellation*/ true);
LockContext::Cancellation);
assert(!oldStatus.isLocked());
// If we were already cancelled or were able to cancel without acquiring
// the lock, there's nothing else to do.
// Lock acquisition will fail for LockContext::Cancellation if
// the task is already cancelled. In this case, we have nothing
// to do, not even releasing the lock.
if (oldStatus.isCancelled()) {
return;
}
@@ -570,9 +573,7 @@ static void swift_task_cancelImpl(AsyncTask *task) {
// Release the status record lock, being sure to flag that
// the task is now cancelled.
ActiveTaskStatus cancelledStatus(oldStatus.getInnermostRecord(),
/*cancelled*/ true,
/*locked*/ false);
ActiveTaskStatus cancelledStatus = oldStatus.withCancelled();
releaseStatusRecordLock(task, cancelledStatus, recordLockRecord);
}
@@ -587,19 +588,15 @@ static void swift_task_cancel_group_child_tasksImpl(TaskGroup *group) {
// We are NOT cancelling the entire parent task though.
auto task = swift_task_getCurrent();
auto oldStatus = acquireStatusRecordLock(task, recordLockRecord,
/*forCancellation*/ false);
LockContext::OnTask);
// Carry out the cancellation operations associated with all
// the active records.
for (auto cur: oldStatus.records()) {
performGroupCancellationAction(cur);
}
// Release the status record lock, being sure to flag that
// the task is now cancelled.
ActiveTaskStatus cancelledStatus(oldStatus.getInnermostRecord(),
/*cancelled*/ oldStatus.isCancelled(),
/*locked*/ false);
releaseStatusRecordLock(task, cancelledStatus, recordLockRecord);
// Release the status record lock, restoring exactly the old status.
releaseStatusRecordLock(task, oldStatus, recordLockRecord);
}
/**************************************************************************/
@@ -655,26 +652,23 @@ JobPriority
static swift_task_escalateImpl(AsyncTask *task, JobPriority newPriority) {
Optional<StatusRecordLockRecord> recordLockRecord;
// Fast path: check that the task's priority is not already at least
// as high as the target. The task's priority can only be modified
// under the status record lock; it's possible that the priority could
// be getting simultaneously escalated, but it's okay for us to return
// before that's complete.
if (task->Flags.getPriority() >= newPriority)
return task->Flags.getPriority();
// Fast path: check that the stored priority is already at least
// as high as the desired priority.
auto oldStatus = task->_private().Status.load(std::memory_order_relaxed);
if (oldStatus.getStoredPriority() >= newPriority)
return oldStatus.getStoredPriority();
// Acquire the status record lock.
auto oldStatus = acquireStatusRecordLock(task, recordLockRecord,
/*forCancellation*/ false);
// Acquire the status record lock. This has to do a load-acquire
// because we need to read the status records.
oldStatus = acquireStatusRecordLock(task, recordLockRecord,
LockContext::OtherAsynchronous);
assert(!oldStatus.isLocked());
// Now that we have the task's status lock, check again that the
// priority is still too low.
auto priorityToReturn = task->Flags.getPriority();
if (priorityToReturn < newPriority) {
// Change the priority.
task->Flags.setPriority(newPriority);
priorityToReturn = newPriority;
auto newStatus = oldStatus;
if (oldStatus.getStoredPriority() < newPriority) {
newStatus = oldStatus.withEscalatedPriority(newPriority);
// TODO: attempt to escalate the thread running the task, if it's
// currently running. This probably requires the task to be enqueued
@@ -687,9 +681,43 @@ static swift_task_escalateImpl(AsyncTask *task, JobPriority newPriority) {
}
// Release the status record lock, restoring the old status.
releaseStatusRecordLock(task, oldStatus, recordLockRecord);
releaseStatusRecordLock(task, newStatus, recordLockRecord);
return priorityToReturn;
return newStatus.getStoredPriority();
}
void AsyncTask::flagAsRunning_slow() {
Optional<StatusRecordLockRecord> recordLockRecord;
auto oldStatus = acquireStatusRecordLock(this, recordLockRecord,
LockContext::OnTask);
assert(!oldStatus.isLocked());
assert(!oldStatus.isRunning());
auto newStatus = oldStatus.withRunning(true);
if (newStatus.isStoredPriorityEscalated()) {
newStatus = newStatus.withoutStoredPriorityEscalation();
Flags.setPriority(oldStatus.getStoredPriority());
}
releaseStatusRecordLock(this, newStatus, recordLockRecord);
}
void AsyncTask::flagAsSuspended_slow() {
Optional<StatusRecordLockRecord> recordLockRecord;
auto oldStatus = acquireStatusRecordLock(this, recordLockRecord,
LockContext::OnTask);
assert(!oldStatus.isLocked());
assert(oldStatus.isRunning());
auto newStatus = oldStatus.withRunning(false);
if (newStatus.isStoredPriorityEscalated()) {
newStatus = newStatus.withoutStoredPriorityEscalation();
Flags.setPriority(oldStatus.getStoredPriority());
}
releaseStatusRecordLock(this, newStatus, recordLockRecord);
}
/**************************************************************************/

View File

@@ -1,4 +1,4 @@
// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch -parse-as-library)
// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch -parse-as-library) | %FileCheck %s --dump-input=always
// REQUIRES: executable_test
// REQUIRES: concurrency
@@ -21,14 +21,11 @@ func test_detach_cancel_child_early() async {
let xx = await childCancelled
print("child, cancelled: \(xx)") // CHECK: child, cancelled: true
let cancelled = Task.isCancelled
print("self, cancelled: \(cancelled )") // CHECK: self, cancelled: true
let cancelled = Task.isCancelled
print("self, cancelled: \(cancelled)") // CHECK: self, cancelled: true
return cancelled
}
// no sleep here -- this confirms that the child task `x`
// carries the cancelled flag, as it is started from a cancelled task.
h.cancel()
print("handle cancel")
let got = try! await h.value

View File

@@ -1,4 +1,4 @@
// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch -parse-as-library)
// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch -parse-as-library) | %FileCheck %s
// REQUIRES: executable_test
// REQUIRES: concurrency
@@ -10,11 +10,13 @@
import Dispatch
let seconds: UInt64 = 1_000_000_000
@available(SwiftStdlib 5.5, *)
func test_detach_cancel_while_child_running() async {
let h: Task<Bool, Error> = detach {
let task: Task<Bool, Error> = Task.detached {
async let childCancelled: Bool = { () -> Bool in
await Task.sleep(3_000_000_000)
await Task.sleep(3 * seconds)
return Task.isCancelled
}()
@@ -26,11 +28,66 @@ func test_detach_cancel_while_child_running() async {
}
// sleep here, i.e. give the task a moment to start running
await Task.sleep(2_000_000_000)
await Task.sleep(2 * seconds)
h.cancel()
print("handle cancel")
let got = try! await h.get()
task.cancel()
print("task.cancel()")
let got = try! await task.get()
print("was cancelled: \(got)") // CHECK: was cancelled: true
}
@available(SwiftStdlib 5.5, *)
func test_cancel_while_withTaskCancellationHandler_inflight() async {
let task: Task<Bool, Error> = Task.detached {
await withTaskCancellationHandler {
await Task.sleep(2 * seconds)
print("operation-1")
await Task.sleep(1 * seconds)
print("operation-2")
return Task.isCancelled
} onCancel: {
print("onCancel")
}
}
await Task.sleep(1 * seconds)
// CHECK: task.cancel()
// CHECK: onCancel
// CHECK: operation-1
// CHECK: operation-2
print("task.cancel()")
task.cancel()
let got = try! await task.get()
print("was cancelled: \(got)") // CHECK: was cancelled: true
}
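
In the in-flight case the handler fires as soon as `cancel()` lands, while the operation itself keeps running to completion and merely observes `Task.isCancelled`. A condensed sketch of the same shape (function name illustrative):

```swift
func handlerFiresWhileOperationRuns() async {
    let t = Task.detached { () -> Bool in
        await withTaskCancellationHandler {
            await Task.sleep(2_000_000_000)
            return Task.isCancelled // cancellation does not abort the body
        } onCancel: {
            print("onCancel") // runs promptly when cancel() arrives
        }
    }
    await Task.sleep(1_000_000_000)
    t.cancel()
    print("was cancelled: \(await t.value)") // expected: true
}
```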
@available(SwiftStdlib 5.5, *)
func test_cancel_while_withTaskCancellationHandler_onlyOnce() async {
let task: Task<Bool, Error> = Task.detached {
await withTaskCancellationHandler {
await Task.sleep(2 * seconds)
await Task.sleep(2 * seconds)
await Task.sleep(2 * seconds)
print("operation-done")
return Task.isCancelled
} onCancel: {
print("onCancel")
}
}
await Task.sleep(1 * seconds)
// CHECK: task.cancel()
// CHECK: onCancel
// onCancel runs only once, even though we attempt to cancel the task many times
// CHECK-NEXT: operation-done
print("task.cancel()")
task.cancel()
task.cancel()
task.cancel()
let got = try! await task.get()
print("was cancelled: \(got)") // CHECK: was cancelled: true
}
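
The second test pins down idempotence: however many times `cancel()` is called, the transition happens once and the handler does not re-fire. The same guarantee in isolation (a sketch, names illustrative):

```swift
func cancelIsIdempotent() async {
    let t = Task.detached { () -> Bool in
        await Task.sleep(2_000_000_000)
        return Task.isCancelled
    }
    t.cancel()
    t.cancel() // no-ops: the task is already cancelled, and a
    t.cancel() // cancellation handler, if any, would not run again
    print("was cancelled: \(await t.value)") // expected: true
}
```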
@@ -38,5 +95,7 @@ func test_detach_cancel_while_child_running() async {
@main struct Main {
static func main() async {
await test_detach_cancel_while_child_running()
await test_cancel_while_withTaskCancellationHandler_inflight()
await test_cancel_while_withTaskCancellationHandler_onlyOnce()
}
}

View File

@@ -58,22 +58,7 @@ bb0:
// CHECK: call swiftcc void @not_async_test()
// Arrive at the await_async_continuation point.
// CHECK: [[synchronization_addr_before_await:%.*]] = getelementptr inbounds %swift.continuation_context, %swift.continuation_context* [[cont_context]], i32 0, i32 1
// CHECK: [[first_at_sync_pt:%.*]] = cmpxchg [[INT]]* [[synchronization_addr_before_await]], {{(i64|i32)}} 0, {{(i64|i32)}} 1 release acquire
// CHECK: [[first_at_sync_pt_bool:%.*]] = extractvalue { {{(i64|i32)}}, i1 } [[first_at_sync_pt]], 1
// CHECK: br i1 [[first_at_sync_pt_bool]], label %await.async.abort, label %await.async.resume
// Abort if we are the first to arrive at the await/or continuation point --
// we must wait on the other to arrive.
// CHECK: await.async.abort:
// CHECK: br label %coro.end
// CHECK: coro.end:
// CHECK: call i1 (i8*, i1, ...) @llvm.coro.end.async(
// CHECK: unreachable
// CHECK: await.async.resume:
// CHECK: call { i8* } (i32, i8*, i8*, ...) @llvm.coro.suspend.async{{.*}}({{.*}} @__swift_async_resume_project_context{{.*}}@__swift_suspend_dispatch_1
// CHECK: [[suspend:%.*]] = call { i8* } (i32, i8*, i8*, ...) @llvm.coro.suspend.async.sl_p0i8s(i32 0, i8* [[resume_intrinsic]], i8* bitcast (i8* (i8*)* @__swift_async_resume_project_context to i8*), i8* bitcast (void (%swift.continuation_context*)* @__swift_continuation_await_point to i8*), %swift.continuation_context* [[cont_context]])
// CHECK: [[result_addr_addr:%.*]] = getelementptr inbounds %swift.continuation_context, %swift.continuation_context* [[cont_context]], i32 0, i32 3
// CHECK: [[result_addr:%.*]] = load %swift.opaque*, %swift.opaque** [[result_addr_addr]]
// CHECK: [[typed_result_addr:%.*]] = bitcast %swift.opaque* [[result_addr]] to i32*
@@ -81,12 +66,15 @@ bb0:
// CHECK: br label %[[result_bb:[0-9]+]]
// CHECK: [[result_bb]]:
// CHECK: phi i32 [ [[result_value]], %await.async.resume ]
// CHECK: phi i32 [ [[result_value]], %entry ]
// CHECK: define {{.*}} void @__swift_continuation_await_point(%swift.continuation_context* %0)
// CHECK: {{musttail call swifttailcc|tail call swiftcc}} void @swift_continuation_await(%swift.continuation_context* %0)
// CHECK-NEXT: ret void
// CHECK: define {{.*}} void @__swift_suspend_dispatch_1(i8* %0, i8* %1)
// CHECK: define {{.*}} void @__swift_suspend_dispatch_1(i8* %0, %swift.context* %1)
// CHECK-NOT: define
// CHECK: tail call swift{{(tail)?}}cc void %{{.*}}(i8* swiftasync %1)
// CHECK: tail call swift{{(tail)?}}cc void %{{.*}}(%swift.context* swiftasync %1)
// CHECK-NEXT: ret void
sil @async_continuation : $@async () -> () {
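
The rewritten CHECK lines reflect the new lowering: instead of an inline cmpxchg race between resume and await, the await point tail-calls into the runtime (`swift_continuation_await`) via `llvm.coro.suspend.async`. At the source level, this IR is roughly what a checked continuation compiles down to; a minimal sketch of that construct, not of this SIL test itself:

```swift
func resumeImmediately() async -> Int {
    await withCheckedContinuation { (cont: CheckedContinuation<Int, Never>) in
        // Resuming before the suspend point is reached exercises the
        // "other side already arrived" path the runtime now handles.
        cont.resume(returning: 42)
    }
}
```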

View File

@@ -0,0 +1,27 @@
// RUN: %empty-directory(%t)
// RUN: %empty-directory(%t/missing)
// RUN: %empty-directory(%t/inputs)
// RUN: %target-swift-frontend -emit-module-path %t/missing/Foo.swiftmodule -enable-library-evolution -emit-module-interface-path %t/inputs/Foo.swiftinterface -enable-objc-interop -disable-objc-attr-requires-foundation-module -module-name Foo %s
// RUN: %FileCheck --input-file %t/inputs/Foo.swiftinterface %s
// RUN: touch %t/Bar.swift
// RUN: echo "import Foo" > %t/Bar.swift
// RUN: echo "let f = Field()" >> %t/Bar.swift
// RUN: %target-swift-frontend -emit-module-path %t/Bar.swiftmodule -enable-library-evolution -enable-objc-interop -disable-objc-attr-requires-foundation-module -module-name Bar %t/Bar.swift -I %t/inputs
import Swift
public struct Field {
public init() {}
public var area: Int {
__consuming get { return 1 }
_modify {
var a = 1
yield &a
}
}
}
// CHECK: __consuming get
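
The interface check matters because clients compiled against it (like the generated `Bar.swift` above) must agree with the module about which accessors `area` has. A rough sketch of client code exercising both accessors, assuming `Foo` is built as in the RUN lines (note that this particular `_modify` yields a local, so the write is discarded; the point is only which accessor is dispatched):

```swift
import Foo

func useField() {
    var f = Field()
    let n = f.area // dispatches to the __consuming getter
    f.area += 1    // compound assignment runs through the _modify coroutine
    print(n)
}
```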

View File

@@ -1,9 +1,9 @@
// RUN: %empty-directory(%t)
// RUN: mkdir -p %t/clang-module-cache
// RUN: %target-swift-frontend -scan-dependencies -module-cache-path %t/clang-module-cache %s -o %t/deps.json -I %S/Inputs/CHeaders -I %S/Inputs/Swift -emit-dependencies -emit-dependencies-path %t/deps.d -import-objc-header %S/Inputs/CHeaders/Bridging.h -swift-version 4
// Check the contents of the JSON output
// RUN: %FileCheck %s < %t/deps.json
// RUN: %FileCheck -check-prefix CHECK_NO_CLANG_TARGET %s < %t/deps.json
// Check the contents of the JSON output
// RUN: %FileCheck %s -check-prefix CHECK-NO-SEARCH-PATHS < %t/deps.json
@@ -22,7 +22,12 @@
// Ensure that round-trip serialization does not affect result
// RUN: %target-swift-frontend -scan-dependencies -test-dependency-scan-cache-serialization -module-cache-path %t/clang-module-cache %s -o %t/deps.json -I %S/Inputs/CHeaders -I %S/Inputs/Swift -import-objc-header %S/Inputs/CHeaders/Bridging.h -swift-version 4
// RUN: %FileCheck %s < %t/deps.json
// RUN: %FileCheck -check-prefix CHECK_NO_CLANG_TARGET %s < %t/deps.json
// Ensure that scanning with `-clang-target` keeps Swift modules' respective PCM-dependency-build-argument sets free of target triples.
// RUN: %target-swift-frontend -scan-dependencies -module-cache-path %t/clang-module-cache %s -o %t/deps_clang_target.json -I %S/Inputs/CHeaders -I %S/Inputs/Swift -import-objc-header %S/Inputs/CHeaders/Bridging.h -swift-version 4 -clang-target %target-cpu-apple-macosx10.14
// Check the contents of the JSON output
// RUN: %FileCheck -check-prefix CHECK_CLANG_TARGET %s < %t/deps_clang_target.json
// REQUIRES: executable_test
// REQUIRES: objc_interop
@@ -178,10 +183,13 @@ import SubE
// CHECK: "-swift-version"
// CHECK: "5"
// CHECK: ],
// CHECK" "extraPcmArgs": [
// CHECK" "-target",
// CHECK" "-fapinotes-swift-version=5"
// CHECK" ]
// CHECK_NO_CLANG_TARGET: "extraPcmArgs": [
// CHECK_NO_CLANG_TARGET-NEXT: "-Xcc",
// CHECK_NO_CLANG_TARGET-NEXT: "-target",
// CHECK_CLANG_TARGET: "extraPcmArgs": [
// CHECK_CLANG_TARGET-NEXT: "-Xcc",
// CHECK_CLANG_TARGET-NEXT: "-fapinotes-swift-version={{.*}}"
// CHECK_CLANG_TARGET-NEXT: ]
/// --------Swift module Swift
// CHECK-LABEL: "modulePath": "Swift.swiftmodule",

View File

@@ -25,8 +25,8 @@ def main():
sys.exit(1)
with io.open(sys.argv[1], mode='r', encoding='utf8') as input_file, \
io.open(sys.argv[2], mode='w', encoding='utf8') as output_file, \
io.open(sys.argv[3], mode='w', encoding='utf8') as access_notes_file:
io.open(sys.argv[2], mode='w', encoding='utf8') as output_file, \
io.open(sys.argv[3], mode='w', encoding='utf8') as access_notes_file:
# Add header to access notes file
access_notes_file.write(u"""\

View File

@@ -162,6 +162,8 @@ template <class Context, class Fn>
static void parkTask(AsyncTask *task, Context *context, Fn &&fn) {
auto invoke =
TaskContinuationFromLambda<Fn, Context>::get(std::move(fn));
auto currentTask = swift_task_suspend();
EXPECT_EQ(task, currentTask);
task->ResumeTask = invoke;
task->ResumeContext = context;
}
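
The added assertion checks that `swift_task_suspend` hands back the very task being parked. The closest Swift-level view of "the current task" is `withUnsafeCurrentTask`; a small sketch (function name illustrative):

```swift
func inspectCurrentTask() async {
    withUnsafeCurrentTask { task in
        // Non-nil whenever this code runs inside a Swift task.
        print("have current task: \(task != nil)")
        print("cancelled: \(task?.isCancelled ?? false)")
    }
}
```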

View File

@@ -116,10 +116,6 @@ TEST_F(CompatibilityOverrideConcurrencyTest, test_swift_job_run) {
swift_job_run(nullptr, ExecutorRef::generic());
}
TEST_F(CompatibilityOverrideConcurrencyTest, test_swift_task_getCurrent) {
swift_task_getCurrent();
}
TEST_F(CompatibilityOverrideConcurrencyTest, test_swift_task_getCurrentExecutor) {
swift_task_getCurrentExecutor();
}