mirror of https://github.com/apple/swift.git
[Reflection] Add API for inspecting async task allocation slabs.
We remove the existing `swift_reflection_iterateAsyncTaskAllocations` API, which attempted to provide all necessary information about a task's allocations starting from the task. Instead, we split it into two pieces: `swift_reflection_asyncTaskSlabPointer` gets the first slab for a task, and `swift_reflection_asyncTaskSlabAllocations` gets the allocations in a slab along with a pointer to the next slab.

We also add a dummy metadata pointer to the beginning of each slab. This allows tools to identify slab allocations on the heap without needing to locate every single async task object. They can then call `swift_reflection_asyncTaskSlabAllocations` on such allocations to find out about their contents.

rdar://82549631
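Because every slab now starts with this dummy metadata word, a heap analysis tool can classify an arbitrary allocation without first finding a task. Here is a minimal sketch in C of that identification step, assuming a 64-bit target, a hypothetical readRemote() helper for out-of-process reads, and slabMetadataAddr already resolved to the remote address of _swift_concurrency_debug_asyncTaskSlabMetadata:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper supplied by the inspecting tool: copies len bytes
   from remote address addr into buf; returns nonzero on success. */
extern int readRemote(uint64_t addr, void *buf, size_t len);

static int isAsyncTaskSlab(uint64_t allocation, uint64_t slabMetadataAddr) {
  uint64_t firstWord = 0;
  if (!readRemote(allocation, &firstWord, sizeof(firstWord)))
    return 0;
  /* A slab's first pointer-sized word is the dummy metadata pointer. */
  return firstWord == slabMetadataAddr;
}

Allocations that pass this test can then be fed to swift_reflection_asyncTaskSlabAllocations to enumerate their contents.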
@@ -131,6 +131,12 @@ public:
     ChunkKind Kind;
   };
 
+  struct AsyncTaskSlabInfo {
+    StoredPointer NextSlab;
+    StoredSize SlabSize;
+    std::vector<AsyncTaskAllocationChunk> Chunks;
+  };
+
   explicit ReflectionContext(std::shared_ptr<MemoryReader> reader)
       : super(std::move(reader), *this)
   {}
@@ -1346,44 +1352,45 @@ public:
     return llvm::None;
   }
 
-  llvm::Optional<std::string> iterateAsyncTaskAllocations(
-      StoredPointer AsyncTaskPtr,
-      std::function<void(StoredPointer, unsigned, AsyncTaskAllocationChunk[])>
-          Call) {
-    using AsyncTask = AsyncTask<Runtime>;
+  std::pair<llvm::Optional<std::string>, AsyncTaskSlabInfo>
+  asyncTaskSlabAllocations(StoredPointer SlabPtr) {
     using StackAllocator = StackAllocator<Runtime>;
 
+    auto SlabBytes = getReader().readBytes(
+        RemoteAddress(SlabPtr), sizeof(typename StackAllocator::Slab));
+    auto Slab = reinterpret_cast<const typename StackAllocator::Slab *>(
+        SlabBytes.get());
+    if (!Slab)
+      return {std::string("failure reading slab"), {}};
+
+    // For now, we won't try to walk the allocations in the slab, we'll just
+    // provide the whole thing as one big chunk.
+    size_t HeaderSize =
+        llvm::alignTo(sizeof(*Slab), llvm::Align(alignof(std::max_align_t)));
+    AsyncTaskAllocationChunk Chunk;
+
+    Chunk.Start = SlabPtr + HeaderSize;
+    Chunk.Length = Slab->CurrentOffset;
+    Chunk.Kind = AsyncTaskAllocationChunk::ChunkKind::Unknown;
+
+    // Total slab size is the slab's capacity plus the slab struct itself.
+    StoredPointer SlabSize = Slab->Capacity + sizeof(*Slab);
+
+    return {llvm::None, {Slab->Next, SlabSize, {Chunk}}};
+  }
+
+  std::pair<llvm::Optional<std::string>, StoredPointer>
+  asyncTaskSlabPtr(StoredPointer AsyncTaskPtr) {
+    using AsyncTask = AsyncTask<Runtime>;
+
     auto AsyncTaskBytes =
         getReader().readBytes(RemoteAddress(AsyncTaskPtr), sizeof(AsyncTask));
     auto *AsyncTaskObj =
         reinterpret_cast<const AsyncTask *>(AsyncTaskBytes.get());
     if (!AsyncTaskObj)
-      return std::string("failure reading async task");
+      return {std::string("failure reading async task"), 0};
 
     StoredPointer SlabPtr = AsyncTaskObj->PrivateStorage.Allocator.FirstSlab;
-    while (SlabPtr) {
-      auto SlabBytes = getReader().readBytes(
-          RemoteAddress(SlabPtr), sizeof(typename StackAllocator::Slab));
-      auto Slab = reinterpret_cast<const typename StackAllocator::Slab *>(
-          SlabBytes.get());
-      if (!Slab)
-        return std::string("failure reading slab");
-
-      // For now, we won't try to walk the allocations in the slab, we'll just
-      // provide the whole thing as one big chunk.
-      size_t HeaderSize =
-          llvm::alignTo(sizeof(*Slab), llvm::Align(alignof(std::max_align_t)));
-      AsyncTaskAllocationChunk Chunk;
-
-      Chunk.Start = SlabPtr + HeaderSize;
-      Chunk.Length = Slab->CurrentOffset;
-      Chunk.Kind = AsyncTaskAllocationChunk::ChunkKind::Unknown;
-      Call(SlabPtr, 1, &Chunk);
-
-      SlabPtr = Slab->Next;
-    }
-
-    return llvm::None;
+    return {llvm::None, SlabPtr};
   }
 
 private:
@@ -86,6 +86,7 @@ struct StackAllocator {
   bool FirstSlabIsPreallocated;
 
   struct Slab {
+    typename Runtime::StoredPointer Metadata;
    typename Runtime::StoredPointer Next;
    uint32_t Capacity;
    uint32_t CurrentOffset;
@@ -2242,25 +2242,21 @@ private:
       return false;
     };
 
     bool isTypeContext = false;
     switch (auto contextKind = descriptor->getKind()) {
     case ContextDescriptorKind::Class:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Class;
       isTypeContext = true;
       break;
     case ContextDescriptorKind::Struct:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Structure;
       isTypeContext = true;
       break;
     case ContextDescriptorKind::Enum:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Enum;
       isTypeContext = true;
       break;
     case ContextDescriptorKind::Protocol: {
       if (!getContextName())
@@ -44,6 +44,9 @@ extern unsigned long long swift_reflection_classIsSwiftMask;
 ///     first attempted fix to use the right AsyncTask layout.
 /// 1 - Indicates that swift_reflection_iterateAsyncTaskAllocations has been
 ///     actually fixed to use the right AsyncTask layout.
+/// 2 - swift_reflection_iterateAsyncTaskAllocations has been replaced by
+///     swift_reflection_asyncTaskSlabPointer and
+///     swift_reflection_asyncTaskSlabAllocations.
 SWIFT_REMOTE_MIRROR_LINKAGE extern uint32_t swift_reflection_libraryVersion;
 
 /// Get the metadata version supported by the Remote Mirror library.
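A client that must also run against older libraries can gate its use of the new calls on this constant. A minimal sketch, assuming the client reads the variable from the Remote Mirror library it linked:

#include <stdint.h>

/* Declared above in SwiftRemoteMirror.h; this commit bumps it to 2. */
extern uint32_t swift_reflection_libraryVersion;

static int haveSlabAPI(void) {
  /* Version 2 is when the slab-pointer/slab-allocations pair replaced
     swift_reflection_iterateAsyncTaskAllocations. */
  return swift_reflection_libraryVersion >= 2;
}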
@@ -395,33 +398,40 @@ const char *swift_reflection_iterateMetadataAllocationBacktraces(
     SwiftReflectionContextRef ContextRef,
     swift_metadataAllocationBacktraceIterator Call, void *ContextPtr);
 
-/// Allocation iterator passed to swift_reflection_iterateAsyncTaskAllocations
-typedef void (*swift_asyncTaskAllocationIterator)(
-    swift_reflection_ptr_t AllocationPtr, unsigned Count,
-    swift_async_task_allocation_chunk_t Chunks[], void *ContextPtr);
-
-/// Iterate over the allocations associated with the given async task object.
+/// Get the first allocation slab for a given async task object.
 /// This object must have an isa value equal to
 /// _swift_concurrency_debug_asyncTaskMetadata.
 ///
-/// Calls the passed in Call function for each allocation associated with the
-/// async task object. The function is passed the allocation pointer and an
-/// array of chunks. Each chunk consists of a start, length, and kind for that
-/// chunk of the allocated memory. Any regions of the allocation that are not
-/// covered by a chunk are unallocated or garbage. The chunk array is valid only
-/// for the duration of the call.
-///
-/// An async task may have more than one allocation associated with it, so the
-/// function may be called more than once. It may also have no allocations, in
-/// which case the function is not called.
-///
+/// It is possible that the async task object hasn't allocated a slab yet, in
+/// which case the slab pointer will be NULL. If non-NULL, the returned slab
+/// pointer may be a separate heap allocation, or it may be interior to some
+/// allocation used by the task.
+SWIFT_REMOTE_MIRROR_LINKAGE
+swift_async_task_slab_return_t
+swift_reflection_asyncTaskSlabPointer(SwiftReflectionContextRef ContextRef,
+                                      swift_reflection_ptr_t AsyncTaskPtr);
+
+/// Iterate over the allocations in the given async task allocator slab.
+/// This allocation must have an "isa" value (scare quotes because it's not a
+/// real object) equal to _swift_concurrency_debug_asyncTaskSlabMetadata.
+///
+/// Calls the passed in Call function for each allocation in the slab. The
+/// function is passed the allocation pointer and an array of chunks. Each chunk
+/// consists of a start, length, and kind for that chunk of the allocated
+/// memory. Any regions of the allocation that are not covered by a chunk are
+/// unallocated or garbage. The chunk array is valid only for the duration of
+/// the call.
+///
+/// A slab may be part of a chain of slabs, so the
+/// function may be called more than once.
 ///
 /// Returns NULL on success. On error, returns a pointer to a C string
 /// describing the error. This pointer remains valid until the next
 /// swift_reflection call on the given context.
 SWIFT_REMOTE_MIRROR_LINKAGE
-const char *swift_reflection_iterateAsyncTaskAllocations(
-    SwiftReflectionContextRef ContextRef, swift_reflection_ptr_t AsyncTaskPtr,
-    swift_asyncTaskAllocationIterator Call, void *ContextPtr);
+swift_async_task_slab_allocations_return_t
+swift_reflection_asyncTaskSlabAllocations(SwiftReflectionContextRef ContextRef,
+                                          swift_reflection_ptr_t SlabPtr);
 
 #ifdef __cplusplus
 } // extern "C"
@@ -189,12 +189,42 @@ typedef struct swift_metadata_cache_node {
   swift_reflection_ptr_t Right;
 } swift_metadata_cache_node_t;
 
+/// The return value when getting an async task's slab pointer.
+typedef struct swift_async_task_slab_return {
+  /// On failure, a pointer to a string describing the error. On success, NULL.
+  /// This pointer remains valid until the next
+  /// swift_reflection call on the given context.
+  const char *Error;
+
+  /// The task's slab pointer, if no error occurred.
+  swift_reflection_ptr_t SlabPtr;
+} swift_async_task_slab_return_t;
+
 typedef struct swift_async_task_allocation_chunk {
   swift_reflection_ptr_t Start;
   unsigned Length;
   swift_layout_kind_t Kind;
 } swift_async_task_allocation_chunk_t;
 
+typedef struct swift_async_task_slab_allocations_return {
+  /// On failure, a pointer to a string describing the error. On success, NULL.
+  /// This pointer remains valid until the next
+  /// swift_reflection call on the given context.
+  const char *Error;
+
+  /// The remote pointer to the next slab, or NULL/0 if none.
+  swift_reflection_ptr_t NextSlab;
+
+  /// The size of the entire slab, in bytes.
+  unsigned SlabSize;
+
+  /// The number of chunks pointed to by Chunks.
+  unsigned ChunkCount;
+
+  /// A pointer to the chunks, if no error occurred.
+  swift_async_task_allocation_chunk_t *Chunks;
+} swift_async_task_slab_allocations_return_t;
+
 /// An opaque pointer to a context which maintains state and
 /// caching of reflection structure for heap instances.
 typedef struct SwiftReflectionContext *SwiftReflectionContextRef;
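With these two structs, enumerating a task's allocations becomes a simple loop: fetch the first slab, then follow NextSlab until it is 0. A sketch of a client in C follows; rc and task are assumed to come from the surrounding tool, and the swift-reflection-test changes later in this commit follow the same pattern:

#include <inttypes.h>
#include <stdio.h>
#include "swift/SwiftRemoteMirror/SwiftRemoteMirror.h"

static void dumpTaskSlabs(SwiftReflectionContextRef rc,
                          swift_reflection_ptr_t task) {
  swift_async_task_slab_return_t first =
      swift_reflection_asyncTaskSlabPointer(rc, task);
  if (first.Error) {
    fprintf(stderr, "error: %s\n", first.Error);
    return;
  }
  /* A NULL slab pointer just means the task hasn't allocated yet. */
  for (swift_reflection_ptr_t slab = first.SlabPtr; slab;) {
    swift_async_task_slab_allocations_return_t r =
        swift_reflection_asyncTaskSlabAllocations(rc, slab);
    if (r.Error) {
      fprintf(stderr, "error: %s\n", r.Error);
      break;
    }
    printf("slab %#" PRIx64 " size %u\n", (uint64_t)slab, r.SlabSize);
    for (unsigned i = 0; i < r.ChunkCount; i++)
      printf("  chunk %#" PRIx64 " length %u\n",
             (uint64_t)r.Chunks[i].Start, r.Chunks[i].Length);
    slab = r.NextSlab; /* 0 terminates the chain */
  }
}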
@@ -32,6 +32,10 @@ const void *const _swift_concurrency_debug_jobMetadata;
 SWIFT_EXPORT_FROM(swift_Concurrency)
 const void *const _swift_concurrency_debug_asyncTaskMetadata;
 
+/// A fake metadata pointer placed at the start of async task slab allocations.
+SWIFT_EXPORT_FROM(swift_Concurrency)
+const void *const _swift_concurrency_debug_asyncTaskSlabMetadata;
+
 } // namespace swift
 
 #endif
@@ -68,6 +68,10 @@ using namespace swift;
 using FutureFragment = AsyncTask::FutureFragment;
 using TaskGroup = swift::TaskGroup;
 
+Metadata swift::TaskAllocatorSlabMetadata;
+const void *const swift::_swift_concurrency_debug_asyncTaskSlabMetadata =
+    &TaskAllocatorSlabMetadata;
+
 void FutureFragment::destroy() {
   auto queueHead = waitQueue.load(std::memory_order_acquire);
   switch (queueHead.getStatus()) {
@@ -270,8 +270,9 @@ public:
 
 /// The size of an allocator slab.
 static constexpr size_t SlabCapacity = 1000;
+extern Metadata TaskAllocatorSlabMetadata;
 
-using TaskAllocator = StackAllocator<SlabCapacity>;
+using TaskAllocator = StackAllocator<SlabCapacity, &TaskAllocatorSlabMetadata>;
 
 /// Private storage in an AsyncTask object.
 struct AsyncTask::PrivateStorage {
@@ -18,7 +18,7 @@ extern "C" {
 SWIFT_REMOTE_MIRROR_LINKAGE
 unsigned long long swift_reflection_classIsSwiftMask = 2;
 
-SWIFT_REMOTE_MIRROR_LINKAGE uint32_t swift_reflection_libraryVersion = 1;
+SWIFT_REMOTE_MIRROR_LINKAGE uint32_t swift_reflection_libraryVersion = 2;
 }
 
 #include "swift/Demangling/Demangler.h"
@@ -43,6 +43,7 @@ struct SwiftReflectionContext {
   std::vector<std::function<void()>> freeFuncs;
   std::vector<std::tuple<swift_addr_t, swift_addr_t>> dataSegments;
   std::string lastString;
+  std::vector<swift_async_task_allocation_chunk_t> lastChunks;
 
   SwiftReflectionContext(MemoryReaderImpl impl) {
     auto Reader = std::make_shared<CMemoryReader>(impl);
@@ -774,22 +775,46 @@ const char *swift_reflection_iterateMetadataAllocationBacktraces(
   return returnableCString(ContextRef, Error);
 }
 
-const char *swift_reflection_iterateAsyncTaskAllocations(
-    SwiftReflectionContextRef ContextRef, swift_reflection_ptr_t AsyncTaskPtr,
-    swift_asyncTaskAllocationIterator Call, void *ContextPtr) {
+swift_async_task_slab_return_t
+swift_reflection_asyncTaskSlabPointer(SwiftReflectionContextRef ContextRef,
+                                      swift_reflection_ptr_t AsyncTaskPtr) {
   auto Context = ContextRef->nativeContext;
-  auto Error = Context->iterateAsyncTaskAllocations(
-      AsyncTaskPtr, [&](auto AllocationPtr, auto Count, auto Chunks) {
-        std::vector<swift_async_task_allocation_chunk_t> ConvertedChunks;
-        ConvertedChunks.reserve(Count);
-        for (unsigned i = 0; i < Count; i++) {
-          swift_async_task_allocation_chunk_t Chunk;
-          Chunk.Start = Chunks[i].Start;
-          Chunk.Length = Chunks[i].Length;
-          Chunk.Kind = convertAllocationChunkKind(Chunks[i].Kind);
-          ConvertedChunks.push_back(Chunk);
-        }
-        Call(AllocationPtr, Count, ConvertedChunks.data(), ContextPtr);
-      });
-  return returnableCString(ContextRef, Error);
+  llvm::Optional<std::string> Error;
+  NativeReflectionContext::StoredPointer SlabPtr;
+  std::tie(Error, SlabPtr) = Context->asyncTaskSlabPtr(AsyncTaskPtr);
+
+  swift_async_task_slab_return_t Result = {};
+  Result.Error = returnableCString(ContextRef, Error);
+  Result.SlabPtr = SlabPtr;
+  return Result;
+}
+
+swift_async_task_slab_allocations_return_t
+swift_reflection_asyncTaskSlabAllocations(SwiftReflectionContextRef ContextRef,
+                                          swift_reflection_ptr_t SlabPtr) {
+  auto Context = ContextRef->nativeContext;
+  llvm::Optional<std::string> Error;
+  NativeReflectionContext::AsyncTaskSlabInfo Info;
+  std::tie(Error, Info) = Context->asyncTaskSlabAllocations(SlabPtr);
+
+  swift_async_task_slab_allocations_return_t Result = {};
+  Result.Error = returnableCString(ContextRef, Error);
+
+  Result.NextSlab = Info.NextSlab;
+  Result.SlabSize = Info.SlabSize;
+
+  ContextRef->lastChunks.clear();
+  ContextRef->lastChunks.reserve(Info.Chunks.size());
+  for (auto &Chunk : Info.Chunks) {
+    swift_async_task_allocation_chunk_t ConvertedChunk;
+    ConvertedChunk.Start = Chunk.Start;
+    ConvertedChunk.Length = Chunk.Length;
+    ConvertedChunk.Kind = convertAllocationChunkKind(Chunk.Kind);
+    ContextRef->lastChunks.push_back(ConvertedChunk);
+  }
+
+  Result.ChunkCount = ContextRef->lastChunks.size();
+  Result.Chunks = ContextRef->lastChunks.data();
+
+  return Result;
 }
@@ -30,7 +30,7 @@ namespace swift {
 ///
 /// StackAllocator performs fast allocation and deallocation of memory by
 /// implementing a bump-pointer allocation strategy.
 ///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
 /// memory rather than relying on a boundless contiguous heap. However, it has
 /// bump-pointer semantics in that it is a monotonically growing pool of memory
@@ -45,7 +45,10 @@ namespace swift {
 /// It's possible to place the first slab into pre-allocated memory.
 ///
 /// The SlabCapacity specifies the capacity for newly allocated slabs.
-template <size_t SlabCapacity>
+///
+/// SlabMetadataPtr specifies a fake metadata pointer to place at the beginning
+/// of slab allocations, so analysis tools can identify them.
+template <size_t SlabCapacity, Metadata *SlabMetadataPtr>
 class StackAllocator {
 private:
@@ -86,6 +89,10 @@ private:
   /// This struct is actually just the slab header. The slab buffer is tail
   /// allocated after Slab.
   struct Slab {
+    /// A fake metadata pointer that analysis tools can use to identify slab
+    /// allocations.
+    const void *metadata;
+
     /// A single linked list of all allocated slabs.
     Slab *next = nullptr;
@@ -95,7 +102,8 @@ private:
 
     // Here starts the tail allocated memory buffer of the slab.
 
-    Slab(size_t newCapacity) : capacity(newCapacity) {
+    Slab(size_t newCapacity)
+        : metadata(SlabMetadataPtr), capacity(newCapacity) {
       assert((size_t)capacity == newCapacity && "capacity overflow");
     }
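For reference, the remote side treats everything after the max-aligned slab header as potentially live allocation data. A standalone sketch in C of that layout arithmetic, using a hypothetical SlabHeader mirror of the fields above and the same rounding ReflectionContext performs with llvm::alignTo:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of the slab header after this change: a leading
   metadata word, then the existing bookkeeping fields. */
typedef struct SlabHeader {
  const void *metadata;    /* fake metadata pointer identifying the slab */
  struct SlabHeader *next; /* next slab in the chain */
  uint32_t capacity;       /* usable bytes after the header */
  uint32_t currentOffset;  /* bytes currently handed out */
} SlabHeader;

/* User allocations begin after the header, rounded up to the maximum
   alignment; the chunk reported for a slab starts here and spans
   currentOffset bytes. */
static uintptr_t firstChunkStart(uintptr_t slabPtr) {
  size_t align = _Alignof(max_align_t);
  size_t headerSize = (sizeof(SlabHeader) + align - 1) & ~(align - 1);
  return slabPtr + headerSize;
}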
@@ -658,22 +658,39 @@ int reflectEnumValue(SwiftReflectionContextRef RC,
 
 }
 
-static void
-asyncTaskIterationCallback(swift_reflection_ptr_t AllocationPtr, unsigned Count,
-                           swift_async_task_allocation_chunk_t Chunks[],
-                           void *ContextPtr) {
-  printf("  Allocation block %#" PRIx64 "\n", (uint64_t)AllocationPtr);
-  for (unsigned i = 0; i < Count; i++)
-    printf("    Chunk at %#" PRIx64 " length %u kind %u\n",
-           (uint64_t)Chunks[i].Start, Chunks[i].Length, Chunks[i].Kind);
-}
-
 int reflectAsyncTask(SwiftReflectionContextRef RC,
                      const PipeMemoryReader Pipe) {
   uintptr_t AsyncTaskInstance = PipeMemoryReader_receiveInstanceAddress(&Pipe);
   printf("Async task %#" PRIx64 "\n", (uint64_t)AsyncTaskInstance);
-  swift_reflection_iterateAsyncTaskAllocations(
-      RC, AsyncTaskInstance, asyncTaskIterationCallback, NULL);
+
+  swift_async_task_slab_return_t SlabPtrResult =
+      swift_reflection_asyncTaskSlabPointer(RC, AsyncTaskInstance);
+  if (SlabPtrResult.Error) {
+    printf("swift_reflection_asyncTaskSlabPointer failed: %s\n",
+           SlabPtrResult.Error);
+  } else {
+    swift_reflection_ptr_t SlabPtr = SlabPtrResult.SlabPtr;
+    while (SlabPtr) {
+      printf("  Slab pointer %#" PRIx64 "\n", (uint64_t)SlabPtr);
+      swift_async_task_slab_allocations_return_t AllocationsResult =
+          swift_reflection_asyncTaskSlabAllocations(RC, SlabPtr);
+      if (AllocationsResult.Error) {
+        printf("swift_reflection_asyncTaskSlabAllocations failed: %s\n",
+               AllocationsResult.Error);
+        SlabPtr = 0;
+      } else {
+        printf("    Slab size %" PRIu64 "\n",
+               (uint64_t)AllocationsResult.SlabSize);
+        for (unsigned i = 0; i < AllocationsResult.ChunkCount; i++) {
+          swift_async_task_allocation_chunk_t Chunk =
+              AllocationsResult.Chunks[i];
+          printf("    Chunk at %#" PRIx64 " length %u kind %u\n",
+                 (uint64_t)Chunk.Start, Chunk.Length, Chunk.Kind);
+        }
+        SlabPtr = AllocationsResult.NextSlab;
+      }
+    }
+  }
 
   printf("\n\n");
   PipeMemoryReader_sendDoneMessage(&Pipe);
@@ -27,10 +27,12 @@ func add(_ a: UInt, _ b: UInt) async -> UInt {
     // The actual number of chunks we'll get depends on internal implementation
     // details that we don't want this test to depend on. We'll just make sure
    // we get at least two, and ignore the details.
-    // CHECK: Allocation block {{0x[0-9a-fA-F]*}}
-    // CHECK: Chunk at {{0x[0-9a-fA-F]*}} length {{[0-9]*}} kind {{[0-9]*}}
-    // CHECK: Allocation block {{0x[0-9a-fA-F]*}}
-    // CHECK: Chunk at {{0x[0-9a-fA-F]*}} length {{[0-9]*}} kind {{[0-9]*}}
+    // CHECK: Slab pointer {{0x[0-9a-fA-F]*}}
+    // CHECK: Slab size {{[0-9]{2,}()}}
+    // CHECK: Chunk at {{0x[0-9a-fA-F]*}} length {{[1-9][0-9]*}} kind {{[0-9]*}}
+    // CHECK: Slab pointer {{0x[0-9a-fA-F]*}}
+    // CHECK: Slab size {{[0-9]{2,}()}}
+    // CHECK: Chunk at {{0x[0-9a-fA-F]*}} length {{[1-9][0-9]*}} kind {{[0-9]*}}
     return a
   } else {
     return await add(a, b - 1) + 1
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "../../stdlib/public/runtime/StackAllocator.h"
+#include "swift/ABI/Metadata.h"
 #include "gtest/gtest.h"
 
 using namespace swift;
@@ -22,10 +23,13 @@ static constexpr size_t fitsIntoSlab = slabCapacity - 16;
 static constexpr size_t twoFitIntoSlab = slabCapacity / 2 - 32;
 static constexpr size_t exceedsSlab = slabCapacity + 16;
 
+static Metadata SlabMetadata;
+
 TEST(StackAllocatorTest, withPreallocatedSlab) {
 
   char firstSlab[firstSlabBufferCapacity];
-  StackAllocator<slabCapacity> allocator(firstSlab, firstSlabBufferCapacity);
+  StackAllocator<slabCapacity, &SlabMetadata> allocator(
+      firstSlab, firstSlabBufferCapacity);
 
   char *mem1 = (char *)allocator.alloc(fitsIntoFirstSlab);
   EXPECT_EQ(allocator.getNumAllocatedSlabs(), 0);
@@ -70,7 +74,7 @@ TEST(StackAllocatorTest, withoutPreallocatedSlab) {
 
   constexpr size_t slabCapacity = 256;
 
-  StackAllocator<slabCapacity> allocator;
+  StackAllocator<slabCapacity, &SlabMetadata> allocator;
 
   size_t fitsIntoSlab = slabCapacity - 16;
   size_t twoFitIntoSlab = slabCapacity / 2 - 32;