[Runtime] Change ConcurrentReadableArray's API to provide iterable snapshots rather than using a callback-based read call.

rdar://problem/40230581
Mike Ash
2018-05-21 12:30:46 -04:00
parent e27af10038
commit c7eeeb5a68
2 changed files with 124 additions and 119 deletions
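
The shape of the migration, as a hedged sketch at a hypothetical call site: `Element`, `process`, `use_array`, and the dummy `char` return are illustrative and not part of this commit; only `read` and `snapshot` come from the diffs below.

    #include "swift/Runtime/Concurrent.h"

    struct Element { int value; };
    void process(const Element &e);

    void use_array(swift::ConcurrentReadableArray<Element> &array) {
      // Before this commit, the element pointer was only valid inside the
      // callback passed to read(), and the callback had to return a value:
      //
      //   array.read([](const Element *ptr, size_t count) -> char {
      //     for (size_t i = 0; i < count; i++)
      //       process(ptr[i]);
      //     return 0;
      //   });

      // After: snapshot() returns an RAII object that holds a reader
      // reference for its lifetime, so the elements can be iterated
      // directly with a range-based for loop.
      for (const auto &elem : array.snapshot())
        process(elem);
    }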

include/swift/Runtime/Concurrent.h

@@ -451,7 +451,37 @@ private:
   Mutex WriterLock;
   std::vector<Storage *> FreeList;
 
+  void incrementReaders() {
+    ReaderCount.fetch_add(1, std::memory_order_acquire);
+  }
+
+  void decrementReaders() {
+    ReaderCount.fetch_sub(1, std::memory_order_release);
+  }
+
 public:
+  struct Snapshot {
+    ConcurrentReadableArray *Array;
+    const ElemTy *Start;
+    size_t Count;
+
+    Snapshot(ConcurrentReadableArray *array, const ElemTy *start, size_t count)
+      : Array(array), Start(start), Count(count) {}
+
+    Snapshot(const Snapshot &other)
+      : Array(other.Array), Start(other.Start), Count(other.Count) {
+      Array->incrementReaders();
+    }
+
+    ~Snapshot() {
+      Array->decrementReaders();
+    }
+
+    const ElemTy *begin() { return Start; }
+    const ElemTy *end() { return Start + Count; }
+    size_t count() { return Count; }
+  };
+
   // This type cannot be safely copied, moved, or deleted.
   ConcurrentReadableArray(const ConcurrentReadableArray &) = delete;
   ConcurrentReadableArray(ConcurrentReadableArray &&) = delete;
@@ -486,28 +516,16 @@ public:
     storage->deallocate();
   }
 
-  /// Read the contents of the array. The parameter `f` is called with
-  /// two parameters: a pointer to the elements in the array, and the
-  /// count. This represents a snapshot of the contents at the time
-  /// `read` was called. The pointer becomes invalid after `f` returns.
-  template <class F> auto read(F f) -> decltype(f(nullptr, 0)) {
-    ReaderCount.fetch_add(1, std::memory_order_acquire);
+  Snapshot snapshot() {
+    incrementReaders();
     auto *storage = Elements.load(SWIFT_MEMORY_ORDER_CONSUME);
+    if (storage == nullptr) {
+      return Snapshot(this, nullptr, 0);
+    }
+
     auto count = storage->Count.load(std::memory_order_acquire);
     const auto *ptr = storage->data();
-
-    decltype(f(nullptr, 0)) result = f(ptr, count);
-    ReaderCount.fetch_sub(1, std::memory_order_release);
-    return result;
-  }
-
-  /// Get the current count. It's just a snapshot and may be obsolete immediately.
-  size_t count() {
-    return read([](const ElemTy *ptr, size_t count) -> size_t {
-      return count;
-    });
-  }
+    return Snapshot(this, ptr, count);
+  }
 };
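
What keeps a snapshot valid is the reader count: snapshot() increments it before publishing the element pointer, Snapshot's copy constructor increments it again, and the destructor decrements it, so the array cannot recycle storage while any snapshot is alive. A minimal sketch of the accounting, assuming an illustrative `ConcurrentReadableArray<int>` and `use` function:

    void use(int x);

    void example(swift::ConcurrentReadableArray<int> &array) {
      auto snap = array.snapshot();  // snapshot() ran incrementReaders()
      {
        auto copy = snap;            // copy constructor: +1 reader
        for (int x : copy)           // begin()/end() walk the captured
          use(x);                    //   Start/Count pair
      }                              // copy destroyed: -1 reader
    }                                // snap destroyed: -1 reader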

stdlib/public/runtime/ProtocolConformance.cpp

@@ -326,14 +326,11 @@ void ConformanceState::verify() const {
   // Iterate over all of the sections and verify all of the protocol
   // descriptors.
   auto &Self = const_cast<ConformanceState &>(*this);
-  Self.SectionsToScan.read([](const ConformanceSection *ptr, size_t count) -> char {
-    for (size_t i = 0; i < count; i++) {
-      for (const auto &Record : ptr[i]) {
-        Record.get()->verify();
-      }
-    }
-    return 0;
-  });
+  for (const auto &Section : Self.SectionsToScan.snapshot()) {
+    for (const auto &Record : Section) {
+      Record.get()->verify();
+    }
+  }
 }
 
 #endif
@@ -445,7 +442,7 @@ recur:
   }
 
   // Check if the negative cache entry is up-to-date.
-  if (Value->getFailureGeneration() == C.SectionsToScan.count()) {
+  if (Value->getFailureGeneration() == C.SectionsToScan.snapshot().count()) {
     // Negative cache entry is up-to-date. Return failure along with
     // the original query type's own cache entry, if we found one.
     // (That entry may be out of date but the caller still has use for it.)
@@ -546,100 +543,94 @@ swift_conformsToProtocolImpl(const Metadata * const type,
   auto failureEntry = FoundConformance.failureEntry;
 
   // Prepare to scan conformance records.
-  size_t scannedCount;
-  auto returnNull = C.SectionsToScan
-    .read([&](const ConformanceSection *ptr, size_t count) -> bool {
-    scannedCount = count;
-    // Scan only sections that were not scanned yet.
-    // If we found an out-of-date negative cache entry,
-    // we need not to re-scan the sections that it covers.
-    auto startIndex = failureEntry ? failureEntry->getFailureGeneration() : 0;
-    auto endIndex = count;
-
-    // If there are no unscanned sections outstanding
-    // then we can cache failure and give up now.
-    if (startIndex == endIndex) {
-      C.cacheFailure(type, protocol, count);
-      return true;
-    }
-
-    /// Local function to retrieve the witness table and record the result.
-    auto recordWitnessTable = [&](const ProtocolConformanceDescriptor &descriptor,
-                                  const Metadata *type) {
-      switch (descriptor.getConformanceKind()) {
-      case ConformanceFlags::ConformanceKind::WitnessTable:
-        // If the record provides a nondependent witness table for all
-        // instances of a generic type, cache it for the generic pattern.
-        C.cacheSuccess(type, protocol, descriptor.getStaticWitnessTable());
-        return;
-
-      case ConformanceFlags::ConformanceKind::WitnessTableAccessor:
-        // If the record provides a dependent witness table accessor,
-        // cache the result for the instantiated type metadata.
-        C.cacheSuccess(type, protocol, descriptor.getWitnessTable(type));
-        return;
-
-      case ConformanceFlags::ConformanceKind::ConditionalWitnessTableAccessor: {
-        auto witnessTable = descriptor.getWitnessTable(type);
-        if (witnessTable)
-          C.cacheSuccess(type, protocol, witnessTable);
-        else
-          C.cacheFailure(type, protocol, count);
-        return;
-      }
-      }
-
-      // Always fail, because we cannot interpret a future conformance
-      // kind.
-      C.cacheFailure(type, protocol, count);
-    };
-
-    // Really scan conformance records.
-    for (size_t i = startIndex; i < endIndex; i++) {
-      auto &section = ptr[i];
-      // Eagerly pull records for nondependent witnesses into our cache.
-      for (const auto &record : section) {
-        auto &descriptor = *record.get();
-
-        // If the record applies to a specific type, cache it.
-        if (auto metadata = descriptor.getCanonicalTypeMetadata()) {
-          auto P = descriptor.getProtocol();
-
-          // Look for an exact match.
-          if (protocol != P)
-            continue;
-
-          if (!isRelatedType(type, metadata, /*candidateIsMetadata=*/true))
-            continue;
-
-          // Record the witness table.
-          recordWitnessTable(descriptor, metadata);
-
-        // TODO: "Nondependent witness table" probably deserves its own flag.
-        // An accessor function might still be necessary even if the witness table
-        // can be shared.
-        } else if (descriptor.getTypeKind()
-                   == TypeMetadataRecordKind::DirectNominalTypeDescriptor ||
-                   descriptor.getTypeKind()
-                  == TypeMetadataRecordKind::IndirectNominalTypeDescriptor) {
-          auto R = descriptor.getTypeContextDescriptor();
-          auto P = descriptor.getProtocol();
-
-          // Look for an exact match.
-          if (protocol != P)
-            continue;
-
-          if (!isRelatedType(type, R, /*candidateIsMetadata=*/false))
-            continue;
-
-          recordWitnessTable(descriptor, type);
-        }
-      }
-    }
-    return false;
-  });
-  if (returnNull) return nullptr;
+  auto snapshot = C.SectionsToScan.snapshot();
+
+  // Scan only sections that were not scanned yet.
+  // If we found an out-of-date negative cache entry,
+  // we need not to re-scan the sections that it covers.
+  auto startIndex = failureEntry ? failureEntry->getFailureGeneration() : 0;
+  auto endIndex = snapshot.count();
+
+  // If there are no unscanned sections outstanding
+  // then we can cache failure and give up now.
+  if (startIndex == endIndex) {
+    C.cacheFailure(type, protocol, snapshot.count());
+    return nullptr;
+  }
+
+  /// Local function to retrieve the witness table and record the result.
+  auto recordWitnessTable = [&](const ProtocolConformanceDescriptor &descriptor,
+                                const Metadata *type) {
+    switch (descriptor.getConformanceKind()) {
+    case ConformanceFlags::ConformanceKind::WitnessTable:
+      // If the record provides a nondependent witness table for all
+      // instances of a generic type, cache it for the generic pattern.
+      C.cacheSuccess(type, protocol, descriptor.getStaticWitnessTable());
+      return;
+
+    case ConformanceFlags::ConformanceKind::WitnessTableAccessor:
+      // If the record provides a dependent witness table accessor,
+      // cache the result for the instantiated type metadata.
+      C.cacheSuccess(type, protocol, descriptor.getWitnessTable(type));
+      return;
+
+    case ConformanceFlags::ConformanceKind::ConditionalWitnessTableAccessor: {
+      auto witnessTable = descriptor.getWitnessTable(type);
+      if (witnessTable)
+        C.cacheSuccess(type, protocol, witnessTable);
+      else
+        C.cacheFailure(type, protocol, snapshot.count());
+      return;
+    }
+    }
+
+    // Always fail, because we cannot interpret a future conformance
+    // kind.
+    C.cacheFailure(type, protocol, snapshot.count());
+  };
+
+  // Really scan conformance records.
+  for (size_t i = startIndex; i < endIndex; i++) {
+    auto &section = snapshot.Start[i];
+    // Eagerly pull records for nondependent witnesses into our cache.
+    for (const auto &record : section) {
+      auto &descriptor = *record.get();
+
+      // If the record applies to a specific type, cache it.
+      if (auto metadata = descriptor.getCanonicalTypeMetadata()) {
+        auto P = descriptor.getProtocol();
+
+        // Look for an exact match.
+        if (protocol != P)
+          continue;
+
+        if (!isRelatedType(type, metadata, /*candidateIsMetadata=*/true))
+          continue;
+
+        // Record the witness table.
+        recordWitnessTable(descriptor, metadata);
+
+      // TODO: "Nondependent witness table" probably deserves its own flag.
+      // An accessor function might still be necessary even if the witness table
+      // can be shared.
+      } else if (descriptor.getTypeKind()
+                 == TypeMetadataRecordKind::DirectNominalTypeDescriptor ||
+                 descriptor.getTypeKind()
+                == TypeMetadataRecordKind::IndirectNominalTypeDescriptor) {
+        auto R = descriptor.getTypeContextDescriptor();
+        auto P = descriptor.getProtocol();
+
+        // Look for an exact match.
+        if (protocol != P)
+          continue;
+
+        if (!isRelatedType(type, R, /*candidateIsMetadata=*/false))
+          continue;
+
+        recordWitnessTable(descriptor, type);
+      }
+    }
+  }
 
   // Conformance scan is complete.
   // Search the cache once more, and this time update the cache if necessary.
@@ -648,7 +639,7 @@ swift_conformsToProtocolImpl(const Metadata * const type,
   if (FoundConformance.isAuthoritative) {
     return FoundConformance.witnessTable;
   } else {
-    C.cacheFailure(type, protocol, scannedCount);
+    C.cacheFailure(type, protocol, snapshot.count());
    return nullptr;
   }
 }
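
The `snapshot.count()` arguments in the hunks above feed the negative cache's generation scheme: a cached failure records how many sections had been scanned when it was created, so a later query can fail fast if nothing new has been registered, or rescan only the new sections. A condensed sketch of that logic (names follow the diff; control flow is simplified for illustration):

    auto snapshot = C.SectionsToScan.snapshot();
    auto startIndex = failureEntry ? failureEntry->getFailureGeneration() : 0;
    auto endIndex = snapshot.count();
    if (startIndex == endIndex) {
      // No sections were added since the failure was cached: the failure
      // is still authoritative, so give up immediately.
    } else {
      // Only sections [startIndex, endIndex) need scanning, and a new
      // failure would be cached with generation endIndex.
    }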
@@ -657,19 +648,15 @@ const TypeContextDescriptor *
 swift::_searchConformancesByMangledTypeName(Demangle::NodePointer node) {
   auto &C = Conformances.get();
 
-  return C.SectionsToScan
-    .read([&](const ConformanceSection *ptr, size_t count) -> const TypeContextDescriptor * {
-    for (size_t i = 0; i < count; i++) {
-      auto &section = ptr[i];
-      for (const auto &record : section) {
-        if (auto ntd = record->getTypeContextDescriptor()) {
-          if (_contextDescriptorMatchesMangling(ntd, node))
-            return ntd;
-        }
-      }
-    }
-    return nullptr;
-  });
+  for (auto &section : C.SectionsToScan.snapshot()) {
+    for (const auto &record : section) {
+      if (auto ntd = record->getTypeContextDescriptor()) {
+        if (_contextDescriptorMatchesMangling(ntd, node))
+          return ntd;
+      }
+    }
+  }
+  return nullptr;
 }
 
 /// Resolve a reference to a generic parameter to type metadata.
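
One detail worth noting in the rewritten search loop above: returning from inside the loop is safe even though the snapshot is a temporary. A range-based for binds the result of `C.SectionsToScan.snapshot()` to a hidden reference whose lifetime covers the whole loop, so the early `return ntd` still runs the Snapshot destructor, and with it the reader-count decrement, on the way out. A sketch of the pattern under those assumptions (`find` and `example` are illustrative helpers, not part of this commit):

    const TypeContextDescriptor *find(const ConformanceSection &section);

    const TypeContextDescriptor *example(ConformanceState &C) {
      for (auto &section : C.SectionsToScan.snapshot()) {
        if (auto *ntd = find(section))
          return ntd;    // snapshot destroyed here; reader count released
      }
      return nullptr;    // ...or here, on normal loop exit
    }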