[Runtime Mem] Convert the metadata map from open to closed hash map.

This change cuts the number of mallocs() in the metadata caches in half.

The current metadata cache data structure uses a linked list for each entry in
the tree to handle collisions.  This means that we need at least two memory
allocations for each entry, one for the tree node and one for the linked list
node.

This commit changes the map used by the metadata caches from an open hash map
(that embeds a linked list at each entry) into a closed hash map that uses a
different hash value for each entry. With this change we no longer accept
collisions and it is now the responsibility of the user to prevent collisions.
The new get/trySet API makes this responsibility explicit. The new design also
goes well with the current design where hashing is done externally, and with the
fact that we don't save the full key — just the hash and the value — to save memory.

This change reduces the number of allocated objects per entry in half. Instead
of allocating two 32-byte objects (one for the tree node and one for the linked
list) we just allocate a single entry that contains the hash and the value.

Unfortunately, values that are made of two 64-bit pointers (like protocol
conformance entries) are now too big for the 32-byte tree entry and are rounded
up to 48 bytes. In practice this is not a big deal because malloc has 48-byte pool
entries.
This commit is contained in:
Nadav Rotem
2016-02-08 12:47:53 -08:00
parent 38e3a8c8d3
commit cdbf839f6c
4 changed files with 195 additions and 93 deletions

View File

@@ -389,22 +389,23 @@ recur_inside_cache_lock:
{
// Hash and lookup the type-protocol pair in the cache.
size_t hash = hashTypeProtocolPair(type, protocol);
ConcurrentList<ConformanceCacheEntry> &Bucket =
C.Cache.findOrAllocateNode(hash);
// Check if the type-protocol entry exists in the cache entry that we found.
for (auto &Entry : Bucket) {
if (!Entry.matches(type, protocol)) continue;
if (Entry.isSuccessful()) {
return std::make_pair(Entry.getWitnessTable(), true);
while (auto *Value = C.Cache.findValueByKey(hash)) {
if (!Value->matches(type, protocol)) {
// If we have a collision increase the hash value by one and try again.
hash++;
continue;
}
if (Value->isSuccessful())
return std::make_pair(Value->getWitnessTable(), true);
if (type == origType)
foundEntry = &Entry;
foundEntry = Value;
// If we got a cached negative response, check the generation number.
if (Entry.getFailureGeneration() == C.SectionsToScan.size()) {
if (Value->getFailureGeneration() == C.SectionsToScan.size()) {
// We found an entry with a negative value.
return std::make_pair(nullptr, true);
}
@@ -419,14 +420,17 @@ recur_inside_cache_lock:
// Hash and lookup the type-protocol pair in the cache.
size_t hash = hashTypeProtocolPair(description, protocol);
ConcurrentList<ConformanceCacheEntry> &Bucket =
C.Cache.findOrAllocateNode(hash);
for (auto &Entry : Bucket) {
if (!Entry.matches(description, protocol)) continue;
if (Entry.isSuccessful()) {
return std::make_pair(Entry.getWitnessTable(), true);
while (auto *Value = C.Cache.findValueByKey(hash)) {
if (!Value->matches(description, protocol)) {
// If we have a collision increase the hash value by one and try again.
hash++;
continue;
}
if (Value->isSuccessful())
return std::make_pair(Value->getWitnessTable(), true);
// We don't try to cache negative responses for generic
// patterns.
}
@@ -522,10 +526,15 @@ recur:
// Hash and lookup the type-protocol pair in the cache.
size_t hash = hashTypeProtocolPair(type, protocol);
ConcurrentList<ConformanceCacheEntry> &Bucket =
C.Cache.findOrAllocateNode(hash);
Bucket.push_front(ConformanceCacheEntry::createFailure(
type, protocol, C.SectionsToScan.size()));
auto E = ConformanceCacheEntry::createFailure(
type, protocol, C.SectionsToScan.size());
while (!C.Cache.tryToAllocateNewNode(hash, E)) {
// If we have a collision increase the hash value by one and try again.
hash++;
}
pthread_mutex_unlock(&C.SectionsToScanLock);
return nullptr;
}
@@ -552,18 +561,21 @@ recur:
if (!isRelatedType(type, metadata, /*isMetadata=*/true))
continue;
// Hash and lookup the type-protocol pair in the cache.
// Store the type-protocol pair in the cache.
size_t hash = hashTypeProtocolPair(metadata, P);
ConcurrentList<ConformanceCacheEntry> &Bucket =
C.Cache.findOrAllocateNode(hash);
auto witness = record.getWitnessTable(metadata);
ConformanceCacheEntry E;
if (witness)
Bucket.push_front(
ConformanceCacheEntry::createSuccess(metadata, P, witness));
E = ConformanceCacheEntry::createSuccess(metadata, P, witness);
else
Bucket.push_front(ConformanceCacheEntry::createFailure(
metadata, P, C.SectionsToScan.size()));
E = ConformanceCacheEntry::createFailure(metadata, P,
C.SectionsToScan.size());
while (!C.Cache.tryToAllocateNewNode(hash, E)) {
// If we have a collision increase the hash value by one and try again.
hash++;
}
// If the record provides a nondependent witness table for all instances
// of a generic type, cache it for the generic pattern.
@@ -585,12 +597,16 @@ recur:
if (!isRelatedType(type, R, /*isMetadata=*/false))
continue;
// Hash and lookup the type-protocol pair in the cache.
// Hash and store the type-protocol pair in the cache.
size_t hash = hashTypeProtocolPair(R, P);
ConcurrentList<ConformanceCacheEntry> &Bucket =
C.Cache.findOrAllocateNode(hash);
Bucket.push_front(ConformanceCacheEntry::createSuccess(
R, P, record.getStaticWitnessTable()));
auto E = ConformanceCacheEntry::createSuccess(R, P,
record.getStaticWitnessTable());
while (!C.Cache.tryToAllocateNewNode(hash, E)) {
// If we have a collision increase the hash value by one and try again.
hash++;
}
}
}
}