//===--- Mutex.cpp - Mutex and ReadWriteLock Tests ------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#include "swift/Runtime/Mutex.h"
#include "gtest/gtest.h"
#include <atomic>
#include <chrono>
#include <map>
#include <random>
#include "ThreadingHelpers.h"

using namespace swift;

// -----------------------------------------------------------------------------

// basicLockableThreaded checks plain lock()/unlock() by having ten threads
// increment a pair of counters inside the critical section.
template <typename M> void basicLockableThreaded(M &mutex) {
  int count1 = 0;
  int count2 = 0;

  threadedExecute(10, [&](int) {
    for (int j = 0; j < 50; ++j) {
      mutex.lock();
      auto count = count2;
      count1++;
      count2 = count + 1;
      mutex.unlock();
    }
  });

  ASSERT_EQ(count1, 500);
  ASSERT_EQ(count2, 500);
}

TEST(MutexTest, BasicLockableThreaded) {
  Mutex mutex(/* checked = */ true);
  basicLockableThreaded(mutex);
}

TEST(StaticMutexTest, BasicLockableThreaded) {
  static StaticMutex mutex;
  basicLockableThreaded(mutex);
}

TEST(StaticUnsafeMutexTest, BasicLockableThreaded) {
  static StaticUnsafeMutex mutex;
  basicLockableThreaded(mutex);
}

TEST(SmallMutex, BasicLockableThreaded) {
  SmallMutex mutex;
  basicLockableThreaded(mutex);
}

// lockableThreaded checks try_lock(): it must fail while the mutex is held
// elsewhere and, once acquired, must protect the counters.
template <typename M> void lockableThreaded(M &mutex) {
  mutex.lock();
  threadedExecute(5, [&](int) { ASSERT_FALSE(mutex.try_lock()); });
  mutex.unlock();
  threadedExecute(1, [&](int) {
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();
  });

  int count1 = 0;
  int count2 = 0;
  threadedExecute(10, [&](int) {
    for (int j = 0; j < 50; ++j) {
      if (mutex.try_lock()) {
        auto count = count2;
        count1++;
        count2 = count + 1;
        mutex.unlock();
      } else {
        j--;
      }
    }
  });

  ASSERT_EQ(count1, 500);
  ASSERT_EQ(count2, 500);
}

TEST(MutexTest, LockableThreaded) {
  Mutex mutex(/* checked = */ true);
  lockableThreaded(mutex);
}

TEST(StaticMutexTest, LockableThreaded) {
  static StaticMutex Mutex;
  lockableThreaded(Mutex);
}

TEST(SmallMutexTest, LockableThreaded) {
  SmallMutex Mutex;
  lockableThreaded(Mutex);
}

// scopedLockThreaded checks the RAII guard type SL: the counters are only
// touched while a guard is alive.
template <typename SL, typename M> void scopedLockThreaded(M &mutex) {
  int count1 = 0;
  int count2 = 0;

  threadedExecute(10, [&](int) {
    for (int j = 0; j < 50; ++j) {
      SL guard(mutex);
      auto count = count2;
      count1++;
      count2 = count + 1;
    }
  });

  ASSERT_EQ(count1, 500);
  ASSERT_EQ(count2, 500);
}

TEST(MutexTest, ScopedLockThreaded) {
  Mutex mutex(/* checked = */ true);
  scopedLockThreaded<ScopedLock>(mutex);
}

TEST(StaticMutexTest, ScopedLockThreaded) {
  static StaticMutex Mutex;
  scopedLockThreaded<StaticScopedLock>(Mutex);
}

TEST(SmallMutexTest, ScopedLockThreaded) {
  SmallMutex mutex(/* checked = */ true);
  scopedLockThreaded<ScopedLockT<SmallMutex, false>>(mutex);
}

// scopedUnlockUnderScopedLockThreaded checks that a scoped unlock (SU) nested
// inside a scoped lock (SL) temporarily releases and then reacquires the
// mutex without corrupting the protected counters.
template <typename SL, typename SU, typename M>
void scopedUnlockUnderScopedLockThreaded(M &mutex) {
  int count1 = 0;
  int count2 = 0;
  int badCount = 0;

  threadedExecute(10, [&](int) {
    for (int j = 0; j < 50; ++j) {
      SL guard(mutex);
      {
        SU unguard(mutex);
        badCount++;
      }
      auto count = count2;
      count1++;
      count2 = count + 1;
    }
  });

  ASSERT_EQ(count1, 500);
  ASSERT_EQ(count2, 500);
}

TEST(MutexTest, ScopedUnlockUnderScopedLockThreaded) {
  Mutex mutex(/* checked = */ true);
  scopedUnlockUnderScopedLockThreaded<ScopedLock, ScopedUnlock>(
      mutex);
}

TEST(StaticMutexTest, ScopedUnlockUnderScopedLockThreaded) {
  static StaticMutex Mutex;
  scopedUnlockUnderScopedLockThreaded<StaticScopedLock, StaticScopedUnlock>(
      Mutex);
}

TEST(SmallMutexTest, ScopedUnlockUnderScopedLockThreaded) {
  SmallMutex mutex(/* checked = */ true);
  scopedUnlockUnderScopedLockThreaded<ScopedLockT<SmallMutex, false>,
                                      ScopedLockT<SmallMutex, true>>(mutex);
}
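
// criticalSectionThreaded drives the closure-based withLock() API from ten
// contending threads, while the scopedRead/scopedWrite helpers that follow
// exercise the ReadWriteLock scoped guards against a concurrently running
// writer (or reader) and check that readers only ever observe values the
// writer actually published.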
template <typename M> void criticalSectionThreaded(M &mutex) {
  int count1 = 0;
  int count2 = 0;

  threadedExecute(10, [&](int) {
    for (int j = 0; j < 50; ++j) {
      mutex.withLock([&] {
        auto count = count2;
        count1++;
        count2 = count + 1;
      });
    }
  });

  ASSERT_EQ(count1, 500);
  ASSERT_EQ(count2, 500);
}

TEST(MutexTest, CriticalSectionThreaded) {
  Mutex mutex(/* checked = */ true);
  criticalSectionThreaded(mutex);
}

TEST(StaticMutexTest, CriticalSectionThreaded) {
  static StaticMutex Mutex;
  criticalSectionThreaded(Mutex);
}

template <bool Locking, typename SRL, typename RW>
void scopedReadThreaded(RW &lock) {
  const int threadCount = 10;

  std::set<int> writerHistory;
  std::vector<std::set<int>> readerHistory;
  readerHistory.assign(threadCount, std::set<int>());

  int protectedValue = 0;
  writerHistory.insert(protectedValue);

  threadedExecute(
      threadCount,
      [&](int index) {
        if (Locking) {
          for (int i = 0; i < 50; ++i) {
            {
              SRL guard(lock);
              readerHistory[index].insert(protectedValue);
            }
            std::this_thread::yield();
          }
        } else {
          lock.readLock();
          for (int i = 0; i < 50; ++i) {
            readerHistory[index].insert(protectedValue);
            {
              SRL unguard(lock);
              std::this_thread::yield();
            }
          }
          lock.readUnlock();
        }
      },
      [&] {
        for (int i = 0; i < 25; ++i) {
          lock.writeLock();
          protectedValue += i;
          writerHistory.insert(protectedValue);
          lock.writeUnlock();
        }
      });

  for (auto &history : readerHistory) {
    for (auto value : history) {
      ASSERT_EQ(writerHistory.count(value), 1U);
    }
  }
}

TEST(ReadWriteLockTest, ScopedReadLockThreaded) {
  ReadWriteLock lock;
  scopedReadThreaded<true, ScopedReadLock>(lock);
}

TEST(StaticReadWriteLockTest, ScopedReadLockThreaded) {
  static StaticReadWriteLock lock;
  scopedReadThreaded<true, StaticScopedReadLock>(lock);
}

TEST(ReadWriteLockTest, ScopedReadUnlockThreaded) {
  ReadWriteLock lock;
  scopedReadThreaded<false, ScopedReadUnlock>(lock);
}

TEST(StaticReadWriteLockTest, ScopedReadUnlockThreaded) {
  static StaticReadWriteLock lock;
  scopedReadThreaded<false, StaticScopedReadUnlock>(lock);
}

template <bool Locking, typename SWL, typename RW>
void scopedWriteLockThreaded(RW &lock) {
  const int threadCount = 10;

  std::set<int> readerHistory;
  std::vector<std::set<int>> writerHistory;
  writerHistory.assign(threadCount, std::set<int>());

  int protectedValue = 0;
  readerHistory.insert(protectedValue);

  threadedExecute(
      threadCount,
      [&](int index) {
        if (Locking) {
          for (int i = 0; i < 20; ++i) {
            {
              SWL guard(lock);
              protectedValue += index * i;
              writerHistory[index].insert(protectedValue);
            }
            std::this_thread::yield();
          }
        } else {
          lock.writeLock();
          for (int i = 0; i < 20; ++i) {
            protectedValue += index * i;
            writerHistory[index].insert(protectedValue);
            {
              SWL unguard(lock);
              std::this_thread::yield();
            }
          }
          lock.writeUnlock();
        }
      },
      [&] {
        for (int i = 0; i < 100; ++i) {
          lock.readLock();
          readerHistory.insert(protectedValue);
          lock.readUnlock();
        }
      });

  std::set<int> mergedHistory;
  for (auto &history : writerHistory) {
    mergedHistory.insert(history.begin(), history.end());
  }

  for (auto value : readerHistory) {
    ASSERT_EQ(mergedHistory.count(value), 1U);
  }
}

TEST(ReadWriteLockTest, ScopedWriteLockThreaded) {
  ReadWriteLock lock;
  scopedWriteLockThreaded<true, ScopedWriteLock>(lock);
}

TEST(StaticReadWriteLockTest, ScopedWriteLockThreaded) {
  static StaticReadWriteLock lock;
  scopedWriteLockThreaded<true, StaticScopedWriteLock>(lock);
}

TEST(ReadWriteLockTest, ScopedWriteUnlockThreaded) {
  ReadWriteLock lock;
  scopedWriteLockThreaded<false, ScopedWriteUnlock>(lock);
}

TEST(StaticReadWriteLockTest, ScopedWriteUnlockThreaded) {
  static StaticReadWriteLock lock;
  scopedWriteLockThreaded<false, StaticScopedWriteUnlock>(lock);
}
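
// The *LockWhile*Locked helpers below pre-acquire the lock in read or write
// mode on the main thread and then verify, from worker threads, which
// acquisitions may proceed immediately (read while read-locked) and which must
// block until the conflicting lock is released.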
template <typename RW> void readLockWhileReadLockedThreaded(RW &lock) {
  lock.readLock();

  const int threadCount = 10;
  std::atomic<bool> results[threadCount] = {};
  std::atomic<bool> done(false);

  threadedExecute(
      threadCount,
      [&](int index) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          lock.withReadLock([&] {
            results[index] = true;
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
          });
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
      });

  lock.readUnlock();

  for (auto &result : results) {
    ASSERT_TRUE(result);
  }
}

TEST(ReadWriteLockTest, ReadLockWhileReadLockedThreaded) {
  ReadWriteLock lock;
  readLockWhileReadLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, ReadLockWhileReadLockedThreaded) {
  static StaticReadWriteLock lock;
  readLockWhileReadLockedThreaded(lock);
}

template <typename RW> void readLockWhileWriteLockedThreaded(RW &lock) {
  lock.writeLock();

  const int threadCount = 10;
  std::atomic<int> results[threadCount] = {};
  std::atomic<bool> done(false);

  threadedExecute(
      threadCount,
      [&](int index) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          lock.withReadLock([&] {
            results[index] += 1;
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
          });
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
        lock.writeUnlock();
      });

  for (auto &result : results) {
    ASSERT_EQ(result, 1);
  }
}

TEST(ReadWriteLockTest, ReadLockWhileWriteLockedThreaded) {
  ReadWriteLock lock;
  readLockWhileWriteLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, ReadLockWhileWriteLockedThreaded) {
  static StaticReadWriteLock lock;
  readLockWhileWriteLockedThreaded(lock);
}

template <typename RW> void writeLockWhileReadLockedThreaded(RW &lock) {
  lock.readLock();

  const int threadCount = 10;
  std::atomic<int> results[threadCount] = {};
  std::atomic<bool> done(false);

  threadedExecute(
      threadCount,
      [&](int index) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          lock.withWriteLock([&] {
            results[index] += 1;
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
          });
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
        lock.readUnlock();
      });

  for (auto &result : results) {
    ASSERT_EQ(result, 1);
  }
}

TEST(ReadWriteLockTest, WriteLockWhileReadLockedThreaded) {
  ReadWriteLock lock;
  writeLockWhileReadLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, WriteLockWhileReadLockedThreaded) {
  static StaticReadWriteLock lock;
  writeLockWhileReadLockedThreaded(lock);
}

template <typename RW> void writeLockWhileWriteLockedThreaded(RW &lock) {
  lock.writeLock();

  const int threadCount = 10;
  std::atomic<int> results[threadCount] = {};
  std::atomic<bool> done(false);

  threadedExecute(
      threadCount,
      [&](int index) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          lock.withWriteLock([&] {
            results[index] += 1;
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
          });
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
        lock.writeUnlock();
      });

  for (auto &result : results) {
    ASSERT_EQ(result, 1);
  }
}

TEST(ReadWriteLockTest, WriteLockWhileWriteLockedThreaded) {
  ReadWriteLock lock;
  writeLockWhileWriteLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, WriteLockWhileWriteLockedThreaded) {
  static StaticReadWriteLock lock;
  writeLockWhileWriteLockedThreaded(lock);
}
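
// The try* helpers cover the non-blocking paths: try_readLock() and
// try_writeLock() must fail while a conflicting lock is held and succeed
// while only compatible (shared read) locks are held.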
template <typename RW> void tryReadLockWhileWriteLockedThreaded(RW &lock) {
  lock.writeLock();

  std::atomic<bool> done(false);

  threadedExecute(
      10,
      [&](int) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          ASSERT_FALSE(lock.try_readLock());
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
      });

  lock.writeUnlock();
}

TEST(ReadWriteLockTest, TryReadLockWhileWriteLockedThreaded) {
  ReadWriteLock lock;
  tryReadLockWhileWriteLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, TryReadLockWhileWriteLockedThreaded) {
  static StaticReadWriteLock lock;
  tryReadLockWhileWriteLockedThreaded(lock);
}

template <typename RW> void tryReadLockWhileReadLockedThreaded(RW &lock) {
  lock.readLock();

  const int threadCount = 10;
  std::atomic<bool> results[threadCount] = {};
  std::atomic<bool> done(false);

  threadedExecute(
      threadCount,
      [&](int index) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          ASSERT_TRUE(lock.try_readLock());
          results[index] = true;
          std::this_thread::sleep_for(std::chrono::milliseconds(5));
          lock.readUnlock();
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
      });

  lock.readUnlock();

  for (auto &result : results) {
    ASSERT_TRUE(result);
  }
}

TEST(ReadWriteLockTest, TryReadLockWhileReadLockedThreaded) {
  ReadWriteLock lock;
  tryReadLockWhileReadLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, TryReadLockWhileReadLockedThreaded) {
  static StaticReadWriteLock lock;
  tryReadLockWhileReadLockedThreaded(lock);
}

template <typename RW> void tryWriteLockWhileWriteLockedThreaded(RW &lock) {
  lock.writeLock();

  std::atomic<bool> done(false);

  threadedExecute(
      10,
      [&](int) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          ASSERT_FALSE(lock.try_writeLock());
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
      });

  lock.writeUnlock();
}

TEST(ReadWriteLockTest, TryWriteLockWhileWriteLockedThreaded) {
  ReadWriteLock lock;
  tryWriteLockWhileWriteLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, TryWriteLockWhileWriteLockedThreaded) {
  static StaticReadWriteLock lock;
  tryWriteLockWhileWriteLockedThreaded(lock);
}

template <typename RW> void tryWriteLockWhileReadLockedThreaded(RW &lock) {
  lock.readLock();

  std::atomic<bool> done(false);

  threadedExecute(
      10,
      [&](int) {
        // Always perform at least one iteration of this loop to
        // avoid spurious failures if this thread is slow to run.
        do {
          ASSERT_FALSE(lock.try_writeLock());
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        } while (!done);
      },
      [&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        done = true;
      });

  lock.readUnlock();
}

TEST(ReadWriteLockTest, TryWriteLockWhileReadLockedThreaded) {
  ReadWriteLock lock;
  tryWriteLockWhileReadLockedThreaded(lock);
}

TEST(StaticReadWriteLockTest, TryWriteLockWhileReadLockedThreaded) {
  static StaticReadWriteLock lock;
  tryWriteLockWhileReadLockedThreaded(lock);
}
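
// readWriteLockCacheExampleThreaded models a typical reader/writer cache:
// workers look up a key under the read lock and, on a miss, re-check and
// populate the entry under the write lock; the final loop verifies that every
// cache entry was created by the worker recorded in its history.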
template <bool trace, typename RW>
void readWriteLockCacheExampleThreaded(RW &lock) {
  std::map<uint8_t, int> cache;
  std::vector<std::thread> workers;
  std::vector<std::set<uint8_t>> workerHistory;

  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution<> dis(0, UINT8_MAX);

  workerHistory.push_back(std::set<uint8_t>());

  for (int i = 0; i < 16; i++) {
    uint8_t key = dis(gen);
    cache[key] = 0;
    workerHistory[0].insert(key);
    if (trace)
      printf("WarmUp create for key = %d, value = %d.\n", key, 0);
  }

  // Block the threads we are about to create.
  const int threadCount = 20;
  std::atomic<bool> spinWait(true);
  std::atomic<int> readyCount(0);

  for (int i = 1; i <= threadCount; ++i) {
    workerHistory.push_back(std::set<uint8_t>());
    workers.push_back(std::thread([&, i] {
      readyCount++;

      // Block ourself until we are released to start working.
      while (spinWait) {
        std::this_thread::sleep_for(std::chrono::microseconds(10));
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(1));

      for (int j = 0; j < 50; j++) {
        uint8_t key = dis(gen);
        bool found = false;

        auto cacheLookupSection = [&] {
          auto value = cache.find(key);
          if (value == cache.end()) {
            if (trace)
              printf("Worker[%d] miss for key = %d.\n", i, key);
            found = false; // cache miss, need to grab write lock
            return;
          }
          if (trace)
            printf("Worker[%d] HIT for key = %d, value = %d.\n", i, key,
                   value->second);
          found = true; // cache hit, no need to grab write lock
        };

        lock.withReadLock(cacheLookupSection);

        if (found) {
          continue;
        }

        lock.withWriteLock([&] {
          cacheLookupSection();
          if (!found) {
            if (trace)
              printf("Worker[%d] create for key = %d, value = %d.\n", i, key,
                     i);
            cache[key] = i;
            workerHistory[i].insert(key);
          }
        });
      }

      if (trace)
        printf("### Worker[%d] thread exiting.\n", i);
    }));
  }

  while (readyCount < threadCount) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  // Allow our threads to fight for the lock.
  spinWait = false;

  // Wait until all of our worker threads have finished.
  for (auto &thread : workers) {
    thread.join();
  }

  for (auto &entry : cache) {
    if (trace)
      printf("### Cache dump key = %d, value = %d.\n", entry.first,
             entry.second);
    ASSERT_EQ(workerHistory[entry.second].count(entry.first), 1U);
  }
}

TEST(ReadWriteLockTest, ReadWriteLockCacheExampleThreaded) {
  ReadWriteLock lock;
  readWriteLockCacheExampleThreaded<false>(lock);
}

TEST(StaticReadWriteLockTest, ReadWriteLockCacheExampleThreaded) {
  static StaticReadWriteLock lock;
  readWriteLockCacheExampleThreaded<false>(lock);
}