[llvm-arc-opts] Implement swift_{retain,release}_n.

rdar://21803771

Swift SVN r30204
This commit is contained in:
Michael Gottesman
2015-07-15 00:03:03 +00:00
parent 2315c8fd06
commit 4ce5cdad17
5 changed files with 97 additions and 1 deletions

View File

@@ -150,6 +150,8 @@ extern "C" void swift_slowDealloc(void *ptr, size_t bytes, size_t alignMask);
extern "C" HeapObject *swift_retain(HeapObject *object);
extern "C" void swift_retain_noresult(HeapObject *object);
extern "C" HeapObject *swift_retain_n(HeapObject *object, uint32_t n);
static inline HeapObject *_swift_retain_inlined(HeapObject *object) {
if (object) {
object->refCount.increment();
@@ -196,6 +198,10 @@ extern "C" void swift_unpin(HeapObject *object);
/// It's unlikely that a custom CC would be beneficial here.
extern "C" void swift_release(HeapObject *object);
/// Atomically decrements the retain count of an object n times. If the retain
/// count reaches zero, the object is destroyed.
extern "C" void swift_release_n(HeapObject *object, uint32_t n);
/// ObjC compatibility. Never call this.
extern "C" size_t swift_retainCount(HeapObject *object);

View File

@@ -27,10 +27,11 @@ extern "C" HeapObject *(*_swift_allocObject)(HeapMetadata const *metadata,
extern "C" BoxPair::Return (*_swift_allocBox)(Metadata const *type);
extern "C" HeapObject *(*_swift_retain)(HeapObject *object);
extern "C" HeapObject *(*_swift_retain_n)(HeapObject *object, uint32_t n);
extern "C" HeapObject *(*_swift_tryRetain)(HeapObject *object);
extern "C" bool (*_swift_isDeallocating)(HeapObject *object);
extern "C" void (*_swift_release)(HeapObject *object);
extern "C" void (*_swift_release_n)(HeapObject *object, uint32_t n);
// liboainject on iOS 8 patches the function pointers below if present.
// Do not reuse these names unless you do what oainject expects you to do.

View File

@@ -95,6 +95,11 @@ class StrongRefCount {
__atomic_fetch_add(&refCount, RC_ONE, __ATOMIC_RELAXED);
}
// Increment the reference count by n in a single atomic add.
// The count field sits above the low RC_FLAGS_COUNT flag bits, so n is
// shifted into position before the add. Relaxed ordering suffices here,
// matching the single-step increment() above: a retain imposes no
// ordering constraints on surrounding memory operations.
void increment(uint32_t n) {
__atomic_fetch_add(&refCount, n << RC_FLAGS_COUNT, __ATOMIC_RELAXED);
}
// Try to simultaneously set the pinned flag and increment the
// reference count. If the flag is already set, don't increment the
// reference count.
@@ -149,6 +154,10 @@ class StrongRefCount {
return doDecrementShouldDeallocate<false>();
}
// Decrement the reference count by n. Returns true iff the count
// reached zero and this call won the race to set the deallocating
// flag, in which case the caller must deallocate the object.
// Non-pinned variant (ClearPinnedFlag = false).
bool decrementShouldDeallocateN(uint32_t n) {
return doDecrementShouldDeallocateN<false>(n);
}
// Return the reference count.
// During deallocation the reference count is undefined.
uint32_t getCount() const {
@@ -220,6 +229,42 @@ private:
return __atomic_compare_exchange(&refCount, &oldval, &newval, 0,
__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
// Decrement the reference count by n and, if ClearPinnedFlag, clear the
// pinned flag in the same atomic subtraction. Returns true iff the count
// dropped to zero and this call successfully claimed the deallocating
// flag via the trailing compare-exchange — the caller is then
// responsible for deallocating the object.
template <bool ClearPinnedFlag>
bool doDecrementShouldDeallocateN(uint32_t n) {
// If we're being asked to clear the pinned flag, we can assume
// it's already set.
uint32_t delta = (n << RC_FLAGS_COUNT) + (ClearPinnedFlag ? RC_PINNED_FLAG : 0);
uint32_t newval = __atomic_sub_fetch(&refCount, delta, __ATOMIC_RELEASE);
assert((!ClearPinnedFlag || !(newval & RC_PINNED_FLAG)) &&
"unpinning reference that was not pinned");
assert(newval + delta >= RC_ONE &&
"releasing reference with a refcount of zero");
// If we didn't drop the reference count to zero, or if the
// deallocating flag is already set, we're done; don't start
// deallocation. We can assume that the pinned flag isn't set
// unless the refcount is nonzero, and or'ing it in gives us a
// more efficient mask: the check just becomes "is newval nonzero".
if ((newval & (RC_COUNT_MASK | RC_PINNED_FLAG | RC_DEALLOCATING_FLAG))
!= 0) {
// Refcount is not zero. We definitely do not need to deallocate.
return false;
}
// Refcount is now 0 and is not already deallocating. Try to set
// the deallocating flag. This must be atomic because it can race
// with weak retains.
//
// This also performs the before-deinit acquire barrier if we set the flag.
static_assert(RC_FLAGS_COUNT == 2,
"fix doDecrementShouldDeallocateN() if you add more flags");
uint32_t oldval = 0;
newval = RC_DEALLOCATING_FLAG;
return __atomic_compare_exchange(&refCount, &oldval, &newval, 0,
__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
};

View File

@@ -287,6 +287,17 @@ static HeapObject *_swift_retain_(HeapObject *object) {
}
auto swift::_swift_retain = _swift_retain_;
/// Public entry point: atomically increment the retain count of
/// `object` by n and return `object`. Dispatches through the
/// replaceable _swift_retain_n function pointer, like the other
/// _swift_* hooks declared alongside it.
HeapObject *swift::swift_retain_n(HeapObject *object, uint32_t n) {
return _swift_retain_n(object, n);
}
// Default implementation of _swift_retain_n: bump the strong reference
// count by n. A null object is a no-op; the argument is always returned
// unchanged so callers can chain on the result.
static HeapObject *_swift_retain_n_(HeapObject *object, uint32_t n) {
  if (object == nullptr)
    return nullptr;
  object->refCount.increment(n);
  return object;
}
auto swift::_swift_retain_n = _swift_retain_n_;
void swift::swift_release(HeapObject *object) {
SWIFT_RELEASE();
return _swift_release(object);
@@ -298,6 +309,16 @@ static void _swift_release_(HeapObject *object) {
}
auto swift::_swift_release = _swift_release_;
/// Public entry point: atomically decrement the retain count of
/// `object` by n, deallocating it if the count reaches zero.
/// Dispatches through the replaceable _swift_release_n function pointer.
/// NOTE(review): unlike swift_release above, this does not invoke the
/// SWIFT_RELEASE() instrumentation macro — confirm that is intentional.
void swift::swift_release_n(HeapObject *object, uint32_t n) {
return _swift_release_n(object, n);
}
// Default implementation of _swift_release_n: drop n strong references
// at once. If that takes the count to zero (and this call won the race
// to set the deallocating flag), run deallocation. Null is a no-op.
static void _swift_release_n_(HeapObject *object, uint32_t n) {
  if (object == nullptr)
    return;
  if (object->refCount.decrementShouldDeallocateN(n))
    _swift_release_dealloc(object);
}
auto swift::_swift_release_n = _swift_release_n_;
/// ObjC-compatibility shim returning the current strong reference
/// count. `object` is dereferenced unconditionally, so it must be
/// non-null; per getCount(), the value is undefined during
/// deallocation.
size_t swift::swift_retainCount(HeapObject *object) {
return object->refCount.getCount();
}

View File

@@ -97,3 +97,26 @@ TEST(RefcountingTest, pin_pin_unpin_unpin) {
swift_unpin(object);
EXPECT_EQ(1u, value);
}
// Exercises swift_retain_n/swift_release_n interleaved with the plain
// swift_retain/swift_release entry points. The expectations imply the
// test object starts with a retain count of 1 and writes 1 to *value
// when destroyed — presumably via allocTestObject's deinit; confirm
// against the fixture's definition.
TEST(RefcountingTest, retain_release_n) {
size_t value = 0;
auto object = allocTestObject(&value, 1);
EXPECT_EQ(0u, value);
// +32 then +1: count 1 -> 33 -> 34; both retain forms return their argument.
auto retainResult = swift_retain_n(object, 32);
EXPECT_EQ(object, retainResult);
retainResult = swift_retain(object);
EXPECT_EQ(object, retainResult);
EXPECT_EQ(0u, value);
EXPECT_EQ(34u, swift_retainCount(object));
// -31: count 34 -> 3; object still alive, deinit not run.
swift_release_n(object, 31);
EXPECT_EQ(0u, value);
EXPECT_EQ(3u, swift_retainCount(object));
swift_release(object);
EXPECT_EQ(0u, value);
EXPECT_EQ(2u, swift_retainCount(object));
// release_n with n == 1 must behave exactly like a single release.
swift_release_n(object, 1);
EXPECT_EQ(0u, value);
EXPECT_EQ(1u, swift_retainCount(object));
// Final release drops the count to zero and destroys the object.
swift_release(object);
EXPECT_EQ(1u, value);
}