diff --git a/arc.m b/arc.m
index 8875bcc..d12a044 100644
--- a/arc.m
+++ b/arc.m
@@ -175,6 +175,45 @@ static const long weak_mask = ((size_t)1)<<((sizeof(size_t)*8)-1);
  */
 static const long refcount_mask = ~weak_mask;
 
+id objc_retain_fast_np(id obj)
+{
+	uintptr_t *refCount = ((uintptr_t*)obj) - 1;
+	uintptr_t refCountVal = __sync_fetch_and_add(refCount, 0);
+	uintptr_t newVal = refCountVal;
+	do {
+		refCountVal = newVal;
+		long realCount = refCountVal & refcount_mask;
+		// If this object's reference count is already less than 0, then
+		// this is a spurious retain. This can happen when one thread is
+		// attempting to acquire a strong reference from a weak reference
+		// and the other thread is attempting to destroy it. The
+		// deallocating thread will decrement the reference count with no
+		// locks held and will then acquire the weak ref table lock and
+		// attempt to zero the weak references. The caller of this will be
+		// `objc_loadWeakRetained`, which will also hold the lock. If the
+		// serialisation is such that the locked retain happens after the
+		// decrement, then we return nil here so that the weak-to-strong
+		// transition doesn't happen and the object is actually destroyed.
+		// If the serialisation happens the other way, then the locked
+		// check of the reference count will happen after we've referenced
+		// this and we don't zero the references or deallocate.
+		if (realCount < 0)
+		{
+			return nil;
+		}
+		// If the reference count is saturated, don't increment it.
+		if (realCount == refcount_mask)
+		{
+			return obj;
+		}
+		realCount++;
+		realCount |= refCountVal & weak_mask;
+		uintptr_t updated = (uintptr_t)realCount;
+		newVal = __sync_val_compare_and_swap(refCount, refCountVal, updated);
+	} while (newVal != refCountVal);
+	return obj;
+}
+
 static inline id retain(id obj)
 {
 	if (isSmallObject(obj)) { return obj; }
@@ -186,43 +225,55 @@ static inline id retain(id obj)
 	}
 	if (objc_test_class_flag(cls, objc_class_flag_fast_arc))
 	{
-		uintptr_t *refCount = ((uintptr_t*)obj) - 1;
-		uintptr_t refCountVal = __sync_fetch_and_add(refCount, 0);
-		uintptr_t newVal = refCountVal;
-		do {
-			refCountVal = newVal;
-			long realCount = refCountVal & refcount_mask;
-			// If this object's reference count is already less than 0, then
-			// this is a spurious retain. This can happen when one thread is
-			// attempting to acquire a strong reference from a weak reference
-			// and the other thread is attempting to destroy it. The
-			// deallocating thread will decrement the reference count with no
-			// locks held and will then acquire the weak ref table lock and
-			// attempt to zero the weak references. The caller of this will be
-			// `objc_loadWeakRetained`, which will also hold the lock. If the
-			// serialisation is such that the locked retain happens after the
-			// decrement, then we return nil here so that the weak-to-strong
-			// transition doesn't happen and the object is actually destroyed.
-			// If the serialisation happens the other way, then the locked
-			// check of the reference count will happen after we've referenced
-			// this and we don't zero the references or deallocate.
-			if (realCount < 0)
-			{
-				return nil;
-			}
-			// If the reference count is saturated, don't increment it.
-			if (realCount == refcount_mask)
+		return objc_retain_fast_np(obj);
+	}
+	return [obj retain];
+}
+
+BOOL objc_release_fast_no_destroy_np(id obj)
+{
+	uintptr_t *refCount = ((uintptr_t*)obj) - 1;
+	uintptr_t refCountVal = __sync_fetch_and_add(refCount, 0);
+	uintptr_t newVal = refCountVal;
+	bool isWeak;
+	bool shouldFree;
+	do {
+		refCountVal = newVal;
+		size_t realCount = refCountVal & refcount_mask;
+		// If the reference count is saturated, don't decrement it.
+		if (realCount == refcount_mask)
+		{
+			return NO;
+		}
+		realCount--;
+		isWeak = (refCountVal & weak_mask) == weak_mask;
+		shouldFree = realCount == -1;
+		realCount |= refCountVal & weak_mask;
+		uintptr_t updated = (uintptr_t)realCount;
+		newVal = __sync_val_compare_and_swap(refCount, refCountVal, updated);
+	} while (newVal != refCountVal);
+	// We allow refcounts to run into the negative, but should only
+	// deallocate once.
+	if (shouldFree)
+	{
+		if (isWeak)
+		{
+			if (!objc_delete_weak_refs(obj))
 			{
-				return obj;
+				return NO;
 			}
-			realCount++;
-			realCount |= refCountVal & weak_mask;
-			uintptr_t updated = (uintptr_t)realCount;
-			newVal = __sync_val_compare_and_swap(refCount, refCountVal, updated);
-		} while (newVal != refCountVal);
-		return obj;
+		}
+		return YES;
+	}
+	return NO;
+}
+
+void objc_release_fast_np(id obj)
+{
+	if (objc_release_fast_no_destroy_np(obj))
+	{
+		[obj dealloc];
 	}
-	return [obj retain];
 }
 
 static inline void release(id obj)
@@ -241,39 +292,7 @@ static inline void release(id obj)
 	}
 	if (objc_test_class_flag(cls, objc_class_flag_fast_arc))
 	{
-		uintptr_t *refCount = ((uintptr_t*)obj) - 1;
-		uintptr_t refCountVal = __sync_fetch_and_add(refCount, 0);
-		uintptr_t newVal = refCountVal;
-		bool isWeak;
-		bool shouldFree;
-		do {
-			refCountVal = newVal;
-			size_t realCount = refCountVal & refcount_mask;
-			// If the reference count is saturated, don't decrement it.
-			if (realCount == refcount_mask)
-			{
-				return;
-			}
-			realCount--;
-			isWeak = (refCountVal & weak_mask) == weak_mask;
-			shouldFree = realCount == -1;
-			realCount |= refCountVal & weak_mask;
-			uintptr_t updated = (uintptr_t)realCount;
-			newVal = __sync_val_compare_and_swap(refCount, refCountVal, updated);
-		} while (newVal != refCountVal);
-		// We allow refcounts to run into the negative, but should only
-		// deallocate once.
-		if (shouldFree)
-		{
-			if (isWeak)
-			{
-				if (!objc_delete_weak_refs(obj))
-				{
-					return;
-				}
-			}
-			[obj dealloc];
-		}
+		objc_release_fast_np(obj);
 		return;
 	}
 	[obj release];
@@ -375,7 +394,6 @@ unsigned long objc_arc_autorelease_count_for_object_np(id obj)
 	return count;
 }
 
-
 void *objc_autoreleasePoolPush(void)
 {
 	initAutorelease();
diff --git a/objc/objc-arc.h b/objc/objc-arc.h
index 203cfef..75dde84 100644
--- a/objc/objc-arc.h
+++ b/objc/objc-arc.h
@@ -32,6 +32,12 @@ id objc_loadWeakRetained(id* obj);
  * Retains the argument. Equivalent to [obj retain].
  */
 id objc_retain(id obj);
+/**
+ * Retains the argument, assuming that the argument is a normal object and has
+ * its reference count managed by the runtime.
+ * This is intended to implement `-retain` in ARC-compatible root classes.
+ */
+id objc_retain_fast_np(id obj) OBJC_NONPORTABLE;
 /**
  * Retains and autoreleases an object. Equivalent to [[obj retain] autorelease].
  */
@@ -85,6 +91,26 @@ void objc_destroyWeak(id* addr);
  * Equivalent to objc_copyWeak(), but may also set src to nil.
  */
 void objc_moveWeak(id *dest, id *src);
+/**
+ * Releases the argument, assuming that the argument is a normal object and has
+ * its reference count managed by the runtime. If the retain count reaches
+ * zero then all weak references will be zeroed and the object will be
+ * destroyed.
+ *
+ * This is intended to implement `-release` in ARC-compatible root
+ * classes.
+ */
+void objc_release_fast_np(id obj) OBJC_NONPORTABLE;
+/**
+ * Releases the argument, assuming that the argument is a normal object and has
+ * its reference count managed by the runtime. If the retain count reaches
+ * zero then all weak references will be zeroed but the object will *NOT* be
+ * destroyed.
+ *
+ * This is intended to implement `NSDecrementExtraRefCountWasZero` for use with
+ * ARC-compatible classes.
+ */
+BOOL objc_release_fast_no_destroy_np(id obj) OBJC_NONPORTABLE;
 /**
  * Releases an object. Equivalent to [obj release].
  */
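
Usage note (not part of the patch): the header comments above state that objc_retain_fast_np() and objc_release_fast_np() are intended to back -retain and -release in ARC-compatible root classes. The sketch below shows one way such a root class could forward to them. It assumes compilation without ARC; the class name MyRoot and the <objc/runtime.h> / <objc/objc-arc.h> include paths are illustrative only, and a real root class would also need -autorelease, -retainCount, and similar methods.

#import <objc/runtime.h>
#import <objc/objc-arc.h>

__attribute__((objc_root_class))
@interface MyRoot
{
	Class isa;
}
+ (instancetype)alloc;
- (instancetype)retain;
- (oneway void)release;
- (void)dealloc;
@end

@implementation MyRoot
+ (instancetype)alloc
{
	// Plain runtime allocation; the fast paths below operate on the
	// reference count word stored before the instance (see
	// ((uintptr_t*)obj) - 1 in arc.m above).
	return class_createInstance(self, 0);
}
- (instancetype)retain
{
	// Atomically increments the inline reference count (saturating),
	// as implemented by objc_retain_fast_np() in arc.m.
	return objc_retain_fast_np(self);
}
- (oneway void)release
{
	// Atomically decrements the count, zeroes weak references, and
	// sends -dealloc once the count reaches zero.
	objc_release_fast_np(self);
}
- (void)dealloc
{
	object_dispose(self);
}
@end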
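
Similarly, objc_release_fast_no_destroy_np() is documented above as the hook for NSDecrementExtraRefCountWasZero(). A minimal sketch of that mapping, assuming the caller takes responsibility for sending -dealloc when it returns YES:

#import <objc/runtime.h>
#import <objc/objc-arc.h>

BOOL NSDecrementExtraRefCountWasZero(id object)
{
	// Drops the inline reference count; once it reaches zero the runtime
	// zeroes any weak references but leaves destruction to the caller,
	// so YES here means "send -dealloc now".
	return objc_release_fast_no_destroy_np(object);
}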