Use typed allocations for the hash table entries (see the sketch below). This ensures that the GC won't treat the jump-table bitfield as a pointer.

Store the refcount structures inside the hash table, rather than in a chained structure. This uses less space and should be cheaper for the GC to scan (it touches less cache).
Branch: main
Author: theraven, 15 years ago
Parent: 32b4432ebc
Commit: 0c72e2e418
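
Background for the commit message above: Boehm GC's typed-allocation interface (declared in <gc/gc_typed.h>) lets the caller describe which words of an object may contain pointers, so the conservative collector skips everything else. A minimal, self-contained sketch of that standard API, independent of this commit:

#include <gc/gc.h>
#include <gc/gc_typed.h>

struct cell
{
	size_t bits;   /* plain data: must never be mistaken for a pointer */
	void *ptr;     /* the only word the collector should trace */
};

int main(void)
{
	GC_INIT();
	/* One bit per word of the struct; set only the pointer field's bit. */
	GC_word bitmap[GC_BITMAP_SIZE(struct cell)] = {0};
	GC_set_bit(bitmap, GC_WORD_OFFSET(struct cell, ptr));
	GC_descr descr = GC_make_descriptor(bitmap, GC_WORD_LEN(struct cell));
	/* Allocate 16 cells; the GC scans only the ptr word in each one. */
	struct cell *cells = GC_CALLOC_EXPLICITLY_TYPED(16, sizeof(struct cell), descr);
	cells[0].bits = (size_t)cells;  /* even a pointer-shaped value here is ignored */
	return 0;
}

This is exactly the facility the hash table gains below: a descriptor built from MAP_TABLE_TYPES_BITMAP tells the collector which word of each table cell is the strong pointer.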

@@ -11,6 +11,9 @@ release include:
   collected environment. This uses the Boehm garbage collector and is enabled
   by default. To build without garbage collection specify the boehm_gc=no
   option when building.
+- The runtime will now use Boehm GC for several internal data structures, if it
+  is built with GC enabled. This avoids the need for defensive programming
+  with respect to thread safety in several places.
 - This is the first release to provide a superset of the functionality provided
   by the Mac Objective-C runtime, as shipped with OS X 10.6.
 - The LLVM optimisation passes have been improved and better tested. Code

@@ -355,17 +355,17 @@ void objc_assertRegisteredThreadWithCollector()
 /**
  * Structure stored for each GC
  */
-struct gc_refcount
+static struct gc_refcount
 {
 	/** Reference count */
-	int refCount;
+	intptr_t refCount;
 	/** Strong pointer */
 	id ptr;
-};
+} null_refcount = {0};
-static int refcount_compare(const void *ptr, const struct gc_refcount *rc)
+static int refcount_compare(const void *ptr, struct gc_refcount rc)
 {
-	return ptr == rc->ptr;
+	return ptr == rc.ptr;
 }
 static uint32_t ptr_hash(const void *ptr)
 {
@@ -373,14 +373,23 @@ static uint32_t ptr_hash(const void *ptr)
 	// always be 0, which is not so useful for a hash value
 	return ((uintptr_t)ptr >> 4) | ((uintptr_t)ptr << ((sizeof(id) * 8) - 4));
 }
-static uint32_t refcount_hash(const struct gc_refcount *rc)
+static uint32_t refcount_hash(struct gc_refcount rc)
 {
+	return ptr_hash(rc.ptr);
+}
+static int isEmpty(struct gc_refcount rc)
+{
-	return ptr_hash(rc->ptr);
+	return rc.ptr == NULL;
 }
+#define MAP_TABLE_VALUE_NULL isEmpty
 #define MAP_TABLE_NAME refcount
 #define MAP_TABLE_COMPARE_FUNCTION refcount_compare
 #define MAP_TABLE_HASH_KEY ptr_hash
 #define MAP_TABLE_HASH_VALUE refcount_hash
+#define MAP_TABLE_VALUE_TYPE struct gc_refcount
+#define MAP_TABLE_VALUE_PLACEHOLDER null_refcount
+#define MAP_TABLE_TYPES_BITMAP (1<<(offsetof(struct gc_refcount, ptr) / sizeof(void*)))
 #define MAP_TABLE_ACCESS_BY_REFERENCE
 #include "hash_table.h"
 static refcount_table *refcounts;
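
A worked example of the MAP_TABLE_TYPES_BITMAP value just defined (routine LP64 arithmetic, not part of the commit): with intptr_t refCount in word 0 of struct gc_refcount and id ptr in word 1:

/* offsetof(struct gc_refcount, ptr) == 8 and sizeof(void*) == 8, so:
 * MAP_TABLE_TYPES_BITMAP == 1 << (8 / 8) == 1 << 1 == 0b10
 * i.e. only word 1 (the id ptr) of each stored value is a pointer. */

hash_table.h shifts this bitmap one word further (see the GC_make_descriptor call later in this commit) to account for the cell's own bookkeeping word ahead of the value.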
@@ -394,10 +403,8 @@ id objc_gc_retain(id object)
 		refcount = refcount_table_get(refcounts, object);
 		if (NULL == refcount)
 		{
-			refcount = GC_MALLOC_UNCOLLECTABLE(sizeof(struct gc_refcount));
-			refcount->ptr = object;
-			refcount->refCount = 1;
-			refcount_insert(refcounts, refcount);
+			struct gc_refcount rc = { 1, object };
+			refcount_insert(refcounts, rc);
 			return object;
 		}
 	}
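
For orientation, the hunk above sits inside a check/lock/re-check sequence; the lines outside the hunk are not shown, so the following is a hedged reconstruction of the overall shape rather than the verbatim function:

id objc_gc_retain(id object)
{
	struct gc_refcount *refcount = refcount_table_get(refcounts, object);
	if (NULL == refcount)
	{
		LOCK_FOR_SCOPE(&(refcounts->lock));
		/* Re-check under the lock: another thread may have inserted the
		 * entry between the unlocked lookup and acquiring the lock. */
		refcount = refcount_table_get(refcounts, object);
		if (NULL == refcount)
		{
			struct gc_refcount rc = { 1, object };
			refcount_insert(refcounts, rc);
			return object;
		}
	}
	__sync_fetch_and_add(&(refcount->refCount), 1);
	return object;
}

The change in the hunk is the middle of this: instead of allocating a standalone gc_refcount with GC_MALLOC_UNCOLLECTABLE and inserting a pointer to it, the new code builds the value on the stack and lets refcount_insert copy it into the table itself.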
@@ -413,20 +420,14 @@ void objc_gc_release(id object)
 	if (0 == __sync_sub_and_fetch(&(refcount->refCount), 1))
 	{
 		LOCK_FOR_SCOPE(&(refcounts->lock));
-		refcount_remove(refcounts, object);
+		refcount->ptr = 0;
 		__sync_synchronize();
 		// If another thread has incremented the reference count while we were
 		// doing this, then we need to add the count back into the table,
 		// otherwise we can carry on.
-		if (__sync_bool_compare_and_swap(&(refcount->refCount), 0, 0))
+		if (!__sync_bool_compare_and_swap(&(refcount->refCount), 0, 0))
 		{
-			// This doesn't free the object, it just removes the explicit
-			// reference
-			GC_free(refcount);
-		}
-		else
-		{
-			refcount_insert(refcounts, refcount);
+			refcount->ptr = object;
 		}
 	}
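
The hunk above replaces the old free/reinsert dance with an in-place tombstone: clear the pointer, fence, then atomically verify that no concurrent retain resurrected the count; if one did, restore the pointer. A standalone sketch of the same pattern with hypothetical names (the __sync builtins are the real GCC/Clang primitives used above):

struct entry
{
	long count;
	void *key;
};

void release_entry(struct entry *e, void *key)
{
	if (0 == __sync_sub_and_fetch(&e->count, 1))
	{
		e->key = 0;            /* tombstone: lookups now miss this entry */
		__sync_synchronize();  /* publish the tombstone before re-checking */
		/* CAS(count, 0, 0) is an atomic "is the count still zero?" test.
		 * If a concurrent retain raced in, undo the removal. */
		if (!__sync_bool_compare_and_swap(&e->count, 0, 0))
		{
			e->key = key;      /* restore: the entry is live again */
		}
	}
}

Storing the refcount in the table cell is what makes this cheap: removing the entry is a single store of NULL into the key word, and undoing it is a single store back, with no allocator traffic.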

@@ -25,11 +25,14 @@
 #ifdef ENABLE_GC
 #	include <gc/gc.h>
+#	include <gc/gc_typed.h>
 #	define CALLOC(x,y) GC_MALLOC(x*y)
 #	define IF_NO_GC(x)
+#	define IF_GC(x) x
 #else
 #	define CALLOC(x,y) calloc(x,y)
 #	define IF_NO_GC(x) x
+#	define IF_GC(x)
 #endif
 #ifndef MAP_TABLE_NAME
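
One property the CALLOC macro relies on (an editor's note, not part of the diff): GC_MALLOC returns zero-initialised memory, so it can stand in for calloc; the difference is that it takes a single size, so the x*y product is computed without calloc's overflow check. Hypothetical usage, where struct foo and n are placeholders:

/* With ENABLE_GC: expands to GC_MALLOC(n * sizeof(struct foo)), zeroed
 * and GC-managed. Without: plain calloc(n, sizeof(struct foo)). */
struct foo *cells = CALLOC(n, sizeof(struct foo));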
@@ -72,6 +75,7 @@ static BOOL PREFIX(_is_null)(void *value)
 {
 	return value == NULL;
 }
+#	define MAP_TABLE_TYPES_BITMAP 1
 #	define MAP_TABLE_VALUE_NULL PREFIX(_is_null)
 #	define MAP_TABLE_VALUE_PLACEHOLDER NULL
 #endif
@@ -105,17 +109,37 @@ typedef struct PREFIX(_table_struct)
 	unsigned int table_size;
 	unsigned int table_used;
 	IF_NO_GC(unsigned int enumerator_count;)
+#	if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
+	GC_descr descr;
+#	endif
 	struct PREFIX(_table_struct) *old;
 	struct PREFIX(_table_cell_struct) *table;
 } PREFIX(_table);
+struct PREFIX(_table_cell_struct) *PREFIX(alloc_cells)(PREFIX(_table) *table, int count)
+{
+#	if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
+	return GC_CALLOC_EXPLICITLY_TYPED(count,
+			sizeof(struct PREFIX(_table_cell_struct)), table->descr);
+#	else
+	return CALLOC(count, sizeof(struct PREFIX(_table_cell_struct)));
+#	endif
+}
 PREFIX(_table) *PREFIX(_create)(uint32_t capacity)
 {
 	PREFIX(_table) *table = CALLOC(1, sizeof(PREFIX(_table)));
 #	ifndef MAP_TABLE_NO_LOCK
 	INIT_LOCK(table->lock);
 #	endif
-	table->table = calloc(capacity, sizeof(struct PREFIX(_table_cell_struct)));
+#	if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
+	// The low word in the bitmap stores the offsets of the next entries
+	GC_word bitmap = (MAP_TABLE_TYPES_BITMAP << 1);
+	table->descr = GC_make_descriptor(&bitmap,
+			sizeof(struct PREFIX(_table_cell_struct)) / sizeof(void*));
+#	endif
+	table->table = PREFIX(alloc_cells)(table, capacity);
 	table->table_size = capacity;
 	return table;
 }
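
To see what this descriptor describes, here is the arithmetic for the refcount table, with the cell layout treated as an assumption (the cell struct is defined earlier in hash_table.h, outside this diff; the in-diff comment confirms its low word holds next-entry offsets rather than a pointer):

/* Assumed LP64 cell layout for the refcount instantiation:
 *   word 0: displacement bitfield (+ padding) -- data, never scanned
 *   word 1: value.refCount (intptr_t)         -- data, never scanned
 *   word 2: value.ptr (id)                    -- the only pointer word
 */
GC_word bitmap = (MAP_TABLE_TYPES_BITMAP << 1);  /* 0b010 << 1 == 0b100 */
/* length == sizeof(cell) / sizeof(void*) == 3 words on this layout */
GC_descr descr = GC_make_descriptor(&bitmap, 3);

The left shift is the whole trick: MAP_TABLE_TYPES_BITMAP positions the pointer bits relative to the stored value, and the extra shift re-bases them relative to the cell.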
@@ -170,11 +194,8 @@ static int PREFIX(_insert)(PREFIX(_table) *table, MAP_TABLE_VALUE_TYPE value);
 static int PREFIX(_table_resize)(PREFIX(_table) *table)
 {
-	// Note: We multiply the table size, rather than the count, by two so that
-	// we get overflow checking in calloc. Two times the size of a cell will
-	// never overflow, but two times the table size might.
-	struct PREFIX(_table_cell_struct) *newArray = CALLOC(table->table_size, 2 *
-			sizeof(struct PREFIX(_table_cell_struct)));
+	struct PREFIX(_table_cell_struct) *newArray =
+			PREFIX(alloc_cells)(table, table->table_size * 2);
 	if (NULL == newArray) { return 0; }
 	// Allocate a new table structure and move the array into that. Now
@@ -565,3 +586,5 @@ PREFIX(_current)(PREFIX(_table) *table,
 #undef CALLOC
 #undef IF_NO_GC
+#undef IF_GC
+#undef MAP_TABLE_TYPES_BITMAP
