remove cmake

main
sandyx86 1 year ago
parent 164f3744fd
commit 9206f48bce

@ -0,0 +1,54 @@
# Build rules for the objc2 static library (and an optional Windows DLL).
SHELL=/bin/sh
CC=clang
# When building with clang, CCX is the prefix "clan" so that "$(CCX)g++"
# expands to "clang++"; otherwise CCX is empty and plain g++ is used.
ifeq ($(CC),clang)
CCX=clan
endif
OS=
ifeq ($(OS),windows)
CC=x86_64-w64-mingw32-gcc
endif
LIB_NAME=objc2
SRC=src
BUILD=build
INCLUDE= -I include -I .
CFLAGS= -std=gnu11
CPPFLAGS= -I /usr/include/x86_64-linux-gnu/c++/11 -I /usr/include/c++/11
C_FILES := $(shell find $(SRC) -name '*.c')
M_FILES := $(shell find $(SRC) -name '*.m')
CPP_FILES := $(shell find $(SRC) -name '*.cpp')
MM_FILES := $(shell find $(SRC) -name '*.mm')
O_FILES := $(patsubst $(SRC)/%.c, $(BUILD)/%.o, $(C_FILES))
O_FILES += $(patsubst $(SRC)/%.m, $(BUILD)/%.o, $(M_FILES))
# BUG FIX: the pattern previously used %.cc, which never matched the *.cpp
# paths collected above, so raw .cpp paths leaked into O_FILES.
O_FILES += $(patsubst $(SRC)/%.cpp, $(BUILD)/%.o, $(CPP_FILES))
O_FILES += $(patsubst $(SRC)/%.mm, $(BUILD)/%.o, $(MM_FILES))
.PHONY: all clean dll
all: $(LIB_NAME)
# BUG FIX: $< expands to only the FIRST object file; $^ links all of them.
dll: $(O_FILES)
	$(CC) $^ -shared -o objc2.dll
$(LIB_NAME): $(O_FILES)
	ar rcs lib$(LIB_NAME).a $(O_FILES)
$(BUILD)/%.o: $(SRC)/%.c
	@mkdir -p $(@D)
	$(CC) $(CFLAGS) $(INCLUDE) -c $< -o $@
$(BUILD)/%.o: $(SRC)/%.m
	@mkdir -p $(@D)
	$(CC) -fobjc-exceptions $(CFLAGS) -DEMBEDDED_BLOCKS_RUNTIME $(INCLUDE) -c $< -o $@
# BUG FIX: the rule matched %.cc while the sources found above are *.cpp.
$(BUILD)/%.o: $(SRC)/%.cpp
	@mkdir -p $(@D)
	$(CCX)g++ $(CPPFLAGS) $(INCLUDE) -c $< -o $@
# NOTE(review): .mm (Objective-C++) files are compiled without
# -fobjc-exceptions or -DEMBEDDED_BLOCKS_RUNTIME, unlike .m — confirm intended.
$(BUILD)/%.o: $(SRC)/%.mm
	@mkdir -p $(@D)
	$(CCX)g++ $(CPPFLAGS) $(INCLUDE) -c $< -o $@
clean: build
	rm -f $(BUILD)/*.o

@ -0,0 +1,5 @@
// Just a list of dependencies I had to install:
gcc-multilib
g++-multilib
robin-map-dev

@ -0,0 +1 @@
#include <objc/blocks_runtime.h>

@ -0,0 +1 @@
#include <objc/blocks_private.h>

@ -0,0 +1,27 @@
/** Declaration of a helper function for getting class references from aliases.
Copyright (c) 2011 Free Software Foundation, Inc.
Written by: Niels Grewe <niels.grewe@halbordnung.de>
Created: March 2011
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "objc/runtime.h"
/**
 * Returns the class referred to by the alias `alias_name` (see the file
 * header: helper for getting class references from aliases).  Declaration
 * only — the implementation lives in the runtime sources.
 */
OBJC_PUBLIC Class alias_getClass(const char *alias_name);

@ -0,0 +1,29 @@
// Hard-coded offsets into runtime data structures, selected per ABI.
// NOTE(review): these must be kept in sync with the layout of
// struct objc_class and the slot structures — verify if either changes.
#ifdef __LP64__
#define DTABLE_OFFSET 64
#define SMALLOBJ_BITS 3
#define SHIFT_OFFSET 0
#define DATA_OFFSET 8
#define SLOT_OFFSET 0
#elif defined(_WIN64)
// long is 32 bits on Win64, so struct objc_class is smaller. All other offsets are the same.
#define DTABLE_OFFSET 56
#define SMALLOBJ_BITS 3
#define SHIFT_OFFSET 0
#define DATA_OFFSET 8
#define SLOT_OFFSET 0
#else
// 32-bit targets: only a single tag bit is available in an aligned pointer.
#define DTABLE_OFFSET 32
#define SMALLOBJ_BITS 1
#define SHIFT_OFFSET 0
#define DATA_OFFSET 8
#define SLOT_OFFSET 0
#endif
// Mask selecting the small-object tag bits of a pointer.
#define SMALLOBJ_MASK ((1<<SMALLOBJ_BITS) - 1)
// Page size configuration
#if defined(__powerpc64__)
# define PAGE_SHIFT 16
#else
# define PAGE_SHIFT 12
#endif
// Derived page size in bytes (4KiB by default, 64KiB on ppc64).
#define PAGE_SIZE (1<<PAGE_SHIFT)

@ -0,0 +1,125 @@
/**
 * Block descriptor flags.
 * NOTE(review): these bit values are presumably part of the blocks ABI shared
 * with the compiler — do not renumber without confirming against the compiler
 * that emits them.
 */
enum block_flags
{
	/**
	 * The block descriptor contains copy and dispose helpers.
	 */
	BLOCK_HAS_COPY_DISPOSE = (1 << 25),
	/**
	 * The helpers have C++ code.
	 */
	BLOCK_HAS_CTOR = (1 << 26),
	/**
	 * Block is stored in global memory and does not need to be copied.
	 */
	BLOCK_IS_GLOBAL = (1 << 28),
	/**
	 * Block function uses a calling convention that returns a structure via a
	 * pointer passed in by the caller.
	 */
	BLOCK_USE_SRET = (1 << 29),
	/**
	 * Block has an Objective-C type encoding.
	 */
	BLOCK_HAS_SIGNATURE = (1 << 30),
	/**
	 * Mask for the reference count in byref structure's flags field. The low
	 * 3 bytes are reserved for the reference count, the top byte for the
	 * flags.
	 */
	BLOCK_REFCOUNT_MASK = 0x00ffffff
};
/**
 * Flags used in the final argument to _Block_object_assign() and
 * _Block_object_dispose(). These indicate the type of copy or dispose to
 * perform.
 */
enum
{
	/**
	 * The value is of some id-like type, and should be copied as an
	 * Objective-C object: i.e. by sending -retain or via the GC assign
	 * functions in GC mode (not yet supported).
	 */
	BLOCK_FIELD_IS_OBJECT = 3,
	/**
	 * The field is a block. This must be copied by the block copy functions.
	 */
	BLOCK_FIELD_IS_BLOCK = 7,
	/**
	 * The field is an indirect reference to a variable declared with the
	 * __block storage qualifier.
	 */
	BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable
	BLOCK_FIELD_IS_WEAK = 16, // declared __weak
	BLOCK_BYREF_CALLER = 128, // called from byref copy/dispose helpers
};
// True when every bit of y is also set in x.
#define IS_SET(x, y) ((x & y) == y)
/*
* Include the block_descriptor_copydispose and block_literal definitions that
* are also made public under different names for use in libdispatch.
*/
#include "objc/blocks_private.h"
/**
 * Block descriptor that does not contain copy and dispose helper functions.
 * NOTE(review): field order is ABI — presumably shared with compiler-emitted
 * block descriptors; do not reorder.
 */
struct Block_descriptor_basic
{
	/**
	 * Reserved for future use, currently always 0.
	 */
	unsigned long int reserved;
	/** Size of the block. */
	unsigned long int size;
	/**
	 * Objective-C type encoding of the block.
	 */
	const char *encoding;
};
/**
 * Structure used for on-stack variables that are referenced by blocks.
 */
struct block_byref_obj
{
	/**
	 * Class pointer. Currently unused and always NULL. Could be used in the
	 * future to support introspection.
	 */
	void *isa;
	/**
	 * The pointer to the structure that contains the real version of the data.
	 * All accesses go via this pointer. If an on-stack byref structure is
	 * copied to the heap, then its forwarding pointer should point to the heap
	 * version. Otherwise it should point to itself.
	 */
	struct block_byref_obj *forwarding;
	/**
	 * Flags and reference count.
	 */
	int flags; //refcount;
	/**
	 * Size of this structure.
	 */
	int size;
	/**
	 * Copy function.
	 */
	void (*byref_keep)(struct block_byref_obj *dst, const struct block_byref_obj *src);
	/**
	 * Dispose function.
	 */
	void (*byref_dispose)(struct block_byref_obj *);
	/**
	 * __block-qualified variables are copied here.
	 */
};

@ -0,0 +1,62 @@
/**
 * buffer.h defines a simple dynamic array that is used to store temporary
 * values for later processing. Define BUFFER_TYPE before including this file.
 */
#include <stdlib.h>
// Capacity of the static (allocation-free) part of the buffer, and the
// initial capacity of the heap-allocated overflow part.
#define BUFFER_SIZE 128
// Fixed part of the buffer: logical indices [0, BUFFER_SIZE) live here.
static BUFFER_TYPE buffered_object_buffer[BUFFER_SIZE];
// Lazily allocated overflow for logical indices >= BUFFER_SIZE.
static BUFFER_TYPE *buffered_object_overflow;
// Number of objects currently buffered (across both parts).
static int buffered_objects;
// Capacity, in elements, of buffered_object_overflow.
static int buffered_object_overflow_space;
/**
 * Stores `cat` at logical index `i`.  Indices below BUFFER_SIZE go in the
 * static buffer; larger indices go into a lazily allocated overflow array on
 * the heap, which grows by doubling.
 *
 * Aborts on allocation failure: there is no way to report an error to the
 * caller, and the previous code would have dereferenced NULL anyway.
 */
static void set_buffered_object_at_index(BUFFER_TYPE cat, unsigned int i)
{
	if (i < BUFFER_SIZE)
	{
		buffered_object_buffer[i] = cat;
		return;
	}
	// Translate to an index into the overflow region.
	i -= BUFFER_SIZE;
	if (NULL == buffered_object_overflow)
	{
		buffered_object_overflow =
			calloc(BUFFER_SIZE, sizeof(BUFFER_TYPE));
		// BUG FIX: calloc() result was previously used unchecked.
		if (NULL == buffered_object_overflow)
		{
			abort();
		}
		buffered_object_overflow_space = BUFFER_SIZE;
	}
	// Double the capacity until the index fits.
	while (i >= (unsigned int)buffered_object_overflow_space)
	{
		int new_space = buffered_object_overflow_space << 1;
		// BUG FIX: `p = realloc(p, …)` leaked the buffer (and then crashed)
		// on failure; check the result before overwriting the pointer.
		BUFFER_TYPE *grown = realloc(buffered_object_overflow,
			new_space * sizeof(BUFFER_TYPE));
		if (NULL == grown)
		{
			abort();
		}
		buffered_object_overflow = grown;
		buffered_object_overflow_space = new_space;
	}
	buffered_object_overflow[i] = cat;
}
/**
 * Returns the object stored at logical index `i`, reading from the static
 * buffer for small indices and from the overflow array otherwise.
 */
static BUFFER_TYPE buffered_object_at_index(unsigned int i)
{
	return (i < BUFFER_SIZE) ? buffered_object_buffer[i]
	                         : buffered_object_overflow[i - BUFFER_SIZE];
}
/**
 * Compacts the buffer by sliding every non-NULL entry down over the NULL
 * holes, preserving relative order, and updates the element count.
 */
static void compact_buffer(void)
{
	unsigned count = buffered_objects;
	unsigned out = 0;
	for (unsigned idx = 0; idx < count; idx++)
	{
		BUFFER_TYPE obj = buffered_object_at_index(idx);
		if (NULL == obj)
		{
			continue;
		}
		set_buffered_object_at_index(obj, out);
		out++;
	}
	buffered_objects = out;
}

@ -0,0 +1,62 @@
#pragma once
/**
 * The structure used to represent a category.
 *
 * This provides a set of new definitions that are used to replace those
 * contained within a class.
 * NOTE(review): field order is ABI (matched by compiler-emitted metadata) —
 * do not reorder.
 */
struct objc_category
{
	/**
	 * The name of this category.
	 */
	const char *name;
	/**
	 * The name of the class to which this category should be applied.
	 */
	const char *class_name;
	/**
	 * The list of instance methods to add to the class.
	 */
	struct objc_method_list *instance_methods;
	/**
	 * The list of class methods to add to the class.
	 */
	struct objc_method_list *class_methods;
	/**
	 * The list of protocols adopted by this category.
	 */
	struct objc_protocol_list *protocols;
	/**
	 * The list of properties added by this category
	 */
	struct objc_property_list *properties;
	/**
	 * Class properties.
	 */
	struct objc_property_list *class_properties;
};
/**
 * Legacy (GCC-ABI) category layout.  Identical to the prefix of
 * struct objc_category but uses the GCC-era method list types and has no
 * property lists.
 */
struct objc_category_gcc
{
	/**
	 * The name of this category.
	 */
	const char *name;
	/**
	 * The name of the class to which this category should be applied.
	 */
	const char *class_name;
	/**
	 * The list of instance methods to add to the class.
	 */
	struct objc_method_list_gcc *instance_methods;
	/**
	 * The list of class methods to add to the class.
	 */
	struct objc_method_list_gcc *class_methods;
	/**
	 * The list of protocols adopted by this category.
	 */
	struct objc_protocol_list *protocols;
};

@ -0,0 +1,468 @@
#ifndef __OBJC_CLASS_H_INCLUDED
#define __OBJC_CLASS_H_INCLUDED
#include "visibility.h"
#include "objc/runtime.h"
#include <stdint.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Overflow bitfield. Used for bitfields that are more than 63 bits.
 * Referenced (via a uintptr_t with the low bit clear) by
 * objc_bitfield_test().
 */
struct objc_bitfield
{
	/**
	 * The number of elements in the values array.
	 */
	int32_t length;
	/**
	 * An array of values. Each 32 bits is stored in the native endian for the
	 * platform.
	 */
	int32_t values[0];
};
/**
 * Tests whether bit `field` is set in a bitfield.  Two representations are
 * supported: if the low bit of `bitfield` is set, the remaining bits of the
 * word store the bitmap inline (so field n lives at bit n+1); otherwise
 * `bitfield` is a pointer to a struct objc_bitfield, used for fields beyond
 * the inline capacity.
 */
static inline BOOL objc_bitfield_test(uintptr_t bitfield, uint64_t field)
{
	if (bitfield & 1)
	{
		// BUG FIX: the shift must be performed on a 64-bit operand; the
		// original shifted the int constant 1, which is undefined behaviour
		// for field >= 31.
		uint64_t bit = ((uint64_t)1)<<(field+1);
		return (bitfield & bit) == bit;
	}
	struct objc_bitfield *bf = (struct objc_bitfield*)bitfield;
	uint64_t byte = field / 32;
	if (byte >= (uint64_t)bf->length)
	{
		return NO;
	}
	// BUG FIX: 1<<31 overflows int (UB); do the test in unsigned 32-bit
	// arithmetic, matching the 32-bit width of the values[] entries.
	uint32_t bit = ((uint32_t)1)<<(field%32);
	return (((uint32_t)bf->values[byte]) & bit) == bit;
}
// begin: objc_class
/**
 * Metadata describing an Objective-C class (v2 ABI).
 * NOTE(review): field order is ABI, shared with compiler-emitted class
 * structures — do not reorder.
 */
struct objc_class
{
	/**
	 * Pointer to the metaclass for this class. The metaclass defines the
	 * methods used when a message is sent to the class, rather than an
	 * instance.
	 */
	Class isa;
	/**
	 * Pointer to the superclass. The compiler will set this to the name of
	 * the superclass, the runtime will initialize it to point to the real
	 * class.
	 */
	Class super_class;
	/**
	 * The name of this class. Set to the same value for both the class and
	 * its associated metaclass.
	 */
	const char *name;
	/**
	 * The version of this class. This is not used by the language, but may be
	 * set explicitly at class load time.
	 */
	long version;
	/**
	 * A bitfield containing various flags. See the objc_class_flags
	 * enumerated type for possible values.
	 */
	unsigned long info;
	/**
	 * The size of this class. For classes using the non-fragile ABI, the
	 * compiler will set this to a negative value The absolute value will be
	 * the size of the instance variables defined on just this class. When
	 * using the fragile ABI, the instance size is the size of instances of
	 * this class, including any instance variables defined on superclasses.
	 *
	 * In both cases, this will be set to the size of an instance of the class
	 * after the class is registered with the runtime.
	 */
	long instance_size;
	/**
	 * Metadata describing the instance variables in this class.
	 */
	struct objc_ivar_list *ivars;
	/**
	 * Metadata for defining the mappings from selectors to IMPs. Linked
	 * list of method list structures, one per class and one per category.
	 */
	struct objc_method_list *methods;
	/**
	 * The dispatch table for this class. Initialized and maintained by the
	 * runtime.
	 */
	void *dtable;
	/**
	 * A pointer to the first subclass for this class. Filled in by the
	 * runtime.
	 */
	Class subclass_list;
	/**
	 * Pointer to the .cxx_construct method if one exists. This method needs
	 * to be called outside of the normal dispatch mechanism.
	 */
	IMP cxx_construct;
	/**
	 * Pointer to the .cxx_destruct method if one exists. This method needs to
	 * be called outside of the normal dispatch mechanism.
	 */
	IMP cxx_destruct;
	/**
	 * A pointer to the next sibling class to this. You may find all
	 * subclasses of a given class by following the subclass_list pointer and
	 * then subsequently following the sibling_class pointers in the
	 * subclasses.
	 */
	Class sibling_class;
	/**
	 * Metadata describing the protocols adopted by this class. Not used by
	 * the runtime.
	 */
	struct objc_protocol_list *protocols;
	/**
	 * Linked list of extra data attached to this class.
	 */
	struct reference_list *extra_data;
	/**
	 * The version of the ABI used for this class. Currently always zero for v2
	 * ABI classes.
	 */
	long abi_version;
	/**
	 * List of declared properties on this class (NULL if none).
	 */
	struct objc_property_list *properties;
};
// end: objc_class
/**
 * Legacy (GNUstep v1 ABI) class structure.  Shares its leading fields with
 * struct objc_class; the trailing fields are gated on abi_version.
 */
struct objc_class_gsv1
{
	/**
	 * Pointer to the metaclass for this class. The metaclass defines the
	 * methods used when a message is sent to the class, rather than an
	 * instance.
	 */
	Class isa;
	/**
	 * Pointer to the superclass. The compiler will set this to the name of
	 * the superclass, the runtime will initialize it to point to the real
	 * class.
	 */
	Class super_class;
	/**
	 * The name of this class. Set to the same value for both the class and
	 * its associated metaclass.
	 */
	const char *name;
	/**
	 * The version of this class. This is not used by the language, but may be
	 * set explicitly at class load time.
	 */
	long version;
	/**
	 * A bitfield containing various flags. See the objc_class_flags
	 * enumerated type for possible values.
	 */
	unsigned long info;
	/**
	 * The size of this class. For classes using the non-fragile ABI, the
	 * compiler will set this to a negative value The absolute value will be
	 * the size of the instance variables defined on just this class. When
	 * using the fragile ABI, the instance size is the size of instances of
	 * this class, including any instance variables defined on superclasses.
	 *
	 * In both cases, this will be set to the size of an instance of the class
	 * after the class is registered with the runtime.
	 */
	long instance_size;
	/**
	 * Metadata describing the instance variables in this class.
	 */
	struct objc_ivar_list_gcc *ivars;
	/**
	 * Metadata for defining the mappings from selectors to IMPs. Linked
	 * list of method list structures, one per class and one per category.
	 */
	struct objc_method_list_gcc *methods;
	/**
	 * The dispatch table for this class. Initialized and maintained by the
	 * runtime.
	 */
	void *dtable;
	/**
	 * A pointer to the first subclass for this class. Filled in by the
	 * runtime.
	 */
	Class subclass_list;
	/**
	 * A pointer to the next sibling class to this. You may find all
	 * subclasses of a given class by following the subclass_list pointer and
	 * then subsequently following the sibling_class pointers in the
	 * subclasses.
	 */
	Class sibling_class;
	/**
	 * Metadata describing the protocols adopted by this class. Not used by
	 * the runtime.
	 */
	struct objc_protocol_list *protocols;
	/**
	 * Linked list of extra data attached to this class.
	 */
	struct reference_list *extra_data;
	/**
	 * New ABI. The following fields are only available with classes compiled to
	 * support the new ABI. You may test whether any given class supports this
	 * ABI by using the CLS_ISNEW_ABI() macro.
	 */
	/**
	 * The version of the ABI used for this class. Zero indicates the ABI first
	 * implemented by clang 1.0. One indicates the presence of bitmaps
	 * indicating the offsets of strong, weak, and unretained ivars. Two
	 * indicates that the new ivar structure is used.
	 */
	long abi_version;
	/**
	 * Array of pointers to variables where the runtime will store the ivar
	 * offset. These may be used for faster access to non-fragile ivars if all
	 * of the code is compiled for the new ABI. Each of these pointers should
	 * have the mangled name __objc_ivar_offset_value_{class name}.{ivar name}
	 *
	 * When using the compatible non-fragile ABI, this faster form should only be
	 * used for classes declared in the same compilation unit.
	 *
	 * The compiler should also emit symbols of the form
	 * __objc_ivar_offset_{class name}.{ivar name} which are pointers to the
	 * offset values. These should be emitted as weak symbols in every module
	 * where they are used. The legacy-compatible ABI uses these with a double
	 * layer of indirection.
	 */
	int **ivar_offsets;
	/**
	 * List of declared properties on this class (NULL if none). This contains
	 * the accessor methods for each property.
	 */
	struct objc_property_list_gsv1 *properties;
	/**
	 * GC / ARC ABI: Fields below this point only exist if abi_version is >= 1.
	 */
	/**
	 * The location of all strong pointer ivars declared by this class.
	 *
	 * If the low bit of this field is 0, then this is a pointer to an
	 * objc_bitfield structure. If the low bit is 1, then the remaining 63
	 * bits are set, from low to high, for each ivar in the object that is a
	 * strong pointer.
	 */
	uintptr_t strong_pointers;
	/**
	 * The location of all zeroing weak pointer ivars declared by this class.
	 * The format of this field is the same as the format of the
	 * strong_pointers field.
	 */
	uintptr_t weak_pointers;
};
/**
 * Structure representing the GCC ABI class structure. This is only ever
 * required so that we can take its size - struct objc_class begins with the
 * same fields, and you can test the new abi flag to tell whether it is safe to
 * access the subsequent fields.
 */
struct objc_class_gcc
{
	/** Metaclass pointer. */
	Class isa;
	/** Superclass (or class name before resolution). */
	Class super_class;
	/** Class name. */
	const char *name;
	/** Class version (unused by the language). */
	long version;
	/** Flag bitfield; see enum objc_class_flags. */
	unsigned long info;
	/** Size of an instance of this class. */
	long instance_size;
	/** Instance variable metadata (GCC list type). */
	struct objc_ivar_list_gcc *ivars;
	/**
	 * Method list.
	 * NOTE(review): declared with the non-GCC list type even though ivars
	 * uses objc_ivar_list_gcc.  Harmless while this struct is only used for
	 * its size (pointer-sized either way), but confirm before dereferencing.
	 */
	struct objc_method_list *methods;
	/** Dispatch table. */
	void *dtable;
	/** First subclass. */
	Class subclass_list;
	/** Next sibling class. */
	Class sibling_class;
	/** Adopted protocols. */
	struct objc_protocol_list *protocols;
	/** GC type description (GCC ABI only). */
	void *gc_object_type;
};
/**
 * An enumerated type describing all of the valid flags that may be used in the
 * info field of a class.
 */
enum objc_class_flags
{
	/** This class structure represents a metaclass. */
	objc_class_flag_meta = (1<<0),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved1 = (1<<1),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved2 = (1<<2),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved3 = (1<<3),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved4 = (1<<4),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved5 = (1<<5),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved6 = (1<<6),
	/** Reserved for future ABI versions. */
	objc_class_flag_reserved7 = (1<<7),
	/**
	 * This class has been sent a +initialize message. This message is sent
	 * exactly once to every class that is sent a message by the runtime, just
	 * before the first other message is sent.
	 *
	 * For direct method support, this is now part of the public ABI.
	 */
	objc_class_flag_initialized = (1<<8),
	/**
	 * The class has been initialized by the runtime. Its super_class pointer
	 * should now point to a class, rather than a C string containing the class
	 * name, and its subclass and sibling class links will have been assigned,
	 * if applicable.
	 */
	objc_class_flag_resolved = (1<<9),
	/**
	 * This class was created at run time and may be freed.
	 */
	objc_class_flag_user_created = (1<<10),
	/**
	 * Instances of this class provide ARC-safe retain / release /
	 * autorelease implementations.
	 */
	objc_class_flag_fast_arc = (1<<11),
	/**
	 * This class is a hidden class (should not be registered in the class
	 * table nor returned from object_getClass()).
	 */
	objc_class_flag_hidden_class = (1<<12),
	/**
	 * This class is a hidden class used to store associated values.
	 */
	objc_class_flag_assoc_class = (1<<13),
	/**
	 * This class has instances that are never deallocated and are therefore
	 * safe to store directly into weak variables and to skip all reference
	 * count manipulations.
	 */
	objc_class_flag_permanent_instances = (1<<14),
	/**
	 * On a metaclass, guarantees that `+alloc` and `+allocWithZone:` are
	 * trivial wrappers around `class_createInstance`.
	 *
	 * On a class, guarantees that `+init` is trivial.
	 */
	objc_class_flag_fast_alloc_init = (1<<15),
	/**
	 * The class is a block class. Reference count management must be done by
	 * the underlying blocks runtime.
	 */
	objc_class_flag_is_block = (1 << 16),
};
/**
 * Sets the given flag bit(s) in the class's info field.
 * Note: read-modify-write, not atomic.
 */
static inline void objc_set_class_flag(Class aClass,
                                       enum objc_class_flags flag)
{
	unsigned long mask = (unsigned long)flag;
	aClass->info = aClass->info | mask;
}
/**
 * Clears the given flag bit(s) in the class's info field.
 * Note: read-modify-write, not atomic.
 */
static inline void objc_clear_class_flag(Class aClass,
                                         enum objc_class_flags flag)
{
	unsigned long mask = (unsigned long)flag;
	aClass->info = aClass->info & ~mask;
}
/**
 * Checks whether every bit of the given flag is set in the class's info
 * field.
 */
static inline BOOL objc_test_class_flag(Class aClass,
                                        enum objc_class_flags flag)
{
	unsigned long mask = (unsigned long)flag;
	unsigned long present = aClass->info & mask;
	return present == mask;
}
/**
* Adds a class to the class table.
*/
void class_table_insert(Class cls);
/**
* Removes a class from the class table. Must be called with the runtime lock
* held!
*/
void class_table_remove(Class cls);
/**
* Array of classes used for small objects. Small objects are embedded in
* their pointer. In 32-bit mode, we have one small object class (typically
* used for storing 31-bit signed integers. In 64-bit mode then we can have 7,
* because classes are guaranteed to be word aligned.
*/
extern Class SmallObjectClasses[7];
/**
 * Returns whether `obj` is a small (tagged) object, i.e. whether any of the
 * low tag bits of the pointer are set.
 */
static BOOL isSmallObject(id obj)
{
	return ((uintptr_t)obj & OBJC_SMALL_OBJECT_MASK) != 0;
}
__attribute__((always_inline))
// Returns the class of `obj`: for tagged (small) objects the class is
// selected by the tag bits in the pointer, otherwise it is read from isa.
static inline Class classForObject(id obj)
{
	if (UNLIKELY(isSmallObject(obj)))
	{
		// 32-bit: only one tag bit, so a single small-object class.
		if (sizeof(Class) == 4)
		{
			return SmallObjectClasses[0];
		}
		else
		{
			uintptr_t addr = ((uintptr_t)obj);
			// NOTE(review): on 64-bit the mask value can be 7, but
			// SmallObjectClasses is declared with 7 entries (indices 0-6) —
			// confirm index 7 cannot occur, or that the array is sized
			// appropriately.
			return SmallObjectClasses[(addr & OBJC_SMALL_OBJECT_MASK)];
		}
	}
	return obj->isa;
}
/**
 * Returns YES if `cls` is `base` or has `base` anywhere in its superclass
 * chain, NO otherwise.
 */
static inline BOOL classIsOrInherits(Class cls, Class base)
{
	Class current = cls;
	while (Nil != current)
	{
		if (current == base)
		{
			return YES;
		}
		current = current->super_class;
	}
	return NO;
}
/**
* Free the instance variable lists associated with a class.
*/
void freeIvarLists(Class aClass);
/**
* Free the method lists associated with a class.
*/
void freeMethodLists(Class aClass);
#ifdef __cplusplus
} // extern "C"
#endif
#endif //__OBJC_CLASS_H_INCLUDED

@ -0,0 +1,9 @@
/*
 * Name of the class used for compiler-generated constant strings, selectable
 * at build time.
 */
#ifndef CONSTANT_STRING_CLASS
# ifdef GNUSTEP
#   define CONSTANT_STRING_CLASS "NSConstantString"
/* BUG FIX: `#elifdef` is a C23-only directive and is rejected by older
 * preprocessors; use the portable `#elif defined()` form. */
# elif defined(YESLIB)
#   define CONSTANT_STRING_CLASS "YSConstantString"
# else
#   define CONSTANT_STRING_CLASS "NXConstantString"
# endif
#endif

@ -0,0 +1,141 @@
#include "lock.h"
#include "class.h"
#include "sarray2.h"
#include "objc/slot.h"
#include "visibility.h"
#include <stdint.h>
#include <stdio.h>
#ifdef __OBJC_LOW_MEMORY__
typedef struct objc_dtable* dtable_t;
struct objc_slot* objc_dtable_lookup(dtable_t dtable, uint32_t uid);
#else
typedef SparseArray* dtable_t;
# define objc_dtable_lookup SparseArrayLookup
#endif
/**
* Pointer to the sparse array representing the pretend (uninstalled) dtable.
*/
PRIVATE extern dtable_t uninstalled_dtable;
/**
 * Structure for maintaining a linked list of temporary dtables. When sending
 * an +initialize message to a class, we create a temporary dtable and store
 * it in a linked list. This is then used when sending other messages to
 * instances of classes in the middle of initialisation.
 *
 * The list head (temporary_dtables) is protected by initialize_lock, and
 * dtable_for_class() traverses these nodes while holding that lock.
 */
typedef struct _InitializingDtable
{
	/** The class that owns the dtable. */
	Class class;
	/** The dtable for this class. */
	dtable_t dtable;
	/** The next uninstalled dtable in the list. */
	struct _InitializingDtable *next;
} InitializingDtable;
/** Head of the list of temporary dtables. Protected by initialize_lock. */
extern InitializingDtable *temporary_dtables;
extern mutex_t initialize_lock;
/**
 * Returns whether a class has an installed dtable (i.e. its dtable pointer no
 * longer refers to the shared uninstalled placeholder).
 */
static inline int classHasInstalledDtable(struct objc_class *cls)
{
	return uninstalled_dtable != cls->dtable;
}
OBJC_PUBLIC
int objc_sync_enter(id object);
OBJC_PUBLIC
int objc_sync_exit(id object);
/**
 * Returns the dtable for a given class. If we are currently in an +initialize
 * method then this will block if called from a thread other than the one
 * running the +initialize method.
 */
static inline dtable_t dtable_for_class(Class cls)
{
	// Fast path: dtable already installed; no locking required.
	if (classHasInstalledDtable(cls))
	{
		return cls->dtable;
	}
	dtable_t dtable = uninstalled_dtable;
	{
		LOCK_FOR_SCOPE(&initialize_lock);
		// Re-check under the lock: another thread may have installed the
		// dtable between the unlocked check above and acquiring the lock.
		if (classHasInstalledDtable(cls))
		{
			return cls->dtable;
		}
		/* This is a linear search, and so, in theory, could be very slow. It
		 * is O(n) where n is the number of +initialize methods on the stack.
		 * In practice, this is a very small number. Profiling with GNUstep
		 * showed that this peaks at 8. */
		InitializingDtable *buffer = temporary_dtables;
		while (NULL != buffer)
		{
			if (buffer->class == cls)
			{
				dtable = buffer->dtable;
				break;
			}
			buffer = buffer->next;
		}
	}
	if (dtable != uninstalled_dtable)
	{
		// Make sure that we block if +initialize is still running. We do this
		// after we've released the initialize lock, so that the real dtable
		// can be installed. This acquires / releases a recursive mutex, so if
		// this mutex is already held by this thread then this will proceed
		// immediately. If it's held by another thread (i.e. the one running
		// +initialize) then we block here until it's run. We don't need to do
		// this if the dtable is the uninstalled dtable, because that means
		// +initialize has not yet been sent, so we can wait until something
		// triggers it before needing any synchronisation.
		objc_sync_enter((id)cls);
		objc_sync_exit((id)cls);
	}
	return dtable;
}
/**
 * Returns whether a class has had a dtable created. The dtable may be
 * installed, or stored in the look-aside buffer.
 */
static inline int classHasDtable(struct objc_class *cls)
{
	return uninstalled_dtable != dtable_for_class(cls);
}
/**
* Updates the dtable for a class and its subclasses. Must be called after
* modifying a class's method list.
*/
void objc_update_dtable_for_class(Class);
/**
* Updates the dtable for a class and its subclasses. Must be called after
* changing and initializing a class's superclass.
*/
void objc_update_dtable_for_new_superclass(Class, Class);
/**
* Adds a single method list to a class. This is used when loading categories,
* and is faster than completely rebuilding the dtable.
*/
void add_method_list_to_class(Class cls,
struct objc_method_list *list);
/**
* Destroys a dtable.
*/
void free_dtable(dtable_t dtable);
/**
* Checks whether the class supports ARC. This can be used before the dtable
* is installed.
*/
void checkARCAccessorsSlow(Class cls);

@ -0,0 +1,327 @@
/**
* This file is Copyright PathScale 2010. Permission granted to distribute
* according to the terms of the MIT license (see COPYING.MIT)
*/
#include <assert.h>
#include <stdint.h>
// _GNU_SOURCE must be defined for unwind.h to expose some of the functions
// that we want. If it isn't, then we define it and undefine it to make sure
// that it doesn't impact the rest of the program.
#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
# include "unwind.h"
# undef _GNU_SOURCE
#else
# include "unwind.h"
#endif
/**
 * Type used to store pointers to values computed by DWARF expressions.
 */
typedef unsigned char *dw_eh_ptr_t;
// Flag indicating a signed quantity
#define DW_EH_PE_signed 0x08
/// DWARF data encoding types
/// (values follow the standard DW_EH_PE_* pointer-encoding scheme used in
/// .eh_frame / LSDA tables)
enum dwarf_data_encoding
{
	// Unsigned, little-endian, base 128-encoded (variable length)
	DW_EH_PE_uleb128 = 0x01,
	// uint16
	DW_EH_PE_udata2 = 0x02,
	// uint32
	DW_EH_PE_udata4 = 0x03,
	// uint64
	DW_EH_PE_udata8 = 0x04,
	// Signed versions of the above:
	DW_EH_PE_sleb128 = DW_EH_PE_uleb128 | DW_EH_PE_signed,
	DW_EH_PE_sdata2 = DW_EH_PE_udata2 | DW_EH_PE_signed,
	DW_EH_PE_sdata4 = DW_EH_PE_udata4 | DW_EH_PE_signed,
	DW_EH_PE_sdata8 = DW_EH_PE_udata8 | DW_EH_PE_signed
};
/// Extracts the data-type portion (low nibble) of an encoding byte.
static inline enum dwarf_data_encoding get_encoding(unsigned char x)
{
	return (enum dwarf_data_encoding)(x & 0x0f);
}
/// Base to which an encoded value is relative (bits 4-6 of the encoding
/// byte), plus the omit and indirect markers.
enum dwarf_data_relative
{
	// Value is omitted
	DW_EH_PE_omit = 0xff,
	// Absolute pointer value
	DW_EH_PE_absptr = 0x00,
	// Value relative to program counter
	DW_EH_PE_pcrel = 0x10,
	// Value relative to the text segment
	DW_EH_PE_textrel = 0x20,
	// Value relative to the data segment
	DW_EH_PE_datarel = 0x30,
	// Value relative to the start of the function
	DW_EH_PE_funcrel = 0x40,
	// Aligned pointer (Not supported yet - are they actually used?)
	DW_EH_PE_aligned = 0x50,
	// Pointer points to address of real value
	DW_EH_PE_indirect = 0x80
};
/// Extracts the base-relative portion (bits 4-6) of an encoding byte.
static inline enum dwarf_data_relative get_base(unsigned char x)
{
	return (enum dwarf_data_relative)(x & 0x70);
}
/// Nonzero when the encoding byte has the DW_EH_PE_indirect bit set.
static int is_indirect(unsigned char enc)
{
	return (enc & DW_EH_PE_indirect);
}
/// Returns the storage width, in bytes, of a fixed-size encoding.  Aborts on
/// variable-length or unknown encodings.
static inline int dwarf_size_of_fixed_size_field(unsigned char type)
{
	// The low three bits of the encoding select the width.
	switch (type & 7)
	{
		case DW_EH_PE_absptr: return sizeof(void*);
		case DW_EH_PE_udata2: return 2;
		case DW_EH_PE_udata4: return 4;
		case DW_EH_PE_udata8: return 8;
	}
	abort();
}
/**
 * Read an unsigned, little-endian, base-128, DWARF value. Updates *data to
 * point to the end of the value.  *b is set to the number of bits consumed
 * (7 per encoded octet).
 */
static uint64_t read_leb128(unsigned char** data, int *b)
{
	uint64_t uleb = 0;
	unsigned int bit = 0;
	unsigned char digit = 0;
	// We have to read at least one octet, and keep reading until we get to one
	// with the high bit unset
	do
	{
		// This check is a bit too strict - we should also check the highest
		// bit of the digit.
		assert(bit < sizeof(uint64_t) * 8);
		// Get the base 128 digit
		digit = (**data) & 0x7f;
		// Add it to the current value.
		// BUG FIX: the digit must be widened to 64 bits BEFORE shifting;
		// shifting the int-promoted digit by >= 31 bits was undefined
		// behaviour and corrupted any value needing more than 31 bits.
		uleb += ((uint64_t)digit) << bit;
		// Increase the shift value
		bit += 7;
		// Proceed to the next octet
		(*data)++;
		// Terminate when we reach a value that does not have the high bit set
		// (i.e. which was not modified when we mask it with 0x7f)
	} while ((*(*data - 1)) != digit);
	*b = bit;
	return uleb;
}
/// Reads an unsigned LEB128 value, discarding the bit-count out-parameter.
static int64_t read_uleb128(unsigned char** data)
{
	int bits_consumed;
	return read_leb128(data, &bits_consumed);
}
/**
 * Reads a signed LEB128 value: reads the raw payload and sign-extends it if
 * the most significant encoded bit is set.
 */
static int64_t read_sleb128(unsigned char** data)
{
	int bits;
	// Read as if it's signed
	uint64_t uleb = read_leb128(data, &bits);
	// If the most significant bit read is 1, then we need to sign extend it.
	// BUG FIX: guard bits < 64 (a shift by >= 64 is undefined behaviour; a
	// 64-bit-wide payload needs no extension anyway).
	if ((bits < 64) && ((uleb >> (bits - 1)) == 1))
	{
		// Sign extend by setting all bits in front of it to 1.
		// BUG FIX: the original left-shifted a negative int64_t, which is
		// undefined behaviour; perform the shift in unsigned arithmetic.
		uleb |= (~(uint64_t)0) << bits;
	}
	return (int64_t)uleb;
}
/**
 * Reads one value from *data using the data-type portion of `encoding`,
 * advancing *data past it.  Aborts on unhandled encodings.
 * NOTE(review): the fixed-width loads below perform potentially unaligned
 * pointer dereferences — confirm this is acceptable on all supported targets.
 */
static uint64_t read_value(char encoding, unsigned char **data)
{
	enum dwarf_data_encoding type = get_encoding(encoding);
	uint64_t v;
	switch ((int)type)
	{
		// Read fixed-length types
#define READ(dwarf, type) \
		case dwarf:\
			v = (uint64_t)(*(type*)(*data));\
			*data += sizeof(type);\
			break;
		READ(DW_EH_PE_udata2, uint16_t)
		READ(DW_EH_PE_udata4, uint32_t)
		READ(DW_EH_PE_udata8, uint64_t)
		READ(DW_EH_PE_sdata2, int16_t)
		READ(DW_EH_PE_sdata4, int32_t)
		READ(DW_EH_PE_sdata8, int64_t)
		case DW_EH_PE_absptr:
			v = (uint64_t)(*(intptr_t*)(*data));
			*data += sizeof(intptr_t);
			break;
		//READ(DW_EH_PE_absptr, intptr_t)
#undef READ
		// Variable-length types:
		case DW_EH_PE_sleb128:
			v = read_sleb128(data);
			break;
		case DW_EH_PE_uleb128:
			v = read_uleb128(data);
			break;
		default: abort();
	}
	return v;
}
/**
 * Turns a raw value read from the EH tables into an absolute value by adding
 * the base selected by `encoding` (pc-, text-, data-, or function-relative),
 * then following one level of indirection if the indirect bit is set.
 *
 * `start` is the address the value was read from (used for pc-relative
 * values).
 */
static uint64_t resolve_indirect_value(struct _Unwind_Context *c, unsigned char encoding, int64_t v, dw_eh_ptr_t start)
{
	switch (get_base(encoding))
	{
		case DW_EH_PE_pcrel:
			v += (uint64_t)(uintptr_t)start;
			break;
		case DW_EH_PE_textrel:
			v += (uint64_t)(uintptr_t)_Unwind_GetTextRelBase(c);
			break;
		case DW_EH_PE_datarel:
			v += (uint64_t)(uintptr_t)_Unwind_GetDataRelBase(c);
			break;
		case DW_EH_PE_funcrel:
			v += (uint64_t)(uintptr_t)_Unwind_GetRegionStart(c);
			// BUG FIX: this case previously fell through into default.  The
			// fallthrough was harmless (default only breaks) but fragile if a
			// case is ever added between them; make the break explicit.
			break;
		default:
			break;
	}
	// If this is an indirect value, then it is really the address of the real
	// value
	// TODO: Check whether this should really always be a pointer - it seems to
	// be a GCC extensions, so not properly documented...
	if (is_indirect(encoding))
	{
		v = (uint64_t)(uintptr_t)*(void**)(uintptr_t)v;
	}
	return v;
}
/**
 * Reads a self-describing value: the first octet at *data is its encoding,
 * followed by the payload.  Stores the resolved value in *out; leaves *out
 * untouched when the encoding is DW_EH_PE_omit.
 */
static inline void read_value_with_encoding(struct _Unwind_Context *context,
                                            dw_eh_ptr_t *data,
                                            uint64_t *out)
{
	dw_eh_ptr_t start = *data;
	unsigned char encoding = *((*data)++);
	if (DW_EH_PE_omit == encoding)
	{
		return;
	}
	uint64_t raw = read_value(encoding, data);
	*out = resolve_indirect_value(context, encoding, raw, start);
}
/**
 * Parsed header of a language-specific data area (LSDA).  Fields are filled
 * in by parse_lsda() below.
 */
struct dwarf_eh_lsda
{
	/** Start address of the region (function) this LSDA covers. */
	dw_eh_ptr_t region_start;
	/** Base to which landing pad offsets are relative (@LPStart). */
	dw_eh_ptr_t landing_pads;
	/** Type table (@TTBase), or 0 when the table is omitted. */
	dw_eh_ptr_t type_table;
	/** Encoding byte for type table entries. */
	unsigned char type_table_encoding;
	/** Start of the call site table. */
	dw_eh_ptr_t call_site_table;
	/** Action table, located immediately after the call site table. */
	dw_eh_ptr_t action_table;
	/** Encoding byte for call site table entries. */
	unsigned char callsite_encoding;
};
/**
 * Parses the LSDA header at `data` for the function containing the current
 * unwind context's IP.  The layout deviates from the written spec in the
 * ways that GCC's implementation does; see the inline comments.
 */
static inline struct dwarf_eh_lsda parse_lsda(struct _Unwind_Context *context, unsigned char *data)
{
	struct dwarf_eh_lsda lsda;
	lsda.region_start = (dw_eh_ptr_t)(uintptr_t)_Unwind_GetRegionStart(context);
	// If the landing pads are relative to anything other than the start of
	// this region, find out where. This is @LPStart in the spec, although the
	// encoding that GCC uses does not quite match the spec.
	uint64_t v = (uint64_t)(uintptr_t)lsda.region_start;
	read_value_with_encoding(context, &data, &v);
	lsda.landing_pads = (dw_eh_ptr_t)(uintptr_t)v;
	// If there is a type table, find out where it is. This is @TTBase in the
	// spec. Note: we find whether there is a type table pointer by checking
	// whether the leading byte is DW_EH_PE_omit (0xff), which is not what the
	// spec says, but does seem to be how G++ indicates this.
	lsda.type_table = 0;
	lsda.type_table_encoding = *data++;
	if (lsda.type_table_encoding != DW_EH_PE_omit)
	{
		// uleb128 offset from the current position to the type table.
		v = read_uleb128(&data);
		dw_eh_ptr_t type_table = data;
		type_table += v;
		lsda.type_table = type_table;
		//lsda.type_table = (uintptr_t*)(data + v);
	}
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
	// ARM EHABI: force pc-relative indirect type entries regardless of the
	// encoding byte that was read above.
	lsda.type_table_encoding = (DW_EH_PE_pcrel | DW_EH_PE_indirect);
#endif
	lsda.callsite_encoding = (enum dwarf_data_encoding)(*(data++));
	// Action table is immediately after the call site table
	lsda.action_table = data;
	uintptr_t callsite_size = (uintptr_t)read_uleb128(&data);
	lsda.action_table = data + callsite_size;
	// Call site table is immediately after the header
	lsda.call_site_table = (dw_eh_ptr_t)data;
	return lsda;
}
/**
 * Result of a call site table lookup: where to land and which action record
 * (if any) applies.  Either field may be 0.
 */
struct dwarf_eh_action
{
	/** Address of the landing pad to jump to, or 0 to keep unwinding. */
	dw_eh_ptr_t landing_pad;
	/** Pointer into the action table, or 0 if there is no action record. */
	dw_eh_ptr_t action_record;
};
/**
 * Look up the landing pad that corresponds to the current invoke.
 * Returns a zeroed result if the IP falls in no call site entry.
 */
__attribute__((unused))
static struct dwarf_eh_action
dwarf_eh_find_callsite(struct _Unwind_Context *context, struct dwarf_eh_lsda *lsda)
{
	struct dwarf_eh_action result = { 0, 0 };
	// IP relative to the start of the region, matching call site entries.
	uint64_t ip = _Unwind_GetIP(context) - _Unwind_GetRegionStart(context);
	unsigned char *callsite_table = (unsigned char*)lsda->call_site_table;
	while (callsite_table <= lsda->action_table)
	{
		// Once again, the layout deviates from the spec.
		uint64_t call_site_start, call_site_size, landing_pad, action;
		call_site_start = read_value(lsda->callsite_encoding, &callsite_table);
		call_site_size = read_value(lsda->callsite_encoding, &callsite_table);
		// Call site entries are sorted by start address, so stop as soon as
		// one begins past the current IP.
		if (call_site_start > ip) { break; }
		landing_pad = read_value(lsda->callsite_encoding, &callsite_table);
		action = read_uleb128(&callsite_table);
		if (call_site_start <= ip && ip <= call_site_start + call_site_size)
		{
			if (action)
			{
				// Action records are 1-biased so both no-record and zeroth
				// record can be stored.
				result.action_record = lsda->action_table + action - 1;
			}
			// No landing pad means keep unwinding.
			if (landing_pad)
			{
				// Landing pad is the offset from the value in the header
				result.landing_pad = lsda->landing_pads + landing_pad;
			}
			break;
		}
	}
	return result;
}
// Builds the 64-bit _Unwind_Exception class value from eight characters,
// most significant byte first.
#define EXCEPTION_CLASS(a,b,c,d,e,f,g,h) ((((uint64_t)a) << 56) + (((uint64_t)b) << 48) + (((uint64_t)c) << 40) + (((uint64_t)d) << 32) + (((uint64_t)e) << 24) + (((uint64_t)f) << 16) + (((uint64_t)g) << 8) + (((uint64_t)h)))

@ -0,0 +1,60 @@
/**
 * Garbage collection operations.  A vtable of function pointers implemented
 * by a collector backend; the active set is pointed to by the global `gc`
 * declared below.
 */
struct gc_ops
{
	/**
	 * Initialises this collector.
	 */
	void (*init)(void);
	/**
	 * Allocates enough space for a class, followed by some extra bytes.
	 */
	id (*allocate_class)(Class, size_t);
	/**
	 * Frees an object.
	 */
	void (*free_object)(id);
	/**
	 * Allocates some memory that can be used to store pointers. This must be
	 * used instead of malloc() for internal data structures that will store
	 * pointers passed in from outside. The function is expected to zero the
	 * memory that it returns.
	 */
	void* (*malloc)(size_t);
	/**
	 * Frees some memory that was previously used to store pointers.
	 */
	void (*free)(void*);
};
/**
 * The mode for garbage collection, recorded per compiled module.
 */
enum objc_gc_mode
{
	/** This module neither uses, nor supports, garbage collection. */
	GC_None = 0,
	/**
	 * This module uses garbage collection, but also sends retain / release
	 * messages. It can be used with or without GC.
	 */
	GC_Optional = 1,
	/**
	 * This module expects garbage collection and will break without it.
	 */
	GC_Required = 2,
	/**
	 * This module was compiled with automatic reference counting. This
	 * guarantees the use of the non-fragile ABI and means that we could
	 * potentially support GC, although we don't currently.
	 */
	GC_ARC = 3
};
/**
 * The current set of garbage collector operations to use.
 */
extern struct gc_ops *gc;

@ -0,0 +1,577 @@
/**
* hash_table.h provides a template for implementing hopscotch hash tables.
*
* Several macros must be defined before including this file:
*
* MAP_TABLE_NAME defines the name of the table. All of the operations and
* types related to this table will be prefixed with this value.
*
* MAP_TABLE_COMPARE_FUNCTION defines the function used for testing a key
* against a value in the table for equality. This must take two void*
* arguments. The first is the key and the second is the value.
*
* MAP_TABLE_HASH_KEY and MAP_TABLE_HASH_VALUE define a pair of functions that
* takes a key and a value pointer respectively as their argument and returns
* an int32_t representing the hash.
*
* Optionally, MAP_TABLE_STATIC_SIZE may be defined, to define a table type
* which has a static size.
*/
#include "lock.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef ENABLE_GC
# include <gc/gc.h>
# include <gc/gc_typed.h>
# define CALLOC(x,y) GC_MALLOC(x*y)
# define IF_NO_GC(x)
# define IF_GC(x) x
#else
# define CALLOC(x,y) calloc(x,y)
# define IF_NO_GC(x) x
# define IF_GC(x)
#endif
#ifndef MAP_TABLE_NAME
# error You must define MAP_TABLE_NAME.
#endif
#ifndef MAP_TABLE_COMPARE_FUNCTION
# error You must define MAP_TABLE_COMPARE_FUNCTION.
#endif
#ifndef MAP_TABLE_HASH_KEY
# error You must define MAP_TABLE_HASH_KEY
#endif
#ifndef MAP_TABLE_HASH_VALUE
# error You must define MAP_TABLE_HASH_VALUE
#endif
// Horrible multiple indirection to satisfy the weird precedence rules in cpp
#define REALLY_PREFIX_SUFFIX(x,y) x ## y
#define PREFIX_SUFFIX(x, y) REALLY_PREFIX_SUFFIX(x, y)
/**
* PREFIX(x) macro adds the table name prefix to the argument.
*/
#define PREFIX(x) PREFIX_SUFFIX(MAP_TABLE_NAME, x)
/**
* Map tables are protected by a lock by default. Defining MAP_TABLE_NO_LOCK
* will prevent this and make you responsible for synchronization.
*/
#ifdef MAP_TABLE_NO_LOCK
# define MAP_LOCK()
# define MAP_UNLOCK()
#else
# define MAP_LOCK() (LOCK(&table->lock))
# define MAP_UNLOCK() (UNLOCK(&table->lock))
#endif
#ifndef MAP_TABLE_VALUE_TYPE
# define MAP_TABLE_VALUE_TYPE void*
/** Default emptiness test for void* values: a cell is empty iff it is NULL. */
static BOOL PREFIX(_is_null)(void *value)
{
	return value == NULL;
}
# define MAP_TABLE_TYPES_BITMAP 1
# define MAP_TABLE_VALUE_NULL PREFIX(_is_null)
# define MAP_TABLE_VALUE_PLACEHOLDER NULL
#endif
/**
 * A single hopscotch hash table cell.
 */
typedef struct PREFIX(_table_cell_struct)
{
	/**
	 * Hop bitmap.  Bit n is set when a value whose ideal location is this
	 * cell is actually stored n+1 cells ahead (see PREFIX(_insert)).
	 */
	uint32_t secondMaps;
	/** The stored value, or the placeholder if this cell is empty. */
	MAP_TABLE_VALUE_TYPE value;
} *PREFIX(_table_cell);
#ifdef MAP_TABLE_STATIC_SIZE
/**
 * Fixed-size table variant: the cell array is embedded in the table
 * structure and the table can never grow.
 */
typedef struct
{
	/** Lock protecting the table (unused if MAP_TABLE_NO_LOCK is defined). */
	mutex_t lock;
	/** Number of occupied cells. */
	unsigned int table_used;
	/** Number of in-progress enumerations (absent when GC is enabled). */
	IF_NO_GC(unsigned int enumerator_count;)
	/** The cell array. */
	struct PREFIX(_table_cell_struct) table[MAP_TABLE_STATIC_SIZE];
} PREFIX(_table);
static PREFIX(_table) MAP_TABLE_STATIC_NAME;
# ifndef MAP_TABLE_NO_LOCK
// Initialise the static table's lock before main() runs.
__attribute__((constructor)) void static PREFIX(_table_initializer)(void)
{
	INIT_LOCK(MAP_TABLE_STATIC_NAME.lock);
}
# endif
# define TABLE_SIZE(x) MAP_TABLE_STATIC_SIZE
#else
/**
 * Growable table variant: cells live in a separately allocated array that is
 * replaced by one twice the size when the table fills up.
 */
typedef struct PREFIX(_table_struct)
{
	/** Lock protecting the table (unused if MAP_TABLE_NO_LOCK is defined). */
	mutex_t lock;
	/** Number of cells in the array. */
	unsigned int table_size;
	/** Number of occupied cells. */
	unsigned int table_used;
	/** Number of in-progress enumerations (absent when GC is enabled). */
	IF_NO_GC(unsigned int enumerator_count;)
# if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
	/** GC type descriptor for a cell, so the collector scans only pointers. */
	GC_descr descr;
# endif
	/**
	 * The previous table structure, kept reachable during a resize so that
	 * concurrent lookups can still find values not yet copied across.
	 */
	struct PREFIX(_table_struct) *old;
	/** The cell array. */
	struct PREFIX(_table_cell_struct) *table;
} PREFIX(_table);
/** Allocates a zeroed array of `count` cells for `table`. */
static struct PREFIX(_table_cell_struct) *PREFIX(alloc_cells)(PREFIX(_table) *table, int count)
{
# if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
	return GC_CALLOC_EXPLICITLY_TYPED(count,
			sizeof(struct PREFIX(_table_cell_struct)), table->descr);
# else
	return CALLOC(count, sizeof(struct PREFIX(_table_cell_struct)));
# endif
}
/** Allocates and initialises a new table with the given cell capacity. */
static PREFIX(_table) *PREFIX(_create)(uint32_t capacity)
{
	PREFIX(_table) *table = CALLOC(1, sizeof(PREFIX(_table)));
# ifndef MAP_TABLE_NO_LOCK
	INIT_LOCK(table->lock);
# endif
# if defined(ENABLE_GC) && defined(MAP_TABLE_TYPES_BITMAP)
	// The low word in the bitmap stores the offsets of the next entries
	GC_word bitmap = (MAP_TABLE_TYPES_BITMAP << 1);
	table->descr = GC_make_descriptor(&bitmap,
			sizeof(struct PREFIX(_table_cell_struct)) / sizeof (void*));
# endif
	table->table = PREFIX(alloc_cells)(table, capacity);
	table->table_size = capacity;
	return table;
}
/**
 * Creates a table and stores it in *table.  When GC is enabled, *table is
 * registered as a root so the table itself is not collected.
 */
static void PREFIX(_initialize)(PREFIX(_table) **table, uint32_t capacity)
{
#ifdef ENABLE_GC
	GC_add_roots(table, table+1);
#endif
	*table = PREFIX(_create)(capacity);
}
# define TABLE_SIZE(x) (x->table_size)
#endif
#ifdef MAP_TABLE_STATIC_SIZE
/** Static tables cannot grow; resizing always fails. */
static int PREFIX(_table_resize)(PREFIX(_table) *table)
{
	return 0;
}
#else
static int PREFIX(_insert)(PREFIX(_table) *table, MAP_TABLE_VALUE_TYPE value);
/**
 * Doubles the size of the table.  The old cell array stays reachable via
 * table->old while values are copied, so concurrent readers (which take no
 * lock) can still find entries that have not yet moved.  Returns 1 on
 * success, 0 if the new array could not be allocated.
 */
static int PREFIX(_table_resize)(PREFIX(_table) *table)
{
	struct PREFIX(_table_cell_struct) *newArray =
		PREFIX(alloc_cells)(table, table->table_size * 2);
	if (NULL == newArray) { return 0; }
	// Allocate a new table structure and move the array into that. Now
	// lookups will try using that one, if possible.
	PREFIX(_table) *copy = CALLOC(1, sizeof(PREFIX(_table)));
	memcpy(copy, table, sizeof(PREFIX(_table)));
	table->old = copy;
	// Now we make the original table structure point to the new (empty) array.
	table->table = newArray;
	table->table_size *= 2;
	// The table currently has no entries; the copy has them all.
	table->table_used = 0;
	// Finally, copy everything into the new table
	// Note: we should really do this in a background thread. At this stage,
	// we can do the updates safely without worrying about read contention.
	int copied = 0;
	for (uint32_t i=0 ; i<copy->table_size ; i++)
	{
		MAP_TABLE_VALUE_TYPE value = copy->table[i].value;
		if (!MAP_TABLE_VALUE_NULL(value))
		{
			copied++;
			PREFIX(_insert)(table, value);
		}
	}
	// Publish the copied entries before dropping the old table.
	__sync_synchronize();
	table->old = NULL;
# if !defined(ENABLE_GC) && defined(MAP_TABLE_SINGLE_THREAD)
	// Freeing immediately is only safe when no other thread can still hold a
	// pointer into the old array; otherwise the old structure is not freed
	// here (reclaimed by the GC when GC is enabled).
	free(copy->table);
	free(copy);
# endif
	return 1;
}
#endif
/**
 * State for an in-progress enumeration of a table (see PREFIX(_next)).
 */
struct PREFIX(_table_enumerator)
{
	/** The table being enumerated. */
	PREFIX(_table) *table;
	/** Number of occupied cells returned so far. */
	unsigned int seen;
	/** Index of the most recently visited cell. */
	unsigned int index;
};
/**
 * Returns the cell at index `hash` modulo the table size.
 */
static inline PREFIX(_table_cell) PREFIX(_table_lookup)(PREFIX(_table) *table,
                                                        uint32_t hash)
{
	uint32_t index = hash % TABLE_SIZE(table);
	return table->table + index;
}
/**
 * Moves the empty cell `emptyCell` (at index fromHash) closer to toHash by
 * relocating an earlier entry into the gap, recursing until the gap is
 * within 32 cells of toHash (so a hop bit can describe it).  Returns 1 on
 * success, 0 if no movable entry was found.
 */
static int PREFIX(_table_move_gap)(PREFIX(_table) *table, uint32_t fromHash,
		uint32_t toHash, PREFIX(_table_cell) emptyCell)
{
	for (uint32_t hash = fromHash - 32 ; hash < fromHash ; hash++)
	{
		// Get the cell n before the hash.
		PREFIX(_table_cell) cell = PREFIX(_table_lookup)(table, hash);
		// If this node is a primary entry move it down
		if (MAP_TABLE_HASH_VALUE(cell->value) == hash)
		{
			emptyCell->value = cell->value;
			cell->secondMaps |= (1 << ((fromHash - hash) - 1));
			cell->value = MAP_TABLE_VALUE_PLACEHOLDER;
			if (hash - toHash < 32)
			{
				return 1;
			}
			return PREFIX(_table_move_gap)(table, hash, toHash, cell);
		}
		// Otherwise try moving one of this cell's displaced entries into the
		// gap instead.
		int hop = __builtin_ffs(cell->secondMaps);
		if (hop > 0 && (hash + hop) < fromHash)
		{
			PREFIX(_table_cell) hopCell = PREFIX(_table_lookup)(table, hash+hop);
			emptyCell->value = hopCell->value;
			// Update the hop bit for the new offset
			cell->secondMaps |= (1 << ((fromHash - hash) - 1));
			// Clear the hop bit in the original cell
			cell->secondMaps &= ~(1 << (hop - 1));
			hopCell->value = MAP_TABLE_VALUE_PLACEHOLDER;
			if (hash - toHash < 32)
			{
				return 1;
			}
			return PREFIX(_table_move_gap)(table, hash + hop, toHash, hopCell);
		}
	}
	return 0;
}
/**
 * Searches beyond the 32-cell hop neighbourhood of `hash` for a free cell
 * and, when one is found, tries to migrate it back within hop range of
 * `hash`.  Returns 1 on success, 0 when no usable free cell exists.
 */
static int PREFIX(_table_rebalance)(PREFIX(_table) *table, uint32_t hash)
{
	unsigned offset = 32;
	for ( ; offset < TABLE_SIZE(table) ; offset++)
	{
		PREFIX(_table_cell) candidate =
			PREFIX(_table_lookup)(table, hash + offset);
		if (!MAP_TABLE_VALUE_NULL(candidate->value))
		{
			continue;
		}
		// Found a free cell: walk the gap up towards hash.
		return PREFIX(_table_move_gap)(table, hash + offset, hash, candidate);
	}
	return 0;
}
/**
 * Inserts `value` into the table, taking the table lock for the duration.
 * Strategy, in order: store in the value's primary cell; store in one of the
 * following 32 cells and record a hop bit; resize when over 80% full;
 * rebalance to open a nearby gap; resize regardless of load as a last
 * resort.  Returns 1 on success, 0 on failure.
 */
__attribute__((unused))
static int PREFIX(_insert)(PREFIX(_table) *table,
                           MAP_TABLE_VALUE_TYPE value)
{
	MAP_LOCK();
	uint32_t hash = MAP_TABLE_HASH_VALUE(value);
	PREFIX(_table_cell) cell = PREFIX(_table_lookup)(table, hash);
	if (MAP_TABLE_VALUE_NULL(cell->value))
	{
		cell->secondMaps = 0;
		cell->value = value;
		table->table_used++;
		MAP_UNLOCK();
		return 1;
	}
	/* If this cell is full, try the next one. */
	for (unsigned int i=1 ; i<33 ; i++)
	{
		PREFIX(_table_cell) second =
			PREFIX(_table_lookup)(table, hash+i);
		if (MAP_TABLE_VALUE_NULL(second->value))
		{
			cell->secondMaps |= (1 << (i-1));
			second->value = value;
			table->table_used++;
			MAP_UNLOCK();
			return 1;
		}
	}
	/* If the table is full, or nearly full, then resize it. Note that we
	 * resize when the table is at 80% capacity because it's cheaper to copy
	 * everything than spend the next few updates shuffling everything around
	 * to reduce contention. A hopscotch hash table starts to degrade in
	 * performance at around 90% capacity, so stay below that.
	 */
	if (table->table_used > (0.8 * TABLE_SIZE(table)))
	{
		PREFIX(_table_resize)(table);
		MAP_UNLOCK();
		return PREFIX(_insert)(table, value);
	}
	/* If this virtual cell is full, rebalance the hash from this point and
	 * try again. */
	if (PREFIX(_table_rebalance)(table, hash))
	{
		MAP_UNLOCK();
		return PREFIX(_insert)(table, value);
	}
	/** If rebalancing failed, resize even if we are <80% full. This can
	 * happen if your hash function sucks. If you don't want this to happen,
	 * get a better hash function. */
	if (PREFIX(_table_resize)(table))
	{
		MAP_UNLOCK();
		return PREFIX(_insert)(table, value);
	}
	fprintf(stderr, "Insert failed\n");
	MAP_UNLOCK();
	return 0;
}
/**
 * Returns the cell containing the value for `key`, or NULL if the key is not
 * present.  Checks the key's primary cell, then each displaced location
 * recorded in its hop bitmap, then (for growable tables) the old table if a
 * resize is in progress.  Performs no locking.
 */
static void *PREFIX(_table_get_cell)(PREFIX(_table) *table, const void *key)
{
	uint32_t hash = MAP_TABLE_HASH_KEY(key);
	PREFIX(_table_cell) cell = PREFIX(_table_lookup)(table, hash);
	// NOTE(review): displaced entries are only consulted when the primary
	// cell is occupied.  The gap-moving code can empty a primary cell while
	// its hop bits remain set — confirm that no live entry can be reachable
	// only through an empty primary cell here.
	if (!MAP_TABLE_VALUE_NULL(cell->value))
	{
		if (MAP_TABLE_COMPARE_FUNCTION(key, cell->value))
		{
			return cell;
		}
		uint32_t jump = cell->secondMaps;
		// Look at each offset defined by the jump table to find the displaced location.
		for (int hop = __builtin_ffs(jump) ; hop > 0 ; hop = __builtin_ffs(jump))
		{
			PREFIX(_table_cell) hopCell = PREFIX(_table_lookup)(table, hash+hop);
			if (MAP_TABLE_COMPARE_FUNCTION(key, hopCell->value))
			{
				return hopCell;
			}
			// Clear the least significant set bit and try again.
			jump &= ~(1 << (hop-1));
		}
	}
#ifndef MAP_TABLE_STATIC_SIZE
	if (table->old)
	{
		return PREFIX(_table_get_cell)(table->old, key);
	}
#endif
	return NULL;
}
/**
 * Fills `emptyCell` by pulling in its first displaced entry (per the hop
 * bitmap), then recursively does the same for the cell that was vacated.
 * Used during removal to keep displaced chains intact.
 */
__attribute__((unused))
static void PREFIX(_table_move_second)(PREFIX(_table) *table,
                                       PREFIX(_table_cell) emptyCell)
{
	uint32_t jump = emptyCell->secondMaps;
	// Look at each offset defined by the jump table to find the displaced location.
	int hop = __builtin_ffs(jump);
	PREFIX(_table_cell) hopCell =
		PREFIX(_table_lookup)(table, (emptyCell - table->table) + hop);
	emptyCell->value = hopCell->value;
	emptyCell->secondMaps &= ~(1 << (hop-1));
	if (0 == hopCell->secondMaps)
	{
		hopCell->value = MAP_TABLE_VALUE_PLACEHOLDER;
	}
	else
	{
		PREFIX(_table_move_second)(table, hopCell);
	}
}
/**
 * Removes the value stored for `key`, if any, taking the table lock for the
 * duration.  After clearing the cell, displaced entries are shuffled back
 * towards their primary positions so later lookups keep working.
 */
__attribute__((unused))
static void PREFIX(_remove)(PREFIX(_table) *table, void *key)
{
	MAP_LOCK();
	PREFIX(_table_cell) cell = PREFIX(_table_get_cell)(table, key);
	if (NULL == cell)
	{
		// Fix: the early return previously skipped MAP_UNLOCK(), leaking
		// the table lock and deadlocking every later caller.
		MAP_UNLOCK();
		return;
	}
	uint32_t hash = MAP_TABLE_HASH_KEY(key);
	PREFIX(_table_cell) baseCell = PREFIX(_table_lookup)(table, hash);
	if (baseCell && baseCell != cell)
	{
		// NOTE(review): table_size only exists in the growable variant; this
		// function will not compile with MAP_TABLE_STATIC_SIZE — confirm.
		uint32_t displacement = (cell - baseCell + table->table_size) % table->table_size;
		uint32_t jump = 1 << (displacement - 1);
		if ((baseCell->secondMaps & jump))
		{
			// If we are removing a cell stored adjacent to its base due to hash
			// collision, we have to clear the base cell's neighbor bit.
			// Otherwise, a later remove can move the new placeholder value to the head
			// which will cause further chained lookups to fail.
			baseCell->secondMaps &= ~jump;
		}
	}
	// If the cell contains a value, set it to the placeholder and shuffle up
	// everything
	if (0 == cell->secondMaps)
	{
		cell->value = MAP_TABLE_VALUE_PLACEHOLDER;
	}
	else
	{
		PREFIX(_table_move_second)(table, cell);
	}
	table->table_used--;
	MAP_UNLOCK();
}
/**
 * Returns the value stored for `key` (or a pointer to it when
 * MAP_TABLE_ACCESS_BY_REFERENCE is defined).  Returns the placeholder value
 * (or NULL) when the key is not present.  Performs no locking.
 */
__attribute__((unused))
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
static MAP_TABLE_VALUE_TYPE*
#else
static MAP_TABLE_VALUE_TYPE
#endif
PREFIX(_table_get)(PREFIX(_table) *table,
                   const void *key)
{
	PREFIX(_table_cell) cell = PREFIX(_table_get_cell)(table, key);
	if (NULL == cell)
	{
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
		return NULL;
#else
		return MAP_TABLE_VALUE_PLACEHOLDER;
#endif
	}
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
	return &cell->value;
#else
	return cell->value;
#endif
}
/**
 * Sets the value for `key`.  If the key already has a cell its value is
 * updated in place; otherwise the value is inserted as a new entry.
 */
__attribute__((unused))
static void PREFIX(_table_set)(PREFIX(_table) *table, const void *key,
                               MAP_TABLE_VALUE_TYPE value)
{
	PREFIX(_table_cell) cell = PREFIX(_table_get_cell)(table, key);
	if (NULL == cell)
	{
		PREFIX(_insert)(table, value);
		// Fix: previously fell through and dereferenced the NULL cell.
		return;
	}
	cell->value = value;
}
/**
 * Returns the next value for an in-progress enumeration (or a pointer to it
 * when MAP_TABLE_ACCESS_BY_REFERENCE is defined).  On the first call *state
 * must be NULL; it is then allocated and the table's enumerator count is
 * incremented.  When the enumeration completes, *state is freed and the
 * placeholder (or NULL) is returned.
 */
__attribute__((unused))
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
static MAP_TABLE_VALUE_TYPE*
#else
static MAP_TABLE_VALUE_TYPE
#endif
PREFIX(_next)(PREFIX(_table) *table,
              struct PREFIX(_table_enumerator) **state)
{
	if (NULL == *state)
	{
		*state = CALLOC(1, sizeof(struct PREFIX(_table_enumerator)));
		// Make sure that we are not reallocating the table when we start
		// enumerating
		MAP_LOCK();
		(*state)->table = table;
		(*state)->index = -1;
		IF_NO_GC(__sync_fetch_and_add(&table->enumerator_count, 1);)
		MAP_UNLOCK();
	}
	// All occupied cells returned: tear down the enumerator.
	if ((*state)->seen >= (*state)->table->table_used)
	{
#ifndef ENABLE_GC
		MAP_LOCK();
		__sync_fetch_and_sub(&table->enumerator_count, 1);
		MAP_UNLOCK();
		free(*state);
#endif
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
		return NULL;
#else
		return MAP_TABLE_VALUE_PLACEHOLDER;
#endif
	}
	// Walk forward to the next occupied cell.
	while ((++((*state)->index)) < TABLE_SIZE((*state)->table))
	{
		if (!MAP_TABLE_VALUE_NULL((*state)->table->table[(*state)->index].value))
		{
			(*state)->seen++;
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
			return &(*state)->table->table[(*state)->index].value;
#else
			return (*state)->table->table[(*state)->index].value;
#endif
		}
	}
#ifndef ENABLE_GC
	// Should not be reached, but may be if the table is unsafely modified.
	// NOTE(review): this path decrements enumerator_count non-atomically,
	// unlike the atomic decrement above — confirm whether that is intended.
	MAP_LOCK();
	table->enumerator_count--;
	MAP_UNLOCK();
	free(*state);
#endif
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
	return NULL;
#else
	return MAP_TABLE_VALUE_PLACEHOLDER;
#endif
}
/**
 * Returns the current value for an enumerator. This is used when you remove
 * objects during enumeration. It may cause others to be shuffled up the
 * table.
 */
__attribute__((unused))
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
static MAP_TABLE_VALUE_TYPE*
#else
static MAP_TABLE_VALUE_TYPE
#endif
PREFIX(_current)(PREFIX(_table) *table,
                 struct PREFIX(_table_enumerator) **state)
{
	// Note: the table parameter is unused; the enumerator state remembers
	// which table it is walking.
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
	return &(*state)->table->table[(*state)->index].value;
#else
	return (*state)->table->table[(*state)->index].value;
#endif
}
#undef TABLE_SIZE
#undef REALLY_PREFIX_SUFFIX
#undef PREFIX_SUFFIX
#undef PREFIX
#undef MAP_TABLE_NAME
#undef MAP_TABLE_COMPARE_FUNCTION
#undef MAP_TABLE_HASH_KEY
#undef MAP_TABLE_HASH_VALUE
#ifdef MAP_TABLE_STATIC_SIZE
# undef MAP_TABLE_STATIC_SIZE
#endif
#undef MAP_TABLE_VALUE_TYPE
#undef MAP_LOCK
#undef MAP_UNLOCK
#ifdef MAP_TABLE_NO_LOCK
# undef MAP_TABLE_NO_LOCK
#endif
#ifdef MAP_TABLE_SINGLE_THREAD
# undef MAP_TABLE_SINGLE_THREAD
#endif
#undef MAP_TABLE_VALUE_NULL
#undef MAP_TABLE_VALUE_PLACEHOLDER
#ifdef MAP_TABLE_ACCESS_BY_REFERENCE
# undef MAP_TABLE_ACCESS_BY_REFERENCE
#endif
#undef CALLOC
#undef IF_NO_GC
#undef IF_GC
#undef MAP_TABLE_TYPES_BITMAP

@ -0,0 +1,209 @@
#include <assert.h>
#include <inttypes.h>
#include <stdlib.h>
/**
 * Metadata structure for an instance variable.
 *
 */
// begin: objc_ivar
struct objc_ivar
{
	/**
	 * Name of this instance variable.
	 */
	const char *name;
	/**
	 * Type encoding for this instance variable.
	 */
	const char *type;
	/**
	 * The offset from the start of the object. When using the non-fragile
	 * ABI, this is initialized by the compiler to the offset from the start of
	 * the ivars declared by this class. It is then set by the runtime to the
	 * offset from the object pointer.
	 */
	int *offset;
	/**
	 * The size of this ivar. Note that the current ABI limits ivars to 4GB.
	 */
	uint32_t size;
	/**
	 * Flags for this instance variable.  See objc_ivar_flags for the layout
	 * (ownership bits, extended-encoding flag, and log2 alignment).
	 */
	uint32_t flags;
};
// end: objc_ivar
/**
 * Instance variable ownership.  Stored in the low two bits of the ivar
 * flags field (see ivar_ownership_mask).
 */
// begin: objc_ivar_ownership
typedef enum {
	/**
	 * Invalid. Indicates that this is not an instance variable with ownership
	 * semantics.
	 */
	ownership_invalid = 0,
	/**
	 * Strong ownership. Assignments to this instance variable should retain
	 * the assigned value.
	 */
	ownership_strong = 1,
	/**
	 * Weak ownership. This ivar is a zeroing weak reference to an object.
	 */
	ownership_weak = 2,
	/**
	 * Object that has `__unsafe_unretained` semantics.
	 */
	ownership_unsafe = 3
} objc_ivar_ownership;
// end: objc_ivar_ownership
/**
 * Shift applied to the ivar flags field to reach the alignment bits.
 */
static const int ivar_align_shift = 3;
typedef enum {
	/**
	 * Mask applied to the flags field to indicate ownership.
	 */
	ivar_ownership_mask = (1<<0) | (1<<1),
	/**
	 * Flag indicating that the ivar contains an extended type encoding.
	 */
	ivar_extended_type_encoding = (1<<2),
	/**
	 * Mask for describing the alignment. We need 6 bits to represent any
	 * power of two alignment from 0 to 63-bit alignment. There is probably no
	 * point supporting more than 32-bit alignment, because various bits of
	 * offset assume objects are less than 4GB, but there's definitely no point
	 * in supporting 64-bit alignment because we currently don't support any
	 * architectures where an address space could contain more than one 2^64
	 * byte aligned value.
	 */
#ifdef __clang__
	ivar_align_mask = (((1<<6)-1) << ivar_align_shift)
#else
	// Same value as the expression above (0x1F8); spelled as a literal,
	// presumably because GCC rejects the static const in a constant
	// expression — confirm.
	ivar_align_mask = 504
#endif
} objc_ivar_flags;
/** Returns the ivar's alignment in bytes, decoded from its flags field. */
static inline size_t ivarGetAlign(Ivar ivar)
{
	uint32_t log2align = (ivar->flags & ivar_align_mask) >> ivar_align_shift;
	return 1<<log2align;
}
/**
 * Stores `align` (a power-of-two byte alignment, or 0) into the ivar's flags
 * field as a base-2 logarithm occupying the ivar_align_mask bits.
 */
static inline void ivarSetAlign(Ivar ivar, size_t align)
{
	if (align != 0)
	{
		// Compute floor(log2(align)) via count-leading-zeros; the width used
		// must match size_t exactly.
		if (sizeof(size_t) == 4)
		{
			align = 4 * 8 - __builtin_clz(align) - 1;
		}
		else if (sizeof(size_t) == 8)
		{
			align = 8 * 8 - __builtin_clzll(align) - 1;
		}
		_Static_assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8), "Unexpected type for size_t");
	}
	align <<= ivar_align_shift;
	ivar->flags = (ivar->flags & ~ivar_align_mask) | align;
}
/** Records the ownership semantics `o` in the ivar's flags field. */
static inline void ivarSetOwnership(Ivar ivar, objc_ivar_ownership o)
{
	uint32_t cleared = ivar->flags & ~ivar_ownership_mask;
	ivar->flags = cleared | o;
}
/**
 * Returns the ownership semantics encoded in an ivar's flags field.
 */
static inline objc_ivar_ownership ivarGetOwnership(Ivar ivar)
{
	uint32_t bits = ivar->flags & ivar_ownership_mask;
	return (objc_ivar_ownership)bits;
}
/**
 * Legacy ivar structure, inherited from the GCC ABI.
 */
struct objc_ivar_gcc
{
	/**
	 * Name of this instance variable.
	 */
	const char *name;
	/**
	 * Type encoding for this instance variable.
	 */
	const char *type;
	/**
	 * The offset from the start of the object.  Unlike the current ABI's
	 * indirected `int *offset`, this legacy (fragile) layout stores the
	 * offset value directly.
	 */
	int offset;
};
/**
 * A list of instance variables declared on this class. Unlike the method
 * list, this is a single array and size. Categories are not allowed to add
 * instance variables, because that would require existing objects to be
 * reallocated, which is only possible with accurate GC (i.e. not in C).
 */
// begin: objc_ivar_list
struct objc_ivar_list
{
	/**
	 * The number of instance variables in this list.
	 */
	int count;
	/**
	 * The size of a `struct objc_ivar` — the stride of `ivar_list`.  This
	 * allows the runtime to load versions of this that come from a newer
	 * compiler, if we ever need to do so.
	 */
	size_t size;
	/**
	 * An array of instance variable metadata structures. Note that this array
	 * has count elements.
	 */
	struct objc_ivar ivar_list[];
};
// end: objc_ivar_list
/**
 * Returns a pointer to the i'th ivar in the list.  Indexing uses l->size as
 * the element stride rather than sizeof(struct objc_ivar), so lists emitted
 * by newer compilers — whose entries may carry extra trailing fields this
 * runtime does not know about — are still addressed correctly.
 */
static inline struct objc_ivar *ivar_at_index(struct objc_ivar_list *l, int i)
{
	assert(l->size >= sizeof(struct objc_ivar));
	char *base = (char*)l->ivar_list;
	return (struct objc_ivar*)(base + (i * l->size));
}
/**
 * Legacy version of the ivar list.  Entries are `objc_ivar_gcc` structures
 * with no stride field, so this is indexed as a plain array.
 */
struct objc_ivar_list_gcc
{
	/**
	 * The number of instance variables in this list.
	 */
	int count;
	/**
	 * An array of instance variable metadata structures. Note that this array
	 * has count elements.
	 */
	struct objc_ivar_gcc ivar_list[];
};

@ -0,0 +1,15 @@
#pragma once
#include "visibility.h"
#include "ivar.h"
#include "class.h"
#include "category.h"
#include "protocol.h"
/** Upgrades a legacy (GCC / GNUstep-v1 ABI) class structure to the current representation. */
PRIVATE Class objc_upgrade_class(struct objc_class_gsv1 *oldClass);
/** Upgrades a legacy GCC-ABI category to the current representation. */
PRIVATE struct objc_category *objc_upgrade_category(struct objc_category_gcc *);
/** Returns the legacy (GSv1) class structure corresponding to a current class. */
PRIVATE struct objc_class_gsv1* objc_legacy_class_for_class(Class);
/** Upgrades a legacy GCC-ABI protocol to the current representation. */
PRIVATE struct objc_protocol *objc_upgrade_protocol_gcc(struct objc_protocol_gcc*);
/** Upgrades a legacy GSv1-ABI protocol to the current representation. */
PRIVATE struct objc_protocol *objc_upgrade_protocol_gsv1(struct objc_protocol_gsv1*);

@ -0,0 +1,67 @@
#ifndef __OBJC_LOADER_H_INCLUDED
#define __OBJC_LOADER_H_INCLUDED
#include "category.h"
#include "method.h"
#include "module.h"
#include "class.h"
#include "protocol.h"
/**
* Checks whether it is safe to load a module with the specified version and
* module size. This depends on whether another module with an incompatible
* ABI has already been loaded.
*/
BOOL objc_check_abi_version(struct objc_module_abi_8 *module);
/**
* Initializes a protocol list, uniquing the protocols in the list.
*/
void objc_init_protocols(struct objc_protocol_list *protocols);
/**
* Registers a set of selectors from a method list.
*/
void objc_register_selectors_from_list(struct objc_method_list *l);
/**
* Register all of the (unregistered) selectors that are used in a class.
*/
void objc_register_selectors_from_class(Class class);
/**
* Registers all of the selectors in an array.
*/
void objc_register_selector_array(SEL selectors, unsigned long count);
/**
* Loads a class into the runtime system. If possible, the class is resolved
* (inserted into the class tree) immediately. If its superclass is not yet
* resolved, it is enqueued for later resolution.
*/
void objc_load_class(struct objc_class *class);
/**
* Resolves classes that have not yet been resolved, if their superclasses have
* subsequently been loaded.
*/
void objc_resolve_class_links(void);
/**
* Attaches a category to its class, if the class is already loaded. Buffers
* it for future resolution if not.
*/
void objc_try_load_category(struct objc_category *cat);
/**
* Tries to load all of the categories that could not previously be loaded
* because their classes were not yet loaded.
*/
void objc_load_buffered_categories(void);
/**
* Updates the dispatch table for a class.
*/
void objc_update_dtable_for_class(Class cls);
/**
* Initialises a list of static object instances belonging to the same class if
 * possible, or defers initialisation until the class has been loaded, if not.
*/
void objc_init_statics(struct objc_static_instance_list *statics);
/**
* Tries again to initialise static instances which could not be initialised
* earlier.
*/
void objc_init_buffered_statics(void);
#endif //__OBJC_LOADER_H_INCLUDED

@ -0,0 +1,114 @@
/**
* libobjc requires recursive mutexes. These are delegated to the underlying
* threading implementation. This file contains a VERY thin wrapper over the
* Windows and POSIX mutex APIs.
*/
#ifndef __LIBOBJC_LOCK_H_INCLUDED__
#define __LIBOBJC_LOCK_H_INCLUDED__
#ifdef _WIN32
# include "safewindows.h"
typedef CRITICAL_SECTION mutex_t;
# define INIT_LOCK(x) InitializeCriticalSection(&(x))
# define LOCK(x) EnterCriticalSection(x)
# define UNLOCK(x) LeaveCriticalSection(x)
# define DESTROY_LOCK(x) DeleteCriticalSection(x)
#else
#include <pthread.h>
typedef pthread_mutex_t mutex_t;
// If this pthread implementation has a static initializer for recursive
// mutexes, use that, otherwise fall back to the portable version
# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
# define INIT_LOCK(x) x = (pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
# elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
# define INIT_LOCK(x) x = (pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER
# else
# define INIT_LOCK(x) init_recursive_mutex(&(x))
static inline void init_recursive_mutex(pthread_mutex_t *x)
{
pthread_mutexattr_t recursiveAttributes;
pthread_mutexattr_init(&recursiveAttributes);
pthread_mutexattr_settype(&recursiveAttributes, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(x, &recursiveAttributes);
pthread_mutexattr_destroy(&recursiveAttributes);
}
# endif
# define LOCK(x) pthread_mutex_lock(x)
# define UNLOCK(x) pthread_mutex_unlock(x)
# define DESTROY_LOCK(x) pthread_mutex_destroy(x)
#endif
/**
 * Cleanup callback used by LOCK_FOR_SCOPE: receives the address of a
 * mutex_t* variable and unlocks the mutex it points at.
 */
__attribute__((unused)) static void objc_release_lock(void *x)
{
	mutex_t **lock_ptr = (mutex_t**)x;
	UNLOCK(*lock_ptr);
}
/**
* Concatenate strings during macro expansion.
*/
#define LOCK_HOLDERN_NAME_CAT(x, y) x ## y
/**
* Concatenate string with unique variable during macro expansion.
*/
#define LOCK_HOLDER_NAME_COUNTER(x, y) LOCK_HOLDERN_NAME_CAT(x, y)
/**
* Create a unique name for a lock holder variable
*/
#define LOCK_HOLDER_NAME(x) LOCK_HOLDER_NAME_COUNTER(x, __COUNTER__)
/**
* Acquires the lock and automatically releases it at the end of the current
* scope.
*/
#define LOCK_FOR_SCOPE(lock) \
__attribute__((cleanup(objc_release_lock)))\
__attribute__((unused)) mutex_t *LOCK_HOLDER_NAME(lock_pointer) = lock;\
LOCK(lock)
/**
* The global runtime mutex.
*/
extern mutex_t runtime_mutex;
#define LOCK_RUNTIME() LOCK(&runtime_mutex)
#define UNLOCK_RUNTIME() UNLOCK(&runtime_mutex)
#define LOCK_RUNTIME_FOR_SCOPE() LOCK_FOR_SCOPE(&runtime_mutex)
#ifdef __cplusplus
/**
* C++ wrapper around our mutex, for use with std::lock_guard and friends.
*/
class RecursiveMutex
{
	/// The underlying mutex (recursive, as configured by INIT_LOCK).
	mutex_t mutex;
	public:
	/**
	 * Explicit initialisation of the underlying lock, so that this can be a
	 * global.  Call before the first lock().
	 */
	void init()
	{
		INIT_LOCK(mutex);
	}
	/// Acquire the lock.
	void lock()
	{
		LOCK(&mutex);
	}
	/// Release the lock.
	void unlock()
	{
		UNLOCK(&mutex);
	}
};
#endif
#endif // __LIBOBJC_LOCK_H_INCLUDED__

@ -0,0 +1,103 @@
#include <assert.h>
/**
 * Metadata structure describing a method.  Note that the field order differs
 * from the legacy `objc_method_gcc` layout (imp first here, last there).
 */
// begin: objc_method
struct objc_method
{
	/**
	 * A pointer to the function implementing this method.
	 */
	IMP imp;
	/**
	 * Selector used to send messages to this method.
	 */
	SEL selector;
	/**
	 * The extended type encoding for this method.
	 */
	const char *types;
};
// end: objc_method
/**
 * Legacy method structure, inherited from the GCC ABI.
 */
struct objc_method_gcc
{
	/**
	 * Selector used to send messages to this method. The type encoding of
	 * this method should match the types field.
	 */
	SEL selector;
	/**
	 * The type encoding for this selector. Used only for introspection, and
	 * only required because of the stupid selector handling in the old GNU
	 * runtime. In future, this field may be reused for something else.
	 */
	const char *types;
	/**
	 * A pointer to the function implementing this method.
	 */
	IMP imp;
};
/**
 * Method list. Each class or category defines a new one of these and they are
 * all chained together in a linked list, with new ones inserted at the head.
 * When constructing the dispatch table, methods in the start of the list are
 * used in preference to ones at the end.
 */
// begin: objc_method_list
struct objc_method_list
{
	/**
	 * The next group of methods in the list.
	 */
	struct objc_method_list *next;
	/**
	 * The number of methods in this list.
	 */
	int count;
	/**
	 * Size of `struct objc_method` — the stride of `methods`.  This allows
	 * the runtime to handle lists from newer compilers whose entries are
	 * larger than this runtime's `struct objc_method`.
	 */
	size_t size;
	/**
	 * An array of methods. Note that the actual size of this is count.
	 */
	struct objc_method methods[];
};
// end: objc_method_list
/**
 * Returns a pointer to the i'th method in the list.  Indexing uses l->size
 * as the element stride rather than sizeof(struct objc_method), so lists
 * emitted by newer compilers — whose entries may carry extra trailing
 * fields this runtime does not know about — are still addressed correctly.
 */
static inline struct objc_method *method_at_index(struct objc_method_list *l, int i)
{
	assert(l->size >= sizeof(struct objc_method));
	char *entries = (char*)l->methods;
	return (struct objc_method*)(entries + (i * l->size));
}
/**
 * Legacy version of the method list.
 */
struct objc_method_list_gcc
{
	/**
	 * The next group of methods in the list.
	 */
	struct objc_method_list_gcc *next;
	/**
	 * The number of methods in this list.
	 */
	int count;
	/**
	 * An array of methods.  Note that the actual size of this is count.
	 * There is no size field: legacy entries are always exactly
	 * sizeof(struct objc_method_gcc) apart.
	 */
	struct objc_method_gcc methods[];
};

@ -0,0 +1,102 @@
/**
 * Defines the module structures.
 *
 * When defining a new ABI, the first two fields of `struct objc_module_abi_8`
 * (version and size) must be retained, so that the runtime can identify the
 * module format before interpreting the rest of the structure.
 */
/**
 * The symbol table for a module.  This structure references all of the
 * Objective-C symbols defined for a module, allowing the runtime to find and
 * register them.
 */
struct objc_symbol_table_abi_8
{
	/**
	 * The number of selectors referenced in this module.
	 */
	unsigned long selector_count;
	/**
	 * An array of selectors used in this compilation unit.  SEL is a pointer
	 * type and this points to the first element in an array of selectors.
	 */
	SEL selectors;
	/**
	 * The number of classes defined in this module.
	 */
	unsigned short class_count;
	/**
	 * The number of categories defined in this module.
	 */
	unsigned short category_count;
	/**
	 * A null-terminated array of pointers to symbols defined in this module.
	 * This contains class_count pointers to class structures, category_count
	 * pointers to category structures, and then zero or more pointers to
	 * static object instances.
	 *
	 * Current compilers only use this for constant strings.  The runtime
	 * permits other types.
	 */
	void *definitions[];
};
/**
 * The module structure is passed to the __objc_exec_class function by a
 * constructor function when the module is loaded.
 *
 * When defining a new ABI version, the first two fields in this structure must
 * be retained.
 */
struct objc_module_abi_8
{
	/**
	 * The version of the ABI used by this module.  This is checked against the
	 * list of ABIs that the runtime supports, and the list of incompatible
	 * ABIs.
	 */
	unsigned long version;
	/**
	 * The size of the module.  This is used for sanity checking, to ensure
	 * that the compiler and runtime's idea of the module size match.
	 */
	unsigned long size;
	/**
	 * The full path name of the source for this module.  Not currently used
	 * for anything, could be used for debugging in theory, but duplicates
	 * information available from DWARF data, so probably won't.
	 */
	const char *name;
	/**
	 * A pointer to the symbol table for this compilation unit.
	 */
	struct objc_symbol_table_abi_8 *symbol_table;
};
/**
 * Version 10 of the module ABI: version 8 plus a garbage-collection mode.
 */
struct objc_module_abi_10
{
	/**
	 * Inherited fields from version 8 of the ABI.
	 */
	struct objc_module_abi_8 old;
	/**
	 * GC mode.  GC_Optional code can be mixed with anything, but GC_None code
	 * can't be mixed with GC_Required code.
	 */
	int gc_mode;
};
/**
 * List of static instances of a named class provided in this module.
 */
struct objc_static_instance_list
{
	/**
	 * The name of the class.  The isa pointer of all of the instances will be
	 * set to the class with this name.
	 */
	char *class_name;
	/**
	 * NULL-terminated array of statically-allocated instances.
	 */
	id instances[];
};

@ -0,0 +1,10 @@
/**
 * Stub declaration of NSObject.  Lots of things in the runtime require the
 * basic retain / release / autorelease / dealloc selectors to exist; this
 * minimal interface provides them without pulling in Foundation.
 * NOTE(review): the original comment was truncated mid-sentence — confirm
 * the intended wording against upstream.
 */
@interface NSObject
-retain;
-copy;
-(void)release;
-autorelease;
-(void)dealloc;
@end

@ -0,0 +1,66 @@
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Allocates a C++ exception.  This function is part of the Itanium C++ ABI and
 * is provided externally.
 */
/*
 * Note: Recent versions of libsupc++ already provide a prototype for
 * __cxa__allocate_exception().  Since the libsupc++ version is defined with
 * _GLIBCXX_NOTHROW, clang gives a type mismatch error.
 */
#ifndef __cplusplus
#undef CXA_ALLOCATE_EXCEPTION_SPECIFIER
#define CXA_ALLOCATE_EXCEPTION_SPECIFIER
#endif
void *__cxa_allocate_exception(size_t thrown_size) CXA_ALLOCATE_EXCEPTION_SPECIFIER;
/**
 * Initialises an exception object returned by __cxa_allocate_exception() for
 * storing an Objective-C object.  The return value is the location of the
 * _Unwind_Exception structure within this structure, and should be passed to
 * the C++ personality function.
 */
struct _Unwind_Exception *objc_init_cxx_exception(id thrown_exception);
/**
 * The GNU C++ exception personality function, provided by libsupc++ (GNU) or
 * libcxxrt (PathScale).  Declared weak so that linking succeeds when no C++
 * runtime library is present.
 */
__attribute__((weak)) DECLARE_PERSONALITY_FUNCTION(__gxx_personality_v0);
/**
 * Frees an exception object allocated by __cxa_allocate_exception().  Part of
 * the Itanium C++ ABI.
 */
void __cxa_free_exception(void *thrown_exception);
/**
 * Tests whether a C++ exception contains an Objective-C object, and returns it
 * if it does.  The second argument is a pointer to a boolean value indicating
 * whether this is a valid object.
 */
void *objc_object_for_cxx_exception(void *thrown_exception, int *isValid);
/**
 * Prints the type info associated with an exception.  Used only when
 * debugging, not compiled in the normal build.
 */
void print_type_info(void *thrown_exception);
/**
 * The exception class that we've detected that C++ runtime library uses.
 */
extern uint64_t cxx_exception_class;
/**
 * The exception class that libsupc++ and libcxxrt use.
 * NOTE(review): these are non-static namespace/file-scope const definitions.
 * In C++ a const has internal linkage, so a header definition is fine; if
 * this header is ever compiled as C the definitions would have external
 * linkage and could collide across translation units — confirm.
 */
const uint64_t gnu_cxx_exception_class = EXCEPTION_CLASS('G','N','U','C','C','+','+','\0');
/**
 * The exception class that libc++abi uses.
 */
const uint64_t llvm_cxx_exception_class = EXCEPTION_CLASS('C','L','N','G','C','+','+','\0');
#ifdef __cplusplus
}
#endif

@ -0,0 +1,241 @@
typedef struct objc_object* id;
#include "objc/runtime.h"
#include "visibility.h"
#ifndef DEBUG_EXCEPTIONS
#define DEBUG_LOG(...)
#else
#define DEBUG_LOG(str, ...) fprintf(stderr, str, ## __VA_ARGS__)
#endif
/**
 * Our own definitions of C++ ABI functions and types.  These are provided
 * because this file must not include cxxabi.h.  We need to handle subtly
 * different variations of the ABI and including one specific implementation
 * would make that very difficult.
 */
namespace __cxxabiv1
{
	/**
	 * Type info for classes.  Forward declared because the GNU ABI provides a
	 * method on all type_info objects that the dynamic cast header
	 * needs.
	 */
	struct __class_type_info;
	/**
	 * The C++ in-flight exception object.  We will derive the offset of fields
	 * in this, so we do not ever actually see a concrete definition of it.
	 */
	struct __cxa_exception;
	/**
	 * The public ABI structure for current exception state.
	 */
	struct __cxa_eh_globals
	{
		/**
		 * The current exception that has been caught.
		 */
		__cxa_exception *caughtExceptions;
		/**
		 * The number of uncaught exceptions still in flight.
		 */
		unsigned int uncaughtExceptions;
	};
	/**
	 * Retrieve the above structure.
	 */
	extern "C" __cxa_eh_globals *__cxa_get_globals();
}
namespace std
{
	struct type_info;
}
// Define some C++ ABI types here, rather than including them.  This prevents
// conflicts with the libstdc++ headers, which expose only a subset of the
// type_info class (the part required for standards compliance, not the
// implementation details).
typedef void (*unexpected_handler)();
typedef void (*terminate_handler)();
namespace std
{
	/**
	 * std::type_info, containing the minimum requirements for the ABI.
	 * Public headers on some implementations also expose some implementation
	 * details.  The layout of our subclasses must respect the layout of the
	 * C++ runtime library, but also needs to be portable across multiple
	 * implementations and so should not depend on internal symbols from those
	 * libraries.
	 */
	class type_info
	{
		public:
		virtual ~type_info();
		bool operator==(const type_info &) const;
		bool operator!=(const type_info &) const;
		bool before(const type_info &) const;
		type_info();
		private:
		type_info(const type_info& rhs);
		type_info& operator= (const type_info& rhs);
		const char *__type_name;
		protected:
		type_info(const char *name): __type_name(name) { }
		public:
		const char* name() const { return __type_name; }
	};
}
extern "C" void __cxa_throw(void*, std::type_info*, void(*)(void*));
extern "C" void __cxa_rethrow();
/**
 * Helper function to find a particular value scanning backwards in a
 * structure.  Returns the (negative) byte offset of the first match within
 * the 128 bytes preceding `addr`; aborts if the value is not found.
 */
template<typename T>
ptrdiff_t find_backwards(void *addr, T val)
{
	T *ptr = reinterpret_cast<T*>(addr);
	// Keep the bound check in signed arithmetic.  The original expression
	// (disp * sizeof(T) > -128) converted both operands to size_t and only
	// terminated correctly because of unsigned wraparound.
	for (ptrdiff_t disp = -1 ; (disp * static_cast<ptrdiff_t>(sizeof(T)) > -128) ; disp--)
	{
		if (ptr[disp] == val)
		{
			return disp * static_cast<ptrdiff_t>(sizeof(T));
		}
	}
	fprintf(stderr, "Unable to find field in C++ exception structure\n");
	abort();
}
/**
 * Helper function to find a particular value scanning forwards in a
 * structure.  Returns the byte offset of the first match within the 256
 * bytes starting at `addr`; aborts if the value is not found.
 */
template<typename T>
ptrdiff_t find_forwards(void *addr, T val)
{
	T *cursor = reinterpret_cast<T*>(addr);
	ptrdiff_t idx = 0;
	while (idx * sizeof(T) < 256)
	{
		if (cursor[idx] == val)
		{
			return idx * sizeof(T);
		}
		idx++;
	}
	fprintf(stderr, "Unable to find field in C++ exception structure\n");
	abort();
}
/**
 * Adds a byte offset to a pointer and reinterprets the result as a T*.
 */
template<typename T>
T *pointer_add(void *ptr, ptrdiff_t offset)
{
	char *base = reinterpret_cast<char*>(ptr);
	return reinterpret_cast<T*>(base + offset);
}
namespace gnustep
{
	namespace libobjc
	{
		/**
		 * Superclass for the type info for Objective-C exceptions.
		 */
		struct OBJC_PUBLIC __objc_type_info : std::type_info
		{
			/**
			 * Constructor that sets the name.
			 */
			__objc_type_info(const char *name);
			/**
			 * Helper function used by libsupc++ and libcxxrt to determine if
			 * this is a pointer type.  If so, catches automatically
			 * dereference the pointer to the thrown pointer in
			 * `__cxa_begin_catch`.
			 */
			virtual bool __is_pointer_p() const;
			/**
			 * Helper function used by libsupc++ and libcxxrt to determine if
			 * this is a function pointer type.  Irrelevant for our purposes.
			 */
			virtual bool __is_function_p() const;
			/**
			 * Catch handler.  This is used in the C++ personality function.
			 * `thrown_type` is the type info of the thrown object, `this` is
			 * the type info at the catch site.  `thrown_object` is a pointer
			 * to a pointer to the thrown object and may be adjusted by this
			 * function.
			 */
			virtual bool __do_catch(const type_info *thrown_type,
			                        void **thrown_object,
			                        unsigned) const;
			/**
			 * Function used for `dynamic_cast` between two C++ class types in
			 * libsupc++ and libcxxrt.
			 *
			 * This should never be called on Objective-C types.
			 */
			virtual bool __do_upcast(
			                const __cxxabiv1::__class_type_info *target,
			                void **thrown_object) const;
		};
		/**
		 * Singleton type info for the `id` type.
		 */
		struct OBJC_PUBLIC __objc_id_type_info : __objc_type_info
		{
			__objc_id_type_info();
			virtual ~__objc_id_type_info();
			virtual bool __do_catch(const type_info *thrownType,
			                        void **obj,
			                        unsigned outer) const;
		};
		/**
		 * Type info for a specific Objective-C class at a catch site.
		 */
		struct OBJC_PUBLIC __objc_class_type_info : __objc_type_info
		{
			virtual ~__objc_class_type_info();
			virtual bool __do_catch(const type_info *thrownType,
			                        void **obj,
			                        unsigned outer) const;
		};
	}
}
/**
 * Public interface to the Objective-C++ exception mechanism
 */
extern "C"
{
/**
 * The public symbol that the compiler uses to indicate the Objective-C id type.
 */
extern OBJC_PUBLIC gnustep::libobjc::__objc_id_type_info __objc_id_type_info;
} // extern "C"
/**
 * C++ structure that is thrown through a frame with the `test_eh_personality`
 * personality function.  This contains a well-known value that we can search
 * for after the unwind header.
 */
struct
PRIVATE
MagicValueHolder
{
	/**
	 * The constant that we will search for to identify the MagicValueHolder object.
	 */
	static constexpr uint32_t magic = 0x01020304;
	/**
	 * The single field in this structure.
	 */
	uint32_t magic_value;
	/**
	 * Constructor.  Initialises the field with the magic constant.
	 */
	MagicValueHolder();
};

@ -0,0 +1,57 @@
#include <stdlib.h>
#include "lock.h"
#ifndef POOL_TYPE
#error POOL_TYPE must be defined
#endif
// BUG FIX: this guard previously re-tested POOL_TYPE, so a missing POOL_NAME
// was never diagnosed and produced confusing errors later in the expansion.
#ifndef POOL_NAME
#error POOL_NAME must be defined
#endif
// Horrible multiple indirection to satisfy the weird precedence rules in cpp
#define REALLY_PREFIX_SUFFIX(x,y) x ## y
#define PREFIX_SUFFIX(x, y) REALLY_PREFIX_SUFFIX(x, y)
#define NAME(x) PREFIX_SUFFIX(POOL_NAME, x)
#define PAGE_SIZE 4096
// Malloc one page at a time.
#define POOL_SIZE ((PAGE_SIZE) / sizeof(POOL_TYPE))
// Current page of objects and the index of the next free slot within it.
// An index below zero means the page is exhausted (or never allocated).
static POOL_TYPE* NAME(_pool);
static int NAME(_pool_next_index) = -1;
#ifdef THREAD_SAFE_POOL
static mutex_t NAME(_lock);
// BUG FIX: go through NAME() so POOL_NAME is macro-expanded before pasting;
// the previous POOL_NAME##_lock pasted the literal token `POOL_NAME`.
// BUG FIX: UNLOCK_POOL previously expanded to LOCK(), so the pool mutex was
// acquired twice and never released.
#define LOCK_POOL() LOCK(&NAME(_lock))
#define UNLOCK_POOL() UNLOCK(&NAME(_lock))
#else
#define LOCK_POOL()
#define UNLOCK_POOL()
#endif
// Allocation statistics.
// NOTE(review): these are not namespaced with NAME(); including this header
// twice in one translation unit would redefine them — confirm single use.
static int pool_size = 0;
static int pool_allocs = 0;
/**
 * Allocates one POOL_TYPE object from the pool, grabbing a fresh page with
 * malloc when the current one is exhausted.  Objects are handed out from the
 * end of the page towards the start and are never freed.
 * NOTE(review): the malloc result is not checked; a failed allocation will
 * fault on first use.
 */
static inline POOL_TYPE*NAME(_pool_alloc)(void)
{
	LOCK_POOL();
	pool_allocs++;
	if (0 > NAME(_pool_next_index))
	{
		NAME(_pool) = malloc(PAGE_SIZE);
		NAME(_pool_next_index) = POOL_SIZE - 1;
		pool_size += PAGE_SIZE;
	}
	POOL_TYPE* new = &NAME(_pool)[NAME(_pool_next_index)--];
	UNLOCK_POOL();
	return new;
}
#undef NAME
#undef POOL_SIZE
#undef PAGE_SIZE
#undef POOL_NAME
#undef POOL_TYPE
#undef LOCK_POOL
#undef UNLOCK_POOL
#ifdef THREAD_SAFE_POOL
#undef THREAD_SAFE_POOL
#endif

@ -0,0 +1,45 @@
#pragma once
#ifdef _WIN32
#include "safewindows.h"
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP && _WIN32_WINNT >= 0x0A00
// Prefer the *FromApp versions when we're being built in a Windows Store App context on
// Windows >= 10. *FromApp require the application to be manifested for "codeGeneration".
#define VirtualAlloc VirtualAllocFromApp
#define VirtualProtect VirtualProtectFromApp
#endif // App family partition
// Reserves and commits `size` bytes of zero-initialised read-write memory.
// Returns nullptr on failure (VirtualAlloc reports failure as NULL).
inline void *allocate_pages(size_t size)
{
	return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
#else
#include <sys/mman.h>
// POSIX variant: anonymous private read-write mapping.  mmap reports failure
// with MAP_FAILED rather than NULL, so it is normalised to nullptr here.
inline void *allocate_pages(size_t size)
{
	void *ret = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	return ret == MAP_FAILED ? nullptr : ret;
}
#endif
/**
 * Simple bump allocator for objects of type T.  Memory is claimed from the
 * OS one chunk at a time via allocate_pages() and handed out sequentially;
 * nothing is ever returned to the OS.
 */
template<typename T>
class PoolAllocate
{
	static constexpr size_t PageSize = 4096;
	static constexpr size_t ChunkSize = sizeof(T) * PageSize;
	// Index of the next unused slot in the current chunk.  Starts at the
	// end so that the first allocation triggers a chunk fetch.
	static inline size_t cursor = PageSize;
	// The chunk currently being carved up.
	static inline T *chunk = nullptr;
public:
	static T *allocate()
	{
		bool exhausted = (cursor == PageSize);
		if (exhausted)
		{
			chunk = static_cast<T*>(allocate_pages(ChunkSize));
			cursor = 0;
		}
		T *slot = &chunk[cursor];
		cursor++;
		return slot;
	}
};

@ -0,0 +1,239 @@
#include "visibility.h"
#include <assert.h>
/**
 * Flags stored in the first attribute byte of a declared property (see
 * `struct objc_property_gsv1`); combined by ORing values together.
 */
enum PropertyAttributeKind
{
	/**
	 * Property has no attributes.
	 */
	OBJC_PR_noattr = 0x00,
	/**
	 * The property is declared read-only.
	 */
	OBJC_PR_readonly = (1<<0),
	/**
	 * The property has a getter.
	 */
	OBJC_PR_getter = (1<<1),
	/**
	 * The property has assign semantics.
	 */
	OBJC_PR_assign = (1<<2),
	/**
	 * The property is declared read-write.
	 */
	OBJC_PR_readwrite = (1<<3),
	/**
	 * Property has retain semantics.
	 */
	OBJC_PR_retain = (1<<4),
	/**
	 * Property has copy semantics.
	 */
	OBJC_PR_copy = (1<<5),
	/**
	 * Property is marked as non-atomic.
	 */
	OBJC_PR_nonatomic = (1<<6),
	/**
	 * Property has setter.
	 */
	OBJC_PR_setter = (1<<7)
};
/**
 * Flags in the second attributes field in declared properties.
 * Note: This field replaces the old 'is synthesized' field and so these values
 * are shifted left one from their values in clang.
 */
enum PropertyAttributeKind2
{
	/**
	 * No extended attributes.
	 */
	OBJC_PR_noextattr = 0,
	/**
	 * The property is synthesized.  This has no meaning in properties on
	 * protocols.
	 */
	OBJC_PR_synthesized = (1<<0),
	/**
	 * The property is dynamic (i.e. the implementation is inherited or
	 * provided at run time).
	 */
	OBJC_PR_dynamic = (1<<1),
	/**
	 * This property belongs to a protocol.
	 */
	OBJC_PR_protocol = OBJC_PR_synthesized | OBJC_PR_dynamic,
	/**
	 * The property is atomic.
	 */
	OBJC_PR_atomic = (1<<2),
	/**
	 * The property value is a zeroing weak reference.
	 */
	OBJC_PR_weak = (1<<3),
	/**
	 * The property value is strong (retained).  Currently, this is equivalent
	 * to the strong attribute.
	 */
	OBJC_PR_strong = (1<<4),
	/**
	 * The property value is just copied.
	 */
	OBJC_PR_unsafe_unretained = (1<<5),
};
/**
 * Structure used for property enumeration.  Note that property enumeration is
 * currently quite broken on OS X, so achieving full compatibility there is
 * impossible.  Instead, we strive to achieve compatibility with the
 * documentation.
 */
// begin: objc_property
struct objc_property
{
	/**
	 * Name of this property.
	 */
	const char *name;
	/**
	 * The attribute string of the property.
	 * NOTE(review): the original comment here duplicated the one on `type`;
	 * the field name indicates this holds the attribute encoding — confirm
	 * against the compiler's metadata emission.
	 */
	const char *attributes;
	/**
	 * The type encoding of the property.
	 */
	const char *type;
	/**
	 * The selector for the getter for this property.
	 */
	SEL getter;
	/**
	 * The selector for the setter for this property.
	 */
	SEL setter;
};
// end: objc_property
/**
 * GNUstep v1 ABI version of `struct objc_property`
 */
struct objc_property_gsv1
{
	/**
	 * Name of this property.
	 */
	const char *name;
	/**
	 * Attributes for this property.  Made by ORing together
	 * PropertyAttributeKinds.
	 */
	char attributes;
	/**
	 * Flag set if the property is synthesized (see PropertyAttributeKind2).
	 */
	char attributes2;
	/**
	 * Padding field.  These were implicit in the structure field alignment
	 * (four more on 64-bit platforms), but we'll make them explicit now for
	 * future use.
	 */
	char unused1;
	/**
	 * More padding.
	 */
	char unused2;
	/**
	 * Name of the getter for this property.
	 */
	const char *getter_name;
	/**
	 * Type encoding for the get method for this property.
	 */
	const char *getter_types;
	/**
	 * Name of the set method for this property.
	 */
	const char *setter_name;
	/**
	 * Type encoding of the setter for this property.
	 */
	const char *setter_types;
};
/**
 * List of property introspection data (GNUstep v1 ABI layout: no size field,
 * entries are sizeof(struct objc_property_gsv1) apart).
 */
struct objc_property_list_gsv1
{
	/**
	 * Number of properties in this array.
	 */
	int count;
	/*
	 * The next property list in a linked list.
	 * NOTE(review): declared as the v2 `objc_property_list` type rather than
	 * `objc_property_list_gsv1` — presumably legacy lists are upgraded in
	 * place on load; confirm against the loader.
	 */
	struct objc_property_list *next;
	/**
	 * List of properties.
	 */
	struct objc_property_gsv1 properties[];
};
/**
 * List of property introspection data.
 */
// begin: objc_property_list
struct objc_property_list
{
	/**
	 * Number of properties in this array.
	 */
	int count;
	/**
	 * Size of `struct objc_property`.  This allows the runtime to
	 * transparently support newer ABIs with more fields in the property
	 * metadata.
	 */
	int size;
	/*
	 * The next property list in a linked list.
	 */
	struct objc_property_list *next;
	/**
	 * List of properties.
	 */
	struct objc_property properties[];
};
// end: objc_property_list
/**
 * Returns a pointer to the property at a given index in a property list.
 * Entries are `l->size` bytes apart (the emitting compiler's sizeof for its
 * version of `struct objc_property`), so we index by byte offset rather than
 * by the declared element type, which may be older than the metadata.
 */
static inline struct objc_property *property_at_index(struct objc_property_list *l, int i)
{
	assert(l->size >= sizeof(struct objc_property));
	char *entries = (char*)l->properties;
	return (struct objc_property*)(entries + (i * l->size));
}
/**
 * Constructs a property description from a list of attributes, returning the
 * instance variable name via the third parameter.
 */
PRIVATE struct objc_property propertyFromAttrs(const objc_property_attribute_t *attributes,
                                               unsigned int attributeCount,
                                               const char *name);
/**
 * Constructs and installs a property attribute string from the property
 * attributes and, optionally, an ivar string.
 */
PRIVATE const char *constructPropertyAttributes(objc_property_t property,
                                                const char *iVarName);

@ -0,0 +1,243 @@
#ifndef PROTOCOL_H_INCLUDED
#define PROTOCOL_H_INCLUDED
#include "selector.h"
#include <stdlib.h>
#include <assert.h>
/**
 * Legacy (GCC ABI) list of method descriptions in a protocol.
 */
struct objc_protocol_method_description_list_gcc
{
	/**
	 * Number of method descriptions in this list.
	 */
	int count;
	/**
	 * Methods in this list.  Note: these selectors are NOT resolved.  The name
	 * field points to the name, not to the index of the uniqued version of the
	 * name.  You must not use them for dispatch.
	 */
	struct objc_selector methods[];
};
/**
 * Protocol versions.  The compiler generates these in the `isa` field for
 * protocols and they are replaced with class pointers by the runtime.
 */
enum protocol_version
{
	/**
	 * Legacy (GCC-compatible) protocol version.
	 */
	protocol_version_gcc = 2,
	/**
	 * GNUstep V1 ABI protocol.
	 */
	protocol_version_gsv1 = 3,
	/**
	 * GNUstep V2 ABI protocol.
	 */
	protocol_version_gsv2 = 4
};
/**
 * A description of a method in a protocol.
 */
struct objc_protocol_method_description
{
	/**
	 * The selector for this method, includes traditional type encoding.
	 */
	SEL selector;
	/**
	 * The extended type encoding.
	 */
	const char *types;
};
/**
 * List of method descriptions in a protocol (v2 ABI: carries an entry size
 * so the structure can grow in future ABIs).
 */
struct objc_protocol_method_description_list
{
	/**
	 * Number of method descriptions in this list.
	 */
	int count;
	/**
	 * Size of `struct objc_method_description`
	 */
	int size;
	/**
	 * Methods in this list.  `count` elements long.
	 */
	struct objc_protocol_method_description methods[];
};
/**
 * Returns the i'th method description in a protocol method description list.
 * The stride between entries is `l->size` — the emitting compiler's sizeof
 * for its (possibly newer) version of the structure — so indexing is done by
 * byte offset instead of plain array arithmetic.
 */
static inline struct objc_protocol_method_description *
protocol_method_at_index(struct objc_protocol_method_description_list *l, int i)
{
	assert(l->size >= sizeof(struct objc_protocol_method_description));
	char *bytes = (char*)l->methods;
	return (struct objc_protocol_method_description*)(bytes + (i * l->size));
}
/**
 * GNUstep v2 ABI protocol structure.
 */
struct objc_protocol
{
	/**
	 * Class pointer.
	 */
	id isa;
	/**
	 * Protocol name.
	 */
	char *name;
	/**
	 * Protocols that this protocol conforms to.
	 */
	struct objc_protocol_list *protocol_list;
	/**
	 * Required instance methods that classes conforming to this protocol must
	 * implement.
	 */
	struct objc_protocol_method_description_list *instance_methods;
	/**
	 * Required class methods that classes conforming to this protocol must
	 * implement.
	 */
	struct objc_protocol_method_description_list *class_methods;
	/**
	 * Instance methods that are declared as optional for this protocol.
	 */
	struct objc_protocol_method_description_list *optional_instance_methods;
	/**
	 * Class methods that are declared as optional for this protocol.
	 */
	struct objc_protocol_method_description_list *optional_class_methods;
	/**
	 * Properties that are required by this protocol.
	 */
	struct objc_property_list *properties;
	/**
	 * Optional properties.
	 */
	struct objc_property_list *optional_properties;
	/**
	 * Class properties that are required by this protocol.
	 */
	struct objc_property_list *class_properties;
	/**
	 * Optional class properties.
	 */
	struct objc_property_list *optional_class_properties;
};
/**
 * Legacy (GCC ABI) protocol structure.
 */
struct objc_protocol_gcc
{
	/** Class pointer. */
	id isa;
	/**
	 * The name of this protocol.  Two protocols are regarded as identical if
	 * they have the same name.
	 */
	char *name;
	/**
	 * The list of protocols that this protocol conforms to.
	 */
	struct objc_protocol_list *protocol_list;
	/**
	 * List of instance methods required by this protocol.
	 */
	struct objc_protocol_method_description_list_gcc *instance_methods;
	/**
	 * List of class methods required by this protocol.
	 */
	struct objc_protocol_method_description_list_gcc *class_methods;
};
/**
 * GNUstep v1 ABI protocol structure: the GCC layout extended with optional
 * methods and (v1-layout) property lists.
 */
struct objc_protocol_gsv1
{
	/**
	 * The first five ivars are shared with `objc_protocol_gcc`.
	 */
	id isa;
	char *name;
	struct objc_protocol_list *protocol_list;
	struct objc_protocol_method_description_list_gcc *instance_methods;
	struct objc_protocol_method_description_list_gcc *class_methods;
	/**
	 * Instance methods that are declared as optional for this protocol.
	 */
	struct objc_protocol_method_description_list_gcc *optional_instance_methods;
	/**
	 * Class methods that are declared as optional for this protocol.
	 */
	struct objc_protocol_method_description_list_gcc *optional_class_methods;
	/**
	 * Properties that are required by this protocol.
	 */
	struct objc_property_list_gsv1 *properties;
	/**
	 * Optional properties.
	 */
	struct objc_property_list_gsv1 *optional_properties;
};
#ifdef __OBJC__
@interface Object { id isa; } @end
/**
 * Definition of the Protocol type.  Protocols are objects, but are rarely used
 * as such.
 */
@interface Protocol : Object
@end
@interface ProtocolGCC : Protocol
@end
@interface ProtocolGSv1 : Protocol
@end
#endif
/**
 * List of protocols.  Attached to a class or a category by the compiler and to
 * a class by the runtime.
 */
// begin: objc_protocol_list
struct objc_protocol_list
{
	/**
	 * Additional protocol lists.  Loading a category that declares protocols
	 * will cause a new list to be prepended using this pointer to the protocol
	 * list for the class.  Unlike methods, protocols can not be overridden,
	 * although it is possible for a protocol to appear twice.
	 */
	struct objc_protocol_list *next;
	/**
	 * The number of protocols in this list.
	 */
	size_t count;
	/**
	 * An array of protocols.  Contains `count` elements.
	 *
	 * On load, this contains direct references to other protocols and should
	 * be updated to point to the canonical (possibly upgraded) version.
	 */
	struct objc_protocol *list[];
};
// end: objc_protocol_list
/**
 * Function that ensures that protocol classes are linked.  Calling this
 * guarantees that the Protocol classes are linked into a statically linked
 * runtime.
 */
void link_protocol_classes(void);
#endif // PROTOCOL_H_INCLUDED

@ -0,0 +1,20 @@
#ifndef __LIBOBJC_SAFEWINDOWS_H_INCLUDED__
#define __LIBOBJC_SAFEWINDOWS_H_INCLUDED__
// Include windows.h without letting its BOOL typedef clash with the
// Objective-C BOOL: temporarily rename it, then restore our definition.
#pragma push_macro("BOOL")
#ifdef BOOL
#undef BOOL
#endif
#define BOOL _WINBOOL
#include <windows.h>
// Windows.h defines interface -> struct, which breaks @interface parsing.
#ifdef interface
#undef interface
#endif
#pragma pop_macro("BOOL")
#endif // __LIBOBJC_SAFEWINDOWS_H_INCLUDED__

@ -0,0 +1,154 @@
/**
 * Sparse Array
 *
 * Author: David Chisnall
 *
 * License: See COPYING.MIT
 *
 */
#ifndef _SARRAY_H_INCLUDED_
#define _SARRAY_H_INCLUDED_
#include <stdint.h>
#include <stdlib.h>
#include "visibility.h"
/**
 * The size of the data array.  The sparse array is a tree with this many
 * children at each node depth.
 */
static const uint32_t data_size = 256;
/**
 * The mask used to access the elements in the data array in a sparse array
 * node.
 */
static const uint32_t data_mask = data_size - 1;
/**
 * Sparse arrays, used to implement dispatch tables.  Current implementation is
 * quite RAM-intensive and could be optimised.  Maps 32-bit integers to pointers.
 *
 * Note that deletion from the array is not supported.  This allows accesses to
 * be done without locking; the worst that can happen is that the caller gets
 * an old value (and if this is important to you then you should be doing your
 * own locking).  For this reason, you should be very careful when deleting a
 * sparse array that there are no references to it held by other threads.
 */
typedef struct
{
	/**
	 * Number of bits that the masked value should be right shifted by to get
	 * the index in the subarray.  If this value is greater than zero, then the
	 * value in the array is another SparseArray*.
	 */
	uint32_t shift;
	/**
	 * The reference count for this.  Used for copy-on-write.  When making a
	 * copy of a sparse array, we only copy the root node, and increment the
	 * reference count of the remaining nodes.  When modifying any leaf node,
	 * we copy if its reference count is greater than one.
	 */
	uint32_t refCount;
	/**
	 * The data stored in this sparse array node.
	 * The two branches must stay in sync: GCC in C mode rejects a const
	 * variable as an array bound, so the literal 256 is used there —
	 * presumably that is the reason for the #ifdef; confirm.
	 */
#ifdef __clang__
	void *data[data_size];
#else
	void *data[256];
#endif
} SparseArray;
/**
 * Turn an index in the array into an index in the current depth.
 */
#define MASK_INDEX(index) \
	((index >> sarray->shift) & 0xff)
/**
 * Value stored in empty slots.
 */
#define SARRAY_EMPTY ((void*)0)
/**
 * Look up the specified value in the sparse array.  This is used in message
 * dispatch and so has been put in the header to allow compilers to inline it,
 * even though this breaks the abstraction.
 *
 * The root's `shift` field encodes the tree depth: each case below chases
 * shift/8 levels of child nodes, consuming one byte of the index per level.
 */
static inline void* SparseArrayLookup(SparseArray * sarray, uint32_t index)
{
	// This unrolled version of the commented-out segment below only works with
	// sarrays that use one-byte leafs.  It's really ugly, but seems to be faster.
	// With this version, we get the same performance as the old GNU code, but
	// with about half the memory usage.
	uint32_t i = index;
	switch (sarray->shift)
	{
		default: UNREACHABLE("broken sarray");
		case 0:
			// Leaf node: direct lookup.
			return sarray->data[i & 0xff];
		case 8:
			return
				((SparseArray*)sarray->data[(i & 0xff00)>>8])->data[(i & 0xff)];
		case 16:
			return
				((SparseArray*)((SparseArray*)
					sarray->data[(i & 0xff0000)>>16])->
						data[(i & 0xff00)>>8])->data[(i & 0xff)];
		case 24:
			return
				((SparseArray*)((SparseArray*)((SparseArray*)
					sarray->data[(i & 0xff000000)>>24])->
						data[(i & 0xff0000)>>16])->
							data[(i & 0xff00)>>8])->data[(i & 0xff)];
	}
	/*
	while(sarray->shift > 0)
	{
		uint32_t i = MASK_INDEX(index);
		sarray = (SparseArray*) sarray->data[i];
	}
	uint32_t i = index & sarray->mask;
	return sarray->data[i];
	*/
}
/**
 * Create a new sparse array.
 */
SparseArray *SparseArrayNew();
/**
 * Creates a new sparse array with the specified capacity.  The depth indicates
 * the number of bits to use for the key.  Must be a value between 8 and 32 and
 * should ideally be a multiple of base_shift.
 */
SparseArray *SparseArrayNewWithDepth(uint32_t depth);
/**
 * Returns a new sparse array created by adding this one as the first child
 * node in an expanded one.
 */
SparseArray *SparseArrayExpandingArray(SparseArray *sarray, uint32_t new_depth);
/**
 * Insert a value at the specified index.
 */
void SparseArrayInsert(SparseArray * sarray, uint32_t index, void * value);
/**
 * Destroy the sparse array.  Note that calling this while other threads are
 * performing lookups is guaranteed to break.
 */
void SparseArrayDestroy(SparseArray * sarray);
/**
 * Iterate through the array.  Returns the next non-NULL value after index and
 * sets index to the following value.  For example, an array containing values
 * at 0 and 10 will, if called with index set to 0 first return the value at 0
 * and set index to 1.  A subsequent call with index set to 1 will return the
 * value at 10 and set index to 11.
 */
void * SparseArrayNext(SparseArray * sarray, uint32_t * index);
/**
 * Creates a copy of the sparse array.
 */
SparseArray *SparseArrayCopy(SparseArray * sarray);
/**
 * Returns the total memory usage of a sparse array.
 */
int SparseArraySize(SparseArray *sarray);
#endif //_SARRAY_H_INCLUDED_

@ -0,0 +1,72 @@
#ifndef OBJC_SELECTOR_H_INCLUDED
#define OBJC_SELECTOR_H_INCLUDED
/**
 * Structure used to store selectors in the list.
 */
// begin: objc_selector
struct objc_selector
{
	union
	{
		/**
		 * The name of this selector.  Used for unregistered selectors.
		 */
		const char *name;
		/**
		 * The index of this selector in the selector table.  When a selector
		 * is registered with the runtime, its name is replaced by an index
		 * uniquely identifying this selector.  The index is used for dispatch.
		 */
		uintptr_t index;
	};
	/**
	 * The Objective-C type encoding of the message identified by this selector.
	 */
	const char * types;
};
// end: objc_selector
// end: objc_selector
/**
 * Returns the dispatch-table index of the untyped variant of a selector.
 */
__attribute__((unused))
static uint32_t get_untyped_idx(SEL aSel)
{
	const char *selName = sel_getName(aSel);
	SEL untypedVariant = sel_registerTypedName_np(selName, 0);
	return untypedVariant->index;
}
/**
 * Returns the untyped variant of a selector, registering it if needed.
 */
__attribute__((unused))
static SEL sel_getUntyped(SEL aSel)
{
	const char *selName = sel_getName(aSel);
	return sel_registerTypedName_np(selName, 0);
}
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Registers the selector.  This selector may be returned later, so it must not
 * be freed.
 */
SEL objc_register_selector(SEL aSel);
#ifdef __cplusplus
}
#endif
/**
 * SELECTOR() macro to work around the fact that GCC hard-codes the type of
 * selectors.  This is functionally equivalent to @selector(), but it ensures
 * that the selector has the type that the runtime uses for selectors.
 */
#ifdef __clang__
#define SELECTOR(x) @selector(x)
#else
#define SELECTOR(x) (SEL)@selector(x)
#endif
#endif // OBJC_SELECTOR_H_INCLUDED

@ -0,0 +1,81 @@
#ifdef _WIN32
#include "safewindows.h"
// Windows has no sleep(); emulate it with Sleep(), which takes milliseconds.
static unsigned sleep(unsigned seconds)
{
	Sleep(seconds*1000);
	return 0;
}
#else
#include <unistd.h>
#endif
/**
 * Number of spinlocks.  This allocates one page on 32-bit platforms.
 */
#define spinlock_count (1<<10)
// Mask for reducing a hash to a valid index into `spinlocks`.
static const int spinlock_mask = spinlock_count - 1;
/**
 * Integers used as spinlocks for atomic property access.
 */
extern int spinlocks[spinlock_count];
/**
* Get a spin lock from a pointer. We want to prevent lock contention between
* properties in the same object - if someone is stupid enough to be using
* atomic property access, they are probably stupid enough to do it for
* multiple properties in the same object. We also want to try to avoid
* contention between the same property in different objects, so we can't just
* use the ivar offset.
*/
static inline volatile int *lock_for_pointer(const void *ptr)
{
	// Use the address itself as the hash input.
	intptr_t hash = (intptr_t)ptr;
	// Most properties will be pointers, so disregard the lowest few bits,
	// which are usually zero due to alignment.
	// NOTE(review): on 64-bit platforms this discards 8 bits, more than
	// pointer alignment alone justifies -- presumably intentional, to
	// spread adjacent ivars across different locks; confirm against
	// upstream before changing.
	hash >>= sizeof(void*) == 4 ? 2 : 8;
	// Fold a higher slice of the address into the low bits so that
	// addresses differing only in high bits still map to different locks.
	intptr_t low = hash & spinlock_mask;
	hash >>= 16;
	hash |= low;
	// Wrap into the global lock table.
	return spinlocks + (hash & spinlock_mask);
}
/**
 * Unlocks the spinlock. This is not an atomic operation. We are only ever
 * modifying the lowest bit of the spinlock word, so it doesn't matter if this
 * is two writes, because there is no contention on the high bits. There is
 * no possibility of contention among calls to this, because it may only be
 * called by the thread owning the spin lock.
 */
inline static void unlock_spinlock(volatile int *spinlock)
{
	// Full memory barrier: make every write performed while holding the
	// lock visible to other threads before the lock word is cleared.
	__sync_synchronize();
	// Plain (non-atomic) store is sufficient: only the lock holder may
	// write here.
	*spinlock = 0;
}
/**
* Attempts to lock a spinlock. This is heavily optimised for the uncontended
* case, because property access should (generally) not be contended. In the
* uncontended case, this is a single atomic compare and swap instruction and a
* branch. Atomic CAS is relatively expensive (can be a pipeline flush, and
* may require locking a cache line in a cache-coherent SMP system, but it's a
* lot cheaper than a system call).
*
* If the lock is contended, then we just sleep and then try again after the
* other threads have run. Note that there is no upper bound on the potential
* running time of this function, which is one of the great many reasons that
* using atomic accessors is a terrible idea, but in the common case it should
* be very fast.
*/
inline static void lock_spinlock(volatile int *spinlock)
{
	// Spin until we atomically flip the lock word from 0 (unlocked) to 1
	// (locked).  On every tenth failed attempt, yield the CPU so that the
	// current holder has a chance to make progress and release the lock.
	for (int attempts = 1 ;
	     !__sync_bool_compare_and_swap(spinlock, 0, 1) ;
	     attempts++)
	{
		if (attempts % 10 == 0)
		{
			sleep(0);
		}
	}
}

@ -0,0 +1,34 @@
#include <string.h>
#include <stdint.h>
/**
* Efficient string hash function.
*/
__attribute__((unused))
static uint32_t string_hash(const char *str)
{
	// sdbm-style string hash: h = ch + h * 65599, where
	// 65599 == (1 << 6) + (1 << 16) - 1.
	uint32_t h = 0;
	for (const char *p = str ; *p != '\0' ; p++)
	{
		int32_t ch = *p;
		h = ch + (h << 6) + (h << 16) - h;
	}
	return h;
}
/**
* Test two strings for equality.
*/
__attribute__((unused))
static int string_compare(const char *str1, const char *str2)
{
	// Identical pointers (including two NULLs) are trivially equal.
	if (str1 == str2)
	{
		return 1;
	}
	// Exactly one NULL can never match a real string.
	if ((NULL == str1) || (NULL == str2))
	{
		return 0;
	}
	// Fall back to a full character-by-character comparison.
	return (0 == strcmp(str1, str2));
}

@ -0,0 +1,36 @@
/**
* type_encoding_cases.h - expects the APPLY_TYPE macro to be defined. This
* macro is invoked once for every type and its Objective-C name. Use this
* file when implementing things like the -unsignedIntValue family of methods.
* For this case, the macro will be invoked with unsigned int as the type and
* unsignedInt as the name.
*/
#ifndef APPLY_TYPE
#error Define APPLY_TYPE(type, name, capitalizedName, encodingChar) before including this file
#endif
// Floating-point types.
// NOTE(review): the selector-name fragment for long double below is
// "long double", which contains a space and so cannot form a valid method
// name such as -longDoubleValue; other implementations use "longDouble"
// here -- confirm how consumers expand this argument before relying on it.
APPLY_TYPE(long double, long double, LongDouble, 'D')
APPLY_TYPE(double, double, Double, 'd')
APPLY_TYPE(float, float, Float, 'f')
// Signed integer types.
APPLY_TYPE(signed char, char, Char, 'c')
APPLY_TYPE(int, int, Int, 'i')
APPLY_TYPE(short, short, Short, 's')
APPLY_TYPE(long, long, Long, 'l')
APPLY_TYPE(long long, longLong, LongLong, 'q')
//APPLY_TYPE(__int128, int128, Int128, 't')
// Unsigned integer types.
APPLY_TYPE(unsigned char, unsignedChar, UnsignedChar, 'C')
APPLY_TYPE(unsigned short, unsignedShort, UnsignedShort, 'S')
APPLY_TYPE(unsigned int, unsignedInt, UnsignedInt, 'I')
APPLY_TYPE(unsigned long, unsignedLong, UnsignedLong, 'L')
APPLY_TYPE(unsigned long long, unsignedLongLong, UnsignedLongLong, 'Q')
//APPLY_TYPE(unsigned __int128, unsignedInt128, UnsignedInt128, 'T')
// Non-integer types: only emitted when the includer defines
// NON_INTEGER_TYPES (consumed here so it does not leak into the next
// inclusion of this file).
#ifdef NON_INTEGER_TYPES
#undef NON_INTEGER_TYPES
APPLY_TYPE(_Bool, bool, Bool, 'B')
#ifndef SKIP_ID
APPLY_TYPE(id, object, Object, '@')
#endif
APPLY_TYPE(Class, class, Class, '#')
APPLY_TYPE(SEL, selector, Selector, ':')
APPLY_TYPE(char*, cString, CString, '*')
#endif
#undef APPLY_TYPE

@ -0,0 +1,217 @@
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* ARM-specific unwind definitions. These are taken from the ARM EHABI
* specification.
*/
typedef enum
{
_URC_NO_REASON = 0,
_URC_OK = 0, /* operation completed successfully */
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_END_OF_STACK = 5,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8,
_URC_FAILURE = 9, /* unspecified failure of some kind */
_URC_FATAL_PHASE1_ERROR = _URC_FAILURE
} _Unwind_Reason_Code;
typedef uint32_t _Unwind_State;
#ifdef __clang__
static const _Unwind_State _US_VIRTUAL_UNWIND_FRAME = 0;
static const _Unwind_State _US_UNWIND_FRAME_STARTING = 1;
static const _Unwind_State _US_UNWIND_FRAME_RESUME = 2;
static const _Unwind_State _US_FORCE_UNWIND = 8;
#else // GCC fails at knowing what a constant expression is
# define _US_VIRTUAL_UNWIND_FRAME 0
# define _US_UNWIND_FRAME_STARTING 1
# define _US_UNWIND_FRAME_RESUME 2
# define _US_FORCE_UNWIND 8
#endif
typedef int _Unwind_Action;
typedef struct _Unwind_Context _Unwind_Context;
typedef uint32_t _Unwind_EHT_Header;
struct _Unwind_Exception
{
uint64_t exception_class;
void (*exception_cleanup)(_Unwind_Reason_Code, struct _Unwind_Exception *);
/* Unwinder cache, private fields for the unwinder's use */
struct
{
uint32_t reserved1;
uint32_t reserved2;
uint32_t reserved3;
uint32_t reserved4;
uint32_t reserved5;
/* init reserved1 to 0, then don't touch */
} unwinder_cache;
/* Propagation barrier cache (valid after phase 1): */
struct
{
uint32_t sp;
uint32_t bitpattern[5];
} barrier_cache;
/* Cleanup cache (preserved over cleanup): */
struct
{
uint32_t bitpattern[4];
} cleanup_cache;
/* Pr cache (for pr's benefit): */
struct
{
/** function start address */
uint32_t fnstart;
/** pointer to EHT entry header word */
_Unwind_EHT_Header *ehtp;
/** additional data */
uint32_t additional;
uint32_t reserved1;
} pr_cache;
/** Force alignment of next item to 8-byte boundary */
long long int :0;
};
/* Unwinding functions */
_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *ucbp);
void _Unwind_Resume(struct _Unwind_Exception *ucbp);
_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
void _Unwind_Complete(struct _Unwind_Exception *ucbp);
void _Unwind_DeleteException(struct _Unwind_Exception *ucbp);
void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context*);
typedef enum
{
_UVRSR_OK = 0,
_UVRSR_NOT_IMPLEMENTED = 1,
_UVRSR_FAILED = 2
} _Unwind_VRS_Result;
typedef enum
{
_UVRSC_CORE = 0,
_UVRSC_VFP = 1,
_UVRSC_WMMXD = 3,
_UVRSC_WMMXC = 4
} _Unwind_VRS_RegClass;
typedef enum
{
_UVRSD_UINT32 = 0,
_UVRSD_VFPX = 1,
_UVRSD_UINT64 = 3,
_UVRSD_FLOAT = 4,
_UVRSD_DOUBLE = 5
} _Unwind_VRS_DataRepresentation;
_Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
_Unwind_VRS_RegClass regclass,
uint32_t regno,
_Unwind_VRS_DataRepresentation representation,
void *valuep);
_Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context,
_Unwind_VRS_RegClass regclass,
uint32_t regno,
_Unwind_VRS_DataRepresentation representation,
void *valuep);
/* Return the base-address for data references. */
extern unsigned long _Unwind_GetDataRelBase(struct _Unwind_Context *);
/* Return the base-address for text references. */
extern unsigned long _Unwind_GetTextRelBase(struct _Unwind_Context *);
extern unsigned long _Unwind_GetRegionStart(struct _Unwind_Context *);
/**
* The next set of functions are compatibility extensions, implementing Itanium
* ABI functions on top of ARM ones.
*/
#define _UA_SEARCH_PHASE 1
#define _UA_CLEANUP_PHASE 2
#define _UA_HANDLER_FRAME 4
#define _UA_FORCE_UNWIND 8
// Itanium-ABI-compatible accessor: reads core register `reg` from the
// unwind context via the ARM EHABI VRS interface.
static inline unsigned long _Unwind_GetGR(struct _Unwind_Context *context, int reg)
{
	unsigned long val;
	_Unwind_VRS_Get(context, _UVRSC_CORE, reg, _UVRSD_UINT32, &val);
	return val;
}
// Itanium-ABI-compatible accessor: writes core register `reg` in the
// unwind context via the ARM EHABI VRS interface.
static inline void _Unwind_SetGR(struct _Unwind_Context *context, int reg, unsigned long val)
{
	_Unwind_VRS_Set(context, _UVRSC_CORE, reg, _UVRSD_UINT32, &val);
}
static inline unsigned long _Unwind_GetIP(_Unwind_Context *context)
{
	// r15 (the program counter) stores the Thumb state in its low bit;
	// that bit is not part of the address, so mask it off.
	unsigned long pc = _Unwind_GetGR(context, 15);
	return pc & ~1UL;
}
static inline void _Unwind_SetIP(_Unwind_Context *context, unsigned long val)
{
	// Bit 0 of the program counter selects Thumb vs ARM mode.  The mode is
	// assumed fixed for the duration of a function, so preserve the current
	// bit when installing the new address.
	unsigned long thumbBit = _Unwind_GetGR(context, 15) & 1;
	_Unwind_SetGR(context, 15, val | thumbBit);
}
/** GNU API function that unwinds the frame */
_Unwind_Reason_Code __gnu_unwind_frame(struct _Unwind_Exception*, struct _Unwind_Context*);
#define DECLARE_PERSONALITY_FUNCTION(name) \
_Unwind_Reason_Code name(_Unwind_State state,\
struct _Unwind_Exception *exceptionObject,\
struct _Unwind_Context *context);
#define BEGIN_PERSONALITY_FUNCTION(name) \
_Unwind_Reason_Code name(_Unwind_State state,\
struct _Unwind_Exception *exceptionObject,\
struct _Unwind_Context *context)\
{\
int version = 1;\
uint64_t exceptionClass = exceptionObject->exception_class;\
int actions;\
switch (state & ~_US_FORCE_UNWIND)\
{\
default: return _URC_FAILURE;\
case _US_VIRTUAL_UNWIND_FRAME:\
{\
actions = _UA_SEARCH_PHASE;\
break;\
}\
case _US_UNWIND_FRAME_STARTING:\
{\
actions = _UA_CLEANUP_PHASE;\
if (exceptionObject->barrier_cache.sp == _Unwind_GetGR(context, 13))\
{\
actions |= _UA_HANDLER_FRAME;\
}\
break;\
}\
case _US_UNWIND_FRAME_RESUME:\
{\
return continueUnwinding(exceptionObject, context);\
break;\
}\
}\
_Unwind_SetGR (context, 12, (unsigned long)exceptionObject);
#define CALL_PERSONALITY_FUNCTION(name) name(state,exceptionObject,context)
#define COPY_EXCEPTION(dst, src) \
(dst)->unwinder_cache = (src)->unwinder_cache; \
(dst)->barrier_cache = (src)->barrier_cache; \
(dst)->cleanup_cache = (src)->cleanup_cache; \
(dst)->pr_cache = (src)->pr_cache;
#ifdef __cplusplus
}
#endif

@ -0,0 +1,188 @@
/* libunwind - a platform-independent unwind library
Copyright (C) 2003 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _UNWIND_H
#define _UNWIND_H
/* For uint64_t */
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Minimal interface as per C++ ABI draft standard:
http://www.codesourcery.com/cxx-abi/abi-eh.html */
typedef enum
{
_URC_NO_REASON = 0,
_URC_OK = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_FATAL_PHASE2_ERROR = 2,
_URC_FATAL_PHASE1_ERROR = 3,
_URC_NORMAL_STOP = 4,
_URC_END_OF_STACK = 5,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8
}
_Unwind_Reason_Code;
typedef int _Unwind_Action;
#define _UA_SEARCH_PHASE 1
#define _UA_CLEANUP_PHASE 2
#define _UA_HANDLER_FRAME 4
#define _UA_FORCE_UNWIND 8
struct _Unwind_Context; /* opaque data-structure */
struct _Unwind_Exception; /* forward-declaration */
typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code,
struct _Unwind_Exception *);
typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) (int, _Unwind_Action,
uint64_t,
struct _Unwind_Exception *,
struct _Unwind_Context *,
void *);
/* The C++ ABI requires exception_class, private_1, and private_2 to
be of type uint64 and the entire structure to be
double-word-aligned. Please note that exception_class stays 64-bit
even on 32-bit machines for gcc compatibility. */
struct _Unwind_Exception
{
uint64_t exception_class;
_Unwind_Exception_Cleanup_Fn exception_cleanup;
#ifdef __SEH__
uintptr_t private_[6];
#else
uintptr_t private_1;
uintptr_t private_2;
#endif
} __attribute__((__aligned__));
extern _Unwind_Reason_Code _Unwind_RaiseException (struct _Unwind_Exception *);
extern _Unwind_Reason_Code _Unwind_ForcedUnwind (struct _Unwind_Exception *,
_Unwind_Stop_Fn, void *);
extern void _Unwind_Resume (struct _Unwind_Exception *);
extern void _Unwind_DeleteException (struct _Unwind_Exception *);
extern uintptr_t _Unwind_GetGR (struct _Unwind_Context *, int);
extern void _Unwind_SetGR (struct _Unwind_Context *, int, uintptr_t);
extern uintptr_t _Unwind_GetIP (struct _Unwind_Context *);
extern uintptr_t _Unwind_GetIPInfo (struct _Unwind_Context *, int *);
extern void _Unwind_SetIP (struct _Unwind_Context *, uintptr_t);
extern uintptr_t _Unwind_GetLanguageSpecificData (struct _Unwind_Context*);
extern uintptr_t _Unwind_GetRegionStart (struct _Unwind_Context *);
#ifdef _GNU_SOURCE
/* Callback for _Unwind_Backtrace(). The backtrace stops immediately
if the callback returns any value other than _URC_NO_REASON. */
typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (struct _Unwind_Context *,
void *);
/* See http://gcc.gnu.org/ml/gcc-patches/2001-09/msg00082.html for why
_UA_END_OF_STACK exists. */
# define _UA_END_OF_STACK 16
/* If the unwind was initiated due to a forced unwind, resume that
operation, else re-raise the exception. This is used by
__cxa_rethrow(). */
extern _Unwind_Reason_Code
_Unwind_Resume_or_Rethrow (struct _Unwind_Exception *);
/* See http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00154.html for why
_Unwind_GetBSP() exists. */
extern uintptr_t _Unwind_GetBSP (struct _Unwind_Context *);
/* Return the "canonical frame address" for the given context.
This is used by NPTL... */
extern uintptr_t _Unwind_GetCFA (struct _Unwind_Context *);
/* Return the base-address for data references. */
extern uintptr_t _Unwind_GetDataRelBase (struct _Unwind_Context *);
/* Return the base-address for text references. */
extern uintptr_t _Unwind_GetTextRelBase (struct _Unwind_Context *);
/* Call _Unwind_Trace_Fn once for each stack-frame, without doing any
cleanup. The first frame for which the callback is invoked is the
one for the caller of _Unwind_Backtrace(). _Unwind_Backtrace()
returns _URC_END_OF_STACK when the backtrace stopped due to
reaching the end of the call-chain or _URC_FATAL_PHASE1_ERROR if it
stops for any other reason. */
extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *);
/* Find the start-address of the procedure containing the specified IP
or NULL if it cannot be found (e.g., because the function has no
unwind info). Note: there is not necessarily a one-to-one
correspondence between source-level functions and procedures: some
functions don't have unwind-info and others are split into multiple
procedures. */
extern void *_Unwind_FindEnclosingFunction (void *);
/* See also Linux Standard Base Spec:
http://www.linuxbase.org/spec/refspecs/LSB_1.3.0/gLSB/gLSB/libgcc-s.html */
#endif /* _GNU_SOURCE */
#define DECLARE_PERSONALITY_FUNCTION(name) \
_Unwind_Reason_Code name(int version,\
_Unwind_Action actions,\
uint64_t exceptionClass,\
struct _Unwind_Exception *exceptionObject,\
struct _Unwind_Context *context);
#define BEGIN_PERSONALITY_FUNCTION(name) \
_Unwind_Reason_Code name(int version,\
_Unwind_Action actions,\
uint64_t exceptionClass,\
struct _Unwind_Exception *exceptionObject,\
struct _Unwind_Context *context)\
{
#define CALL_PERSONALITY_FUNCTION(name) name(version, actions, exceptionClass, exceptionObject, context)
#ifdef __SEH__
#define COPY_EXCEPTION(dst, src) \
do { \
memcpy((dst)->private_, (src)->private_, sizeof((src)->private_)); \
} while(0)
#else
#define COPY_EXCEPTION(dst, src) \
do { \
(dst)->private_1 = (src)->private_1; \
(dst)->private_2 = (src)->private_2; \
} while(0)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _UNWIND_H */

@ -0,0 +1,5 @@
#ifdef __arm__
#include "unwind-arm.h"
#else
#include "unwind-itanium.h"
#endif

@ -0,0 +1,24 @@
#include "objc/objc-visibility.h"
#if defined _WIN32 || defined __CYGWIN__ || __MINGW32__ || __MINGW64__
# define PRIVATE
#else
# define PRIVATE __attribute__ ((visibility("hidden")))
#endif
#ifdef NO_LEGACY
# define LEGACY PRIVATE
#else
# define LEGACY OBJC_PUBLIC
#endif
#if defined(DEBUG) || (!defined(__clang__))
# include <assert.h>
# define UNREACHABLE(x) assert(0 && x)
# define ASSERT(x) assert(x)
#else
# define UNREACHABLE(x) __builtin_unreachable()
# define ASSERT(x) do { if (!(x)) __builtin_unreachable(); } while(0)
#endif
#define LIKELY(x) __builtin_expect(x, 1)
#define UNLIKELY(x) __builtin_expect(x, 0)

@ -1,2 +1,4 @@
#include <objc/runtime.h>
#include <objc/message.h>

@ -595,6 +595,7 @@ void objc_disposeClassPair(Class cls);
* loaded, it calls the _objc_lookup_class() callback to allow an external
* library to load the module providing this class.
*/
OBJC_PUBLIC
id objc_getClass(const char *name);

@ -33,8 +33,10 @@ struct objc_slot2
*/
#if defined(__powerpc__) && !defined(__powerpc64__)
#else
#if defined(__clang__)
OBJC_PUBLIC extern _Atomic(uint64_t) objc_method_cache_version;
#endif
#endif
/**
* Legacy cache structure. This is no longer maintained in the runtime and is

@ -0,0 +1,79 @@
#include "objc/runtime.h"
#include "class.h"
#include "loader.h"
#include "lock.h"
#include "objc/blocks_runtime.h"
#include "dtable.h"
#include <assert.h>
#ifdef EMBEDDED_BLOCKS_RUNTIME
#define BLOCK_STORAGE OBJC_PUBLIC
#else
#define BLOCK_STORAGE extern
#endif
BLOCK_STORAGE struct objc_class _NSConcreteGlobalBlock;
BLOCK_STORAGE struct objc_class _NSConcreteStackBlock;
BLOCK_STORAGE struct objc_class _NSConcreteMallocBlock;
BLOCK_STORAGE struct objc_class _NSConcreteAutoBlock;
BLOCK_STORAGE struct objc_class _NSConcreteFinalizingBlock;
static struct objc_class _NSConcreteGlobalBlockMeta;
static struct objc_class _NSConcreteStackBlockMeta;
static struct objc_class _NSConcreteMallocBlockMeta;
static struct objc_class _NSConcreteAutoBlockMeta;
static struct objc_class _NSConcreteFinalizingBlockMeta;
static struct objc_class _NSBlock;
static struct objc_class _NSBlockMeta;
/**
 * Fills in a statically-allocated class structure as a subclass of
 * `superclass` named `name`, and registers it with the runtime.
 */
static void createNSBlockSubclass(Class superclass, Class newClass,
		Class metaClass, char *name)
{
	// Initialize the metaclass
	// NOTE(review): the metaclass's isa / super_class assignments are
	// commented out here -- presumably objc_load_class() fixes them up;
	// confirm before relying on the metaclass hierarchy being complete.
	//metaClass->class_pointer = superclass->class_pointer;
	//metaClass->super_class = superclass->class_pointer;
	metaClass->info = objc_class_flag_meta;
	metaClass->dtable = uninstalled_dtable;
	// Set up the new class
	newClass->isa = metaClass;
	newClass->super_class = superclass;
	newClass->name = name;
	newClass->dtable = uninstalled_dtable;
	newClass->info = objc_class_flag_is_block;
	// Hold the global runtime lock while registering the class.
	LOCK_RUNTIME_FOR_SCOPE();
	objc_load_class(newClass);
}
#define NEW_CLASS(super, sub) \
createNSBlockSubclass(super, &sub, &sub ## Meta, #sub)
/**
 * Creates the block classes (_NSBlock and its concrete subclasses) as
 * subclasses of `super`.  Returns NO if the classes were already created,
 * YES on success.
 */
OBJC_PUBLIC
BOOL objc_create_block_classes_as_subclasses_of(Class super)
{
	// A non-NULL superclass means a previous call already built the tree.
	if (_NSBlock.super_class != NULL) { return NO; }
	NEW_CLASS(super, _NSBlock);
	NEW_CLASS(&_NSBlock, _NSConcreteStackBlock);
	NEW_CLASS(&_NSBlock, _NSConcreteGlobalBlock);
	NEW_CLASS(&_NSBlock, _NSConcreteMallocBlock);
	NEW_CLASS(&_NSBlock, _NSConcreteAutoBlock);
	NEW_CLASS(&_NSBlock, _NSConcreteFinalizingBlock);
	// Global blocks never need refcount manipulation.
	objc_set_class_flag(&_NSConcreteGlobalBlock,
			objc_class_flag_permanent_instances);
	return YES;
}
/**
 * Sets the block-class flags on the concrete block classes before the full
 * class tree exists, so blocks created early are still recognised as blocks.
 */
PRIVATE void init_early_blocks(void)
{
	// If the real block classes are already registered, nothing to do.
	if (_NSBlock.super_class != NULL) { return; }
	_NSConcreteStackBlock.info = objc_class_flag_is_block;
	// Global blocks additionally never need refcount manipulation.
	_NSConcreteGlobalBlock.info = objc_class_flag_is_block | objc_class_flag_permanent_instances;
	_NSConcreteMallocBlock.info = objc_class_flag_is_block;
	_NSConcreteAutoBlock.info = objc_class_flag_is_block;
	_NSConcreteFinalizingBlock.info = objc_class_flag_is_block;
}

@ -0,0 +1,45 @@
#include "objc/runtime.h"
#include "protocol.h"
#include "class.h"
#include <stdio.h>
#include <string.h>
@implementation Protocol
// FIXME: This needs removing, but it's included for now because GNUstep's
// implementation of +[NSObject conformsToProtocol:] calls it.
- (BOOL)conformsTo: (Protocol*)p
{
	return protocol_conformsToProtocol(self, p);
}
// Protocol objects are owned by the runtime and never deallocated, so
// retain/release are no-ops.
- (id)retain
{
	return self;
}
- (void)release {}
+ (Class)class { return self; }
- (id)self { return self; }
@end
@interface __IncompleteProtocol : Protocol @end
@implementation __IncompleteProtocol @end
/**
* This class exists for the sole reason that the legacy GNU ABI did not
* provide a way of registering protocols with the runtime. With the new ABI,
* every protocol in a compilation unit that is not referenced should be added
* in a category on this class. This ensures that the runtime sees every
* protocol at least once and can perform uniquing.
*/
@interface __ObjC_Protocol_Holder_Ugly_Hack { id isa; } @end
@implementation __ObjC_Protocol_Holder_Ugly_Hack @end
@implementation Object @end
@implementation ProtocolGCC @end
@implementation ProtocolGSv1 @end
/**
 * Sends a message to each protocol root class so that references to them are
 * emitted -- presumably to stop the linker discarding them; confirm against
 * the runtime's initialisation path.
 */
PRIVATE void link_protocol_classes(void)
{
	[Protocol class];
	[ProtocolGCC class];
	[ProtocolGSv1 class];
}

@ -0,0 +1,130 @@
#include "visibility.h"
#include "objc/runtime.h"
#include "module.h"
#include "gc_ops.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
/**
* The smallest ABI version number of loaded modules.
*/
static unsigned long min_loaded_version;
/**
* The largest ABI version number of loaded modules.
*/
static unsigned long max_loaded_version;
/**
* Structure defining the compatibility between Objective-C ABI versions.
*/
struct objc_abi_version
{
/** Version of this ABI. */
unsigned long version;
/** Lowest ABI version that this is compatible with. */
unsigned long min_compatible_version;
/** Highest ABI version compatible with this. */
unsigned long max_compatible_version;
/** Size of the module structure for this ABI version. */
unsigned long module_size;
};
enum
{
gcc_abi = 8,
gnustep_abi = 9,
gc_abi = 10
};
/**
* List of supported ABIs.
*/
static struct objc_abi_version known_abis[] =
{
/* GCC ABI. */
{gcc_abi, gcc_abi, gnustep_abi, sizeof(struct objc_module_abi_8)},
/* Non-fragile ABI. */
{gnustep_abi, gcc_abi, gc_abi, sizeof(struct objc_module_abi_8)},
/* GC ABI. Adds a field describing the GC mode. */
{gc_abi, gcc_abi, gc_abi, sizeof(struct objc_module_abi_10)}
};
static int known_abi_count =
(sizeof(known_abis) / sizeof(struct objc_abi_version));
#define FAIL_IF(x, msg) do {\
if (x)\
{\
fprintf(stderr, "Objective-C ABI Error: %s while loading %s\n", msg, module->name);\
return NO;\
}\
} while(0)
/**
 * Returns YES if `string` ends with `suffix`, NO otherwise (including when
 * `string` is NULL).  `suffix` must be a non-NULL C string.
 */
static BOOL endsWith(const char *string, const char *suffix)
{
	if (NULL == string) { return NO; }
	// The previous strstr()-based check located the *first* occurrence of
	// the suffix, so a string that contained the suffix earlier AND ended
	// with it (e.g. "arc.m.arc.m" / "arc.m") was wrongly rejected.
	// Compare the tail of the string directly instead.
	size_t stringLength = strlen(string);
	size_t suffixLength = strlen(suffix);
	if (suffixLength > stringLength) { return NO; }
	return (strcmp(string + (stringLength - suffixLength), suffix) == 0);
}
/**
 * Checks that `module` declares an ABI version the runtime understands and
 * that is compatible with every module loaded so far.  Returns YES if the
 * module may be loaded, NO (with a diagnostic on stderr) otherwise.
 */
PRIVATE BOOL objc_check_abi_version(struct objc_module_abi_8 *module)
{
	static int runtime_modules = 5;
	// As a quick and ugly hack, skip these tests for the five .m files in
	// the runtime.  They should (in theory, at least) be aware of the GC
	// mode and behave accordingly.
	if (runtime_modules > 0)
	{
		if (endsWith(module->name, "properties.m") ||
			endsWith(module->name, "associate.m") ||
			endsWith(module->name, "arc.m") ||
			endsWith(module->name, "blocks_runtime.m") ||
			endsWith(module->name, "Protocol2.m"))
		{
			runtime_modules--;
			return YES;
		}
	}
	unsigned long version = module->version;
	unsigned long module_size = module->size;
	// Modules older than the GC ABI have no gc_mode field; treat them as
	// GC_None.
	enum objc_gc_mode gc_mode = (version < gc_abi) ? GC_None
		: ((struct objc_module_abi_10*)module)->gc_mode;
	struct objc_abi_version *v = NULL;
	// Find the descriptor for the module's declared ABI version.
	for (int i=0 ; i<known_abi_count ; i++)
	{
		if (known_abis[i].version == version)
		{
			v = &known_abis[i];
			break;
		}
	}
	FAIL_IF(NULL == v, "Unknown ABI version");
	FAIL_IF((v->module_size != module_size), "Incorrect module size");
	// Only check for ABI compatibility if this is not the first module to
	// be loaded; otherwise just record its version as the initial range.
	if (min_loaded_version > 0)
	{
		FAIL_IF((v->min_compatible_version > min_loaded_version),
				"Loading modules from incompatible ABIs");
		FAIL_IF((v->max_compatible_version < max_loaded_version),
				"Loading modules from incompatible ABIs");
		// Widen the recorded min/max loaded range to include this module.
		if (min_loaded_version > version)
		{
			min_loaded_version = version;
		}
		if (max_loaded_version < version)
		{
			max_loaded_version = version;
		}
	}
	else
	{
		min_loaded_version = version;
		max_loaded_version = version;
	}
	// We can't mix GC_None and GC_Required code, but we can mix any other
	// combination
	FAIL_IF((gc_mode == GC_Required), "GC code is no longer supported!");
	return YES;
}

@ -0,0 +1,128 @@
/** A hash table for mapping compatibility aliases to classes.
Copyright (c) 2011 Free Software Foundation, Inc.
Written by: Niels Grewe <niels.grewe@halbordnung.de>
Created: March 2011
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "visibility.h"
#include "objc/runtime.h"
#include "class.h"
#include "lock.h"
#include "string_hash.h"
#include <stdlib.h>
struct objc_alias
{
const char* name;
Class class;
};
typedef struct objc_alias Alias;
static int alias_compare(const char *name, const Alias alias)
{
return string_compare(name, alias.name);
}
static int alias_hash(const Alias alias)
{
return string_hash(alias.name);
}
static int alias_is_null(const Alias alias)
{
return alias.name == NULL;
}
static Alias NullAlias;
#define MAP_TABLE_NAME alias_table_internal
#define MAP_TABLE_COMPARE_FUNCTION alias_compare
#define MAP_TABLE_HASH_KEY string_hash
#define MAP_TABLE_HASH_VALUE alias_hash
#define MAP_TABLE_VALUE_TYPE struct objc_alias
#define MAP_TABLE_VALUE_NULL alias_is_null
#define MAP_TABLE_VALUE_PLACEHOLDER NullAlias
#include "hash_table.h"
static alias_table_internal_table *alias_table;
PRIVATE void init_alias_table(void)
{
alias_table_internal_initialize(&alias_table, 128);
}
static Alias alias_table_get_safe(const char *alias_name)
{
return alias_table_internal_table_get(alias_table, alias_name);
}
/**
 * Looks up the class registered for `alias_name`.  Returns NULL if the name
 * is NULL or no such alias exists.
 */
OBJC_PUBLIC Class alias_getClass(const char *alias_name)
{
	// A NULL name can never be a registered alias.
	if (alias_name == NULL)
	{
		return NULL;
	}
	// Placeholder (empty) table entries have a NULL name field.
	Alias entry = alias_table_get_safe(alias_name);
	if (entry.name == NULL)
	{
		return NULL;
	}
	return entry.class;
}
PRIVATE void alias_table_insert(Alias alias)
{
alias_table_internal_insert(alias_table, alias);
}
/**
 * Registers `alias` as an alternative name for `class`.  Returns 1 (YES) on
 * success or if the alias already maps to this class, 0 (NO) if either
 * argument is NULL or the alias already names a different class.
 */
OBJC_PUBLIC BOOL class_registerAlias_np(Class class, const char *alias)
{
	if ((NULL == alias) || (NULL == class))
	{
		return 0;
	}
	// Resolve the canonical class object for this class's name.
	// NOTE(review): objc_getClass() may return nil if the class is not yet
	// registered; the result is only compared and stored, never dereferenced.
	class = (Class)objc_getClass(class->name);
	/*
	 * If there already exists a matching alias, determine whether the existing
	 * alias is the correct one. Please note that objc_getClass() goes through
	 * the alias lookup and will create the alias table if necessary.
	 */
	Class existingClass = (Class)objc_getClass(alias);
	if (NULL != existingClass)
	{
		/*
		 * Return YES if the alias has already been registered for this very
		 * class, and NO if the alias is already used for another class.
		 */
		return (class == existingClass);
	}
	// Take a private copy of the name: the caller's string may not outlive
	// the table entry.
	Alias newAlias = { strdup(alias), class };
	alias_table_insert(newAlias);
	return 1;
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,469 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "objc/runtime.h"
#include "objc/objc-arc.h"
#include "nsobject.h"
#include "spinlock.h"
#include "class.h"
#include "dtable.h"
#include "selector.h"
#include "lock.h"
#include "gc_ops.h"
/**
* A single associative reference. Contains the key, value, and association
* policy.
*/
struct reference
{
/**
* The key used for identifying this object. Opaque pointer, should be set
* to 0 when this slot is unused.
*/
const void *key;
/**
* The associated object. Note, if the policy is assign then this may be
* some other type of pointer...
*/
void *object;
/**
* Association policy.
*/
uintptr_t policy;
};
#define REFERENCE_LIST_SIZE 10
/**
* Linked list of references associated with an object. We assume that there
* won't be very many, so we don't bother with a proper hash table, and just
* iterate over a list.
*/
struct reference_list
{
/**
* Next group of references. This is only ever used if we have more than
* 10 references associated with an object, which seems highly unlikely.
*/
struct reference_list *next;
	/**
	 * Mutex. Only set for the first reference list in a chain. Used for
	 * @synchronized().
	 */
mutex_t lock;
/**
* Garbage collection type. This stores the location of all of the
* instance variables in the object that may contain pointers.
*/
void *gc_type;
/**
* Array of references.
*/
struct reference list[REFERENCE_LIST_SIZE];
};
enum
{
OBJC_ASSOCIATION_ATOMIC = 0x300,
};
// Returns YES if the association policy requests atomic (lock-protected)
// access: both atomic policies have all of the OBJC_ASSOCIATION_ATOMIC bits
// set.
static BOOL isAtomic(uintptr_t policy)
{
	return (policy & OBJC_ASSOCIATION_ATOMIC) == OBJC_ASSOCIATION_ATOMIC;
}
/**
 * Walks the chain of reference lists looking for the entry whose key is
 * `key`, returning NULL if no such entry exists.  Passing a key of 0 finds
 * the first unused slot.
 */
static struct reference* findReference(struct reference_list *list, const void *key)
{
	for (struct reference_list *chunk = list ; chunk != NULL ; chunk = chunk->next)
	{
		for (int slot = 0 ; slot < REFERENCE_LIST_SIZE ; slot++)
		{
			if (chunk->list[slot].key == key)
			{
				return &chunk->list[slot];
			}
		}
	}
	return NULL;
}
/**
 * Releases every associated object in the list chain (deepest chunk first)
 * and resets all slots to the unused state.  Does not free the chunk
 * structures themselves (see freeReferenceList()).
 */
static void cleanupReferenceList(struct reference_list *list)
{
	if (NULL == list) { return; }
	// Recurse first so later chunks are cleaned before this one.
	cleanupReferenceList(list->next);
	for (int i=0 ; i<REFERENCE_LIST_SIZE ; i++)
	{
		struct reference *r = &list->list[i];
		if (0 != r->key)
		{
			// Mark the slot unused before releasing: concurrent lookups
			// must not observe a key with a dying object.
			r->key = 0;
			// Assign-policy associations were never retained, so only
			// release for the retaining/copying policies.
			if (OBJC_ASSOCIATION_ASSIGN != r->policy)
			{
				// Full barrier - ensure that we've zero'd the key before doing
				// this!
				__sync_synchronize();
				objc_release(r->object);
			}
			r->object = 0;
			r->policy = 0;
		}
	}
}
/**
 * Frees a chain of reference-list chunks, deepest first, using the
 * collector's free function.
 */
static void freeReferenceList(struct reference_list *l)
{
	if (NULL == l) { return; }
	freeReferenceList(l->next);
	gc->free(l);
}
/**
 * Stores the association (key -> obj) in `list` under the given policy,
 * copying or retaining the object as the policy requires and releasing any
 * previously associated object for the same key.
 */
static void setReference(struct reference_list *list,
                         const void *key,
                         void *obj,
                         uintptr_t policy)
{
	switch (policy)
	{
		// Ignore any unknown association policies
		default: return;
		case OBJC_ASSOCIATION_COPY_NONATOMIC:
		case OBJC_ASSOCIATION_COPY:
			obj = [(id)obj copy];
			break;
		case OBJC_ASSOCIATION_RETAIN_NONATOMIC:
		case OBJC_ASSOCIATION_RETAIN:
			obj = objc_retain(obj);
			// Falls through: retained objects need no further adjustment.
		case OBJC_ASSOCIATION_ASSIGN:
			break;
	}
	// While inserting into the list, we need to lock it temporarily.
	volatile int *lock = lock_for_pointer(list);
	lock_spinlock(lock);
	struct reference *r = findReference(list, key);
	// If there's an existing reference, then we can update it, otherwise we
	// have to install a new one
	if (NULL == r)
	{
		// Search for an unused slot
		r = findReference(list, 0);
		if (NULL == r)
		{
			// No free slot: append a fresh chunk to the end of the chain
			// and take its first slot.
			struct reference_list *l = list;
			while (NULL != l->next) { l = l->next; }
			l->next = gc->malloc(sizeof(struct reference_list));
			r = &l->next->list[0];
		}
		r->key = key;
	}
	unlock_spinlock(lock);
	// Now we only need to lock if the old or new property is atomic
	BOOL needLock = isAtomic(r->policy) || isAtomic(policy);
	if (needLock)
	{
		lock = lock_for_pointer(r);
		lock_spinlock(lock);
	}
	// Release the old value (unless it was an assign association); install
	// the new policy and object even if -release throws.
	@try
	{
		if (OBJC_ASSOCIATION_ASSIGN != r->policy)
		{
			objc_release(r->object);
		}
	}
	@finally
	{
		r->policy = policy;
		r->object = obj;
	}
	if (needLock)
	{
		unlock_spinlock(lock);
	}
}
static void deallocHiddenClass(id obj, SEL _cmd);
/**
 * Walks up the class hierarchy from the object's class and returns the first
 * class flagged as an associated-object (hidden) class, or Nil if none.
 */
static inline Class findHiddenClass(id obj)
{
	Class candidate = obj->isa;
	while ((Nil != candidate) &&
	       !objc_test_class_flag(candidate, objc_class_flag_assoc_class))
	{
		candidate = class_getSuperclass(candidate);
	}
	return candidate;
}
/**
 * Allocate a fresh hidden (associated-object) class whose superclass is
 * `superclass`. The reference_list used to hold associations is allocated
 * inline after the class structure, reachable via object_getIndexedIvars().
 * Returns Nil if allocation fails.
 */
static Class allocateHiddenClass(Class superclass)
{
	Class newClass =
		calloc(1, sizeof(struct objc_class) + sizeof(struct reference_list));
	if (Nil == newClass) { return Nil; }
	// Set up the new class
	newClass->isa = superclass->isa;
	// The hidden class reuses its superclass's name so introspection does
	// not reveal it.
	newClass->name = superclass->name;
	// Uncomment this for debugging: it makes it easier to track which hidden
	// class is which
	// static int count;
	//asprintf(&newClass->name, "%s%d", superclass->name, count++);
	newClass->info = objc_class_flag_resolved | objc_class_flag_user_created |
		objc_class_flag_hidden_class | objc_class_flag_assoc_class;
	newClass->super_class = superclass;
	// Start with the uninstalled dtable so lookups fall through until a
	// method is actually added.
	newClass->dtable = uninstalled_dtable;
	newClass->instance_size = superclass->instance_size;
	// Link the new class into its superclass's subclass list under the
	// global runtime lock.
	LOCK_RUNTIME_FOR_SCOPE();
	newClass->sibling_class = superclass->subclass_list;
	superclass->subclass_list = newClass;
	return newClass;
}
/**
 * Install a brand-new hidden class as `obj`'s isa and return it. The hidden
 * class gets a .cxx_destruct method (deallocHiddenClass) so that it is torn
 * down automatically when the object is destroyed.
 */
static inline Class initHiddenClassForObject(id obj)
{
	Class hiddenClass = allocateHiddenClass(obj->isa);
	// NOTE(review): allocateHiddenClass() can return Nil on allocation
	// failure and that is not checked here — confirm callers tolerate it.
	assert(!class_isMetaClass(obj->isa));
	static SEL cxx_destruct;
	if (NULL == cxx_destruct)
	{
		cxx_destruct = sel_registerName(".cxx_destruct");
	}
	// "v{8,16}@0:{4,8}": void return taking (self, _cmd), sized for
	// 32- vs 64-bit pointers.
	const char *types = sizeof(void*) == 4 ? "v8@0:4" : "v16@0:8";
	class_addMethod(hiddenClass, cxx_destruct,
			(IMP)deallocHiddenClass, types);
	obj->isa = hiddenClass;
	return hiddenClass;
}
/**
 * .cxx_destruct implementation installed on every hidden class. Runs during
 * object destruction: destroys the inline reference list, frees the dtable
 * and introspection metadata, unlinks the hidden class from its superclass's
 * subclass list, restores the object's isa, and frees the class itself.
 */
static void deallocHiddenClass(id obj, SEL _cmd)
{
	LOCK_RUNTIME_FOR_SCOPE();
	Class hiddenClass = findHiddenClass(obj);
	// After calling [super dealloc], the object will no longer exist.
	// Free the hidden class.
	struct reference_list *list = object_getIndexedIvars(hiddenClass);
	DESTROY_LOCK(&list->lock);
	// Release all associations held in the inline (first) page, then free
	// any overflow pages that were chained onto it.
	cleanupReferenceList(list);
	freeReferenceList(list->next);
	//fprintf(stderr, "Deallocating dtable %p\n", hiddenClass->dtable);
	free_dtable(hiddenClass->dtable);
	// We shouldn't have any subclasses left at this point
	assert(hiddenClass->subclass_list == 0);
	// Remove the class from the subclass list of its superclass
	Class sub = hiddenClass->super_class->subclass_list;
	if (sub == hiddenClass)
	{
		hiddenClass->super_class->subclass_list = hiddenClass->sibling_class;
	}
	else
	{
		while (sub != NULL)
		{
			if ((Class)sub->sibling_class == hiddenClass)
			{
				sub->sibling_class = hiddenClass->sibling_class;
				break;
			}
			sub = sub->sibling_class;
		}
	}
	// Point the object at the hidden class's superclass so the rest of
	// destruction proceeds normally.
	obj->isa = hiddenClass->super_class;
	// Free the introspection structures:
	freeMethodLists(hiddenClass);
	freeIvarLists(hiddenClass);
	// Free the class
	free(hiddenClass);
}
/**
 * Return the reference list storing `object`'s associated state. For class
 * objects the list hangs off the class's extra_data field; for instances it
 * lives in the indexed ivars of the hidden class, which is created on demand
 * when `create` is YES. Returns NULL if no list exists (and create is NO) or
 * if creation failed.
 */
static struct reference_list* referenceListForObject(id object, BOOL create)
{
	if (class_isMetaClass(object->isa))
	{
		Class cls = (Class)object;
		if ((NULL == cls->extra_data) && create)
		{
			// Allocate before taking the spinlock to keep the critical
			// section short; if we lose the race, free the spare copy.
			volatile int *lock = lock_for_pointer(cls);
			struct reference_list *list = gc->malloc(sizeof(struct reference_list));
			lock_spinlock(lock);
			if (NULL == cls->extra_data)
			{
				INIT_LOCK(list->lock);
				cls->extra_data = list;
				unlock_spinlock(lock);
			}
			else
			{
				unlock_spinlock(lock);
				gc->free(list);
			}
		}
		return cls->extra_data;
	}
	// Instance case: double-checked locking — probe again for the hidden
	// class under the per-pointer spinlock before creating one.
	Class hiddenClass = findHiddenClass(object);
	if ((NULL == hiddenClass) && create)
	{
		volatile int *lock = lock_for_pointer(object);
		lock_spinlock(lock);
		hiddenClass = findHiddenClass(object);
		if (NULL == hiddenClass)
		{
			hiddenClass = initHiddenClassForObject(object);
			struct reference_list *list = object_getIndexedIvars(hiddenClass);
			INIT_LOCK(list->lock);
		}
		unlock_spinlock(lock);
	}
	return hiddenClass ? object_getIndexedIvars(hiddenClass) : NULL;
}
/**
 * Public entry point for attaching an associated object to `object` under
 * `key` with the given ownership policy. Small (tagged-pointer) objects
 * cannot carry associations and are silently ignored.
 */
void objc_setAssociatedObject(id object,
                              const void *key,
                              id value,
                              objc_AssociationPolicy policy)
{
	if (isSmallObject(object)) { return; }
	struct reference_list *list = referenceListForObject(object, YES);
	// referenceListForObject() returns NULL if allocating the hidden class
	// (or the class's extra_data) failed; don't hand NULL to setReference(),
	// which would dereference it.
	if (NULL == list) { return; }
	setReference(list, key, value, policy);
}
/**
 * Look up the associated object stored under `key`. Searches the object's
 * own reference list first, then walks every hidden class up the class
 * hierarchy (this is how clones created by object_clone_np() inherit their
 * prototype's associations). Returns nil if nothing is found.
 */
id objc_getAssociatedObject(id object, const void *key)
{
	if (isSmallObject(object)) { return nil; }
	struct reference_list *list = referenceListForObject(object, NO);
	if (NULL == list) { return nil; }
	struct reference *r = findReference(list, key);
	if (NULL != r)
	{
		return r->object;
	}
	// Class objects have no hidden-class chain to search.
	if (class_isMetaClass(object->isa))
	{
		return nil;
	}
	Class cls = object->isa;
	while (Nil != cls)
	{
		// Skip to the next class flagged as an associated-object class.
		while (Nil != cls &&
		       !objc_test_class_flag(cls, objc_class_flag_assoc_class))
		{
			cls = class_getSuperclass(cls);
		}
		if (Nil != cls)
		{
			struct reference_list *next_list = object_getIndexedIvars(cls);
			// Skip the list we already searched above.
			if (list != next_list)
			{
				list = next_list;
				struct reference *r = findReference(list, key);
				if (NULL != r)
				{
					return r->object;
				}
			}
			cls = class_getSuperclass(cls);
		}
	}
	return nil;
}
/**
 * Drop every association attached to `object`, releasing stored values as
 * their policies require.
 */
void objc_removeAssociatedObjects(id object)
{
	// Small (tagged-pointer) objects never carry associated state.
	if (isSmallObject(object)) { return; }
	// Look the list up without creating one just to clear it.
	struct reference_list *list = referenceListForObject(object, NO);
	cleanupReferenceList(list);
}
/**
 * Return the GC type descriptor stored for `cls`, or NULL if none is set.
 */
PRIVATE void *gc_typeForClass(Class cls)
{
	struct reference_list *list = referenceListForObject(cls, YES);
	// referenceListForObject() can return NULL on allocation failure;
	// report "no type" rather than dereferencing NULL.
	return (NULL != list) ? list->gc_type : NULL;
}
/**
 * Record the GC type descriptor for `cls`. A no-op if the class's reference
 * list could not be allocated.
 */
PRIVATE void gc_setTypeForClass(Class cls, void *type)
{
	struct reference_list *list = referenceListForObject(cls, YES);
	// Guard against allocation failure inside referenceListForObject().
	if (NULL != list)
	{
		list->gc_type = type;
	}
}
/**
 * @synchronized support: acquire the per-object lock stored in the object's
 * reference list, creating the list on first use. Returns 0; nil and small
 * (tagged-pointer) objects are accepted without taking any lock.
 */
OBJC_PUBLIC
int objc_sync_enter(id object)
{
	if ((object == 0) || isSmallObject(object)) { return 0; }
	struct reference_list *list = referenceListForObject(object, YES);
	LOCK(&list->lock);
	return 0;
}
/**
 * Release the lock taken by objc_sync_enter(). Returns 0 on success and 1 if
 * the object has no reference list (i.e. was never locked).
 */
OBJC_PUBLIC
int objc_sync_exit(id object)
{
	if ((object == 0) || isSmallObject(object)) { return 0; }
	struct reference_list *list = referenceListForObject(object, NO);
	if (NULL != list)
	{
		UNLOCK(&list->lock);
		return 0;
	}
	return 1;
}
/**
 * Return the class that should receive per-object methods for `object`,
 * creating a hidden class on demand. For class objects this is the metaclass
 * (object->isa when isa is a metaclass); small (tagged-pointer) objects get
 * nil.
 */
static Class hiddenClassForObject(id object)
{
	if (isSmallObject(object)) { return nil; }
	if (class_isMetaClass(object->isa))
	{
		return object->isa;
	}
	Class hiddenClass = findHiddenClass(object);
	if (NULL == hiddenClass)
	{
		// Same double-checked creation pattern as referenceListForObject().
		volatile int *lock = lock_for_pointer(object);
		lock_spinlock(lock);
		hiddenClass = findHiddenClass(object);
		if (NULL == hiddenClass)
		{
			hiddenClass = initHiddenClassForObject(object);
			struct reference_list *list = object_getIndexedIvars(hiddenClass);
			INIT_LOCK(list->lock);
		}
		unlock_spinlock(lock);
	}
	return hiddenClass;
}
/**
 * Add a method to a single object by installing it on that object's private
 * hidden class (created on demand).
 */
BOOL object_addMethod_np(id object, SEL name, IMP imp, const char *types)
{
	Class perObjectClass = hiddenClassForObject(object);
	return class_addMethod(perObjectClass, name, imp, types);
}
/**
 * Replace a method on a single object via its private hidden class, returning
 * the previous implementation as class_replaceMethod() does.
 */
IMP object_replaceMethod_np(id object, SEL name, IMP imp, const char *types)
{
	Class perObjectClass = hiddenClassForObject(object);
	return class_replaceMethod(perObjectClass, name, imp, types);
}
static char prototypeKey;
/**
 * Create a prototype-style clone of `object`: a new instance of the same
 * class with its own hidden class (so methods can be added to it
 * independently) that retains the original as its prototype under
 * prototypeKey. Small objects are returned unchanged.
 */
id object_clone_np(id object)
{
	if (isSmallObject(object)) { return object; }
	// Make sure that the prototype has a hidden class, so that methods added
	// to it will appear in the clone.
	referenceListForObject(object, YES);
	id new = class_createInstance(object->isa, 0);
	Class hiddenClass = initHiddenClassForObject(new);
	struct reference_list *list = object_getIndexedIvars(hiddenClass);
	INIT_LOCK(list->lock);
	// Retain the prototype so it outlives the clone's use of it.
	objc_setAssociatedObject(new, &prototypeKey, object,
	                         OBJC_ASSOCIATION_RETAIN_NONATOMIC);
	return new;
}
/**
 * Return the prototype recorded by object_clone_np(), or nil if `object` is
 * not a clone.
 */
id object_getPrototype_np(id object)
{
	// The prototype is stored as an associated object under the private
	// prototypeKey.
	id prototype = objc_getAssociatedObject(object, &prototypeKey);
	return prototype;
}

@ -0,0 +1,338 @@
// On some platforms, we need _GNU_SOURCE to expose asprintf()
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#ifndef _WIN32
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#else
#include "safewindows.h"
#endif
#include "objc/runtime.h"
#include "objc/blocks_runtime.h"
#include "blocks_runtime.h"
#include "lock.h"
#include "visibility.h"
#include "asmconstants.h" // For PAGE_SIZE
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#if defined(_WIN32) && (defined(__arm__) || defined(__aarch64__))
static inline void __clear_cache(void* start, void* end) {
FlushInstructionCache(GetCurrentProcess(), start, end - start);
}
#define clear_cache __clear_cache
#elif __has_builtin(__builtin___clear_cache)
#define clear_cache __builtin___clear_cache
#else
void __clear_cache(void* start, void* end);
#define clear_cache __clear_cache
#endif
/* QNX needs a special header for asprintf() */
#ifdef __QNXNTO__
#include <nbutil.h>
#endif
#ifdef _WIN32
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP && _WIN32_WINNT >= 0x0A00
// Prefer the *FromApp versions when we're being built in a Windows Store App context on
// Windows >= 10. *FromApp require the application to be manifested for "codeGeneration".
#define VirtualAlloc VirtualAllocFromApp
#define VirtualProtect VirtualProtectFromApp
#endif // App family partition
#ifndef PROT_READ
#define PROT_READ 0x4
#endif
#ifndef PROT_WRITE
#define PROT_WRITE 0x2
#endif
#ifndef PROT_EXEC
#define PROT_EXEC 0x1
#endif
/**
 * Minimal POSIX mprotect() shim over VirtualProtect. Returns 0 on success
 * and -1 on failure, matching the POSIX contract.
 */
static int mprotect(void *buffer, size_t len, int prot)
{
	DWORD oldProt = 0, newProt = PAGE_NOACCESS;
	// Windows doesn't offer values that can be ORed together...
	if ((prot & PROT_WRITE))
	{
		// promote to readwrite as there's no writeonly protection constant
		newProt = PAGE_READWRITE;
	}
	else if ((prot & PROT_READ))
	{
		newProt = PAGE_READONLY;
	}
	if ((prot & PROT_EXEC))
	{
		switch (newProt)
		{
			case PAGE_NOACCESS: newProt = PAGE_EXECUTE; break;
			case PAGE_READONLY: newProt = PAGE_EXECUTE_READ; break;
			case PAGE_READWRITE: newProt = PAGE_EXECUTE_READWRITE; break;
		}
	}
	// The previous version returned nonzero on success, which inverts the
	// POSIX convention (0 = success, -1 = failure). Callers in this file
	// ignore the result, but keep the shim faithful to the real mprotect().
	return VirtualProtect(buffer, len, newProt, &oldProt) ? 0 : -1;
}
#else
# ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
# endif
#endif
/*
 * Writable companion of one trampoline slot: the generated code loads
 * `block` and jumps through `fnptr`. Padded (per architecture) so that
 * sizeof(struct block_header) divides PAGE_SIZE.
 */
struct block_header
{
	void *block;
	void(*fnptr)(void);
	/**
	 * On 64-bit platforms, we have 16 bytes for instructions, which ought to
	 * be enough without padding.
	 * Note: If we add too much padding, then we waste space but have no other
	 * ill effects. If we get this too small, then the assert in
	 * `init_trampolines` will fire on library load.
	 *
	 * PowerPC: We need INSTR_CNT * INSTR_LEN = 7*4 = 28 bytes
	 * for instruction. sizeof(block_header) must be a divisor of
	 * PAGE_SIZE, so we need to pad block_header to 32 bytes.
	 * On PowerPC 64-bit where sizeof(void *) = 8 bytes, we
	 * add 16 bytes of padding.
	 */
#if defined(__i386__) || (defined(__mips__) && !defined(__mips_n64)) || (defined(__powerpc__) && !defined(__powerpc64__))
	uint64_t padding[3];
#elif defined(__mips__) || defined(__powerpc64__)
	uint64_t padding[2];
#elif defined(__arm__)
	uint64_t padding;
#endif
};
#define HEADERS_PER_PAGE (PAGE_SIZE/sizeof(struct block_header))
/**
 * Structure containing a two pages of block trampolines. Each trampoline
 * loads its block and target method address from the corresponding
 * block_header (one page before the start of the block structure).
 */
struct trampoline_buffers
{
	struct block_header headers[HEADERS_PER_PAGE];
	char rx_buffer[PAGE_SIZE];
};
_Static_assert(__builtin_offsetof(struct trampoline_buffers, rx_buffer) == PAGE_SIZE,
               "Incorrect offset for read-execute buffer");
_Static_assert(sizeof(struct trampoline_buffers) == 2*PAGE_SIZE,
               "Incorrect size for trampoline buffers");
/**
 * A linked list of trampoline buffer pairs. `first_free` is the index of the
 * head of the free-slot chain within buffers->headers, or -1 when full.
 */
struct trampoline_set
{
	struct trampoline_buffers *buffers;
	struct trampoline_set *next;
	int first_free;
};
// Guards both trampoline_set lists (taken via LOCK_FOR_SCOPE below).
static mutex_t trampoline_lock;
// NOTE(review): wx_buffer appears unused in this file — confirm before
// removing it.
struct wx_buffer
{
	void *w;
	void *x;
};
// Symbols bracketing the trampoline templates in block_trampolines.S.
extern char __objc_block_trampoline;
extern char __objc_block_trampoline_end;
extern char __objc_block_trampoline_sret;
extern char __objc_block_trampoline_end_sret;
/**
 * Library-load-time check that the assembly trampoline templates fit inside
 * one block_header slot, plus initialisation of the trampoline lock.
 */
PRIVATE void init_trampolines(void)
{
	assert(&__objc_block_trampoline_end - &__objc_block_trampoline <= sizeof(struct block_header));
	assert(&__objc_block_trampoline_end_sret - &__objc_block_trampoline_sret <= sizeof(struct block_header));
	INIT_LOCK(trampoline_lock);
}
/**
 * Placeholder IMP installed in unused trampoline slots: complains on stderr
 * and returns nil rather than jumping through a stale block pointer.
 */
static id invalid(id self, SEL _cmd)
{
	const char *clsName = class_getName(object_getClass(self));
	const char *selName = sel_getName(_cmd);
	fprintf(stderr, "Invalid block method called for [%s %s]\n",
	        clsName, selName);
	return nil;
}
/**
 * Allocate and initialise one trampoline_set: a writable page of
 * block_headers immediately followed by an executable page containing one
 * trampoline (copied from the [start, end) template) per header. Unused
 * headers are chained into a free list through their `block` fields.
 */
static struct trampoline_set *alloc_trampolines(char *start, char *end)
{
	struct trampoline_set *metadata = calloc(1, sizeof(struct trampoline_set));
#if _WIN32
	metadata->buffers = VirtualAlloc(NULL, sizeof(struct trampoline_buffers), MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	metadata->buffers = mmap(NULL, sizeof(struct trampoline_buffers), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#endif
	for (int i=0 ; i<HEADERS_PER_PAGE ; i++)
	{
		metadata->buffers->headers[i].fnptr = (void(*)(void))invalid;
		// Chain the free list through the block fields.
		// NOTE(review): on the final iteration this takes the address of a
		// member of the one-past-the-end element; it is overwritten with
		// NULL below, but the computation itself is questionable — confirm.
		metadata->buffers->headers[i].block = &metadata->buffers->headers[i+1].block;
		char *block = metadata->buffers->rx_buffer + (i * sizeof(struct block_header));
		memcpy(block, start, end-start);
	}
	metadata->buffers->headers[HEADERS_PER_PAGE-1].block = NULL;
	// Flip the trampoline page to read+execute and flush the instruction
	// cache so the freshly copied code is visible to the CPU.
	mprotect(metadata->buffers->rx_buffer, PAGE_SIZE, PROT_READ | PROT_EXEC);
	clear_cache(metadata->buffers->rx_buffer, &metadata->buffers->rx_buffer[PAGE_SIZE]);
	return metadata;
}
static struct trampoline_set *sret_trampolines;
static struct trampoline_set *trampolines;
/**
 * Return an IMP that invokes `block` with self shuffled into the block-
 * argument slot. The block is copied to the heap; a trampoline slot paired
 * with a {block, invoke} header is allocated from the appropriate set
 * (sret or normal). Returns 0 on architectures with no trampoline support.
 */
IMP imp_implementationWithBlock(id block)
{
	struct Block_layout *b = (struct Block_layout *)block;
	void *start;
	void *end;
	LOCK_FOR_SCOPE(&trampoline_lock);
	struct trampoline_set **setptr;
	// Blocks returning structures via a hidden pointer need the sret
	// trampoline variant, which shuffles different argument registers.
	if ((b->flags & BLOCK_USE_SRET) == BLOCK_USE_SRET)
	{
		setptr = &sret_trampolines;
		start = &__objc_block_trampoline_sret;
		end = &__objc_block_trampoline_end_sret;
	}
	else
	{
		setptr = &trampolines;
		start = &__objc_block_trampoline;
		end = &__objc_block_trampoline_end;
	}
	size_t trampolineSize = end - start;
	// If we don't have a trampoline intrinsic for this architecture, return a
	// null IMP.
	if (0 >= trampolineSize) { return 0; }
	block = Block_copy(block);
	// Use the heap copy, not the (possibly stack-allocated) original: the
	// trampoline outlives the caller's frame and imp_removeBlock() releases
	// the stored pointer. Previously the Block_copy() result was discarded
	// and the original registered instead.
	b = (struct Block_layout *)block;
	// Allocate some trampolines if this is the first time that we need to do this.
	if (*setptr == NULL)
	{
		*setptr = alloc_trampolines(start, end);
	}
	for (struct trampoline_set *set=*setptr ; set!=NULL ; set=set->next)
	{
		if (set->first_free != -1)
		{
			// Pop a slot off this set's free list.
			int i = set->first_free;
			struct block_header *h = &set->buffers->headers[i];
			struct block_header *next = h->block;
			set->first_free = next ? (next - set->buffers->headers) : -1;
			assert(set->first_free < HEADERS_PER_PAGE);
			assert(set->first_free >= -1);
			h->fnptr = (void(*)(void))b->invoke;
			h->block = b;
			uintptr_t addr = (uintptr_t)&set->buffers->rx_buffer[i*sizeof(struct block_header)];
#if (__ARM_ARCH_ISA_THUMB == 2)
			// If the trampoline is Thumb-2 code, then we must set the low bit
			// to 1 so that b[l]x instructions put the CPU in the correct mode.
			addr |= 1;
#endif
			return (IMP)addr;
		}
	}
	UNREACHABLE("Failed to allocate block");
}
/**
 * Find the trampoline set whose executable page contains anIMP. On success
 * *setptr is updated to the containing set and the header index within it is
 * returned; otherwise -1 and *setptr is untouched.
 */
static int indexForIMP(IMP anIMP, struct trampoline_set **setptr)
{
	struct trampoline_set *set = *setptr;
	while (set != NULL)
	{
		char *base = set->buffers->rx_buffer;
		char *limit = &set->buffers->rx_buffer[PAGE_SIZE];
		if (((char*)anIMP >= base) && ((char*)anIMP < limit))
		{
			*setptr = set;
			ptrdiff_t offset = (char*)anIMP - base;
			return offset / sizeof(struct block_header);
		}
		set = set->next;
	}
	return -1;
}
/**
 * Return the block backing a trampoline IMP, or NULL if anImp is not a
 * trampoline created by imp_implementationWithBlock().
 */
id imp_getBlock(IMP anImp)
{
	LOCK_FOR_SCOPE(&trampoline_lock);
	struct trampoline_set *set = trampolines;
	int idx = indexForIMP(anImp, &set);
	if (idx == -1)
	{
		// Not in the normal set; retry against the sret set. The result
		// must be assigned to idx (previously it was discarded, so sret
		// trampolines always fell through to the NULL return).
		set = sret_trampolines;
		idx = indexForIMP(anImp, &set);
	}
	if (idx == -1)
	{
		return NULL;
	}
	return set->buffers->headers[idx].block;
}
/**
 * Release the block behind a trampoline IMP and return its slot to the free
 * list. Returns NO if anImp is not a block trampoline.
 */
BOOL imp_removeBlock(IMP anImp)
{
	LOCK_FOR_SCOPE(&trampoline_lock);
	struct trampoline_set *set = trampolines;
	int idx = indexForIMP(anImp, &set);
	if (idx == -1)
	{
		// Retry against the sret set, assigning the result to idx
		// (previously the return value was discarded, so sret trampolines
		// could never be removed).
		set = sret_trampolines;
		idx = indexForIMP(anImp, &set);
	}
	if (idx == -1)
	{
		return NO;
	}
	struct block_header *h = &set->buffers->headers[idx];
	Block_release(h->block);
	// Reinstall the guard IMP and push the slot onto the set's free list.
	h->fnptr = (void(*)(void))invalid;
	h->block = set->first_free == -1 ? NULL : &set->buffers->headers[set->first_free];
	set->first_free = h - set->buffers->headers;
	return YES;
}
PRIVATE size_t lengthOfTypeEncoding(const char *types);
/**
 * Return a malloc'd copy (caller frees) of the block's type encoding
 * rewritten as a method/IMP encoding: the leading block argument `@?`
 * becomes `@` (self) and the following object argument becomes `:` (_cmd).
 * Returns NULL if the block carries no signature, on allocation failure, or
 * if the encoding does not have the expected shape.
 */
char *block_copyIMPTypeEncoding_np(id block)
{
	// Blocks without an embedded signature have no encoding to copy, and
	// strdup(NULL) is undefined behaviour.
	const char *type = block_getType_np(block);
	if (NULL == type) { return NULL; }
	char *buffer = strdup(type);
	if (NULL == buffer) { return NULL; }
	char *replace = buffer;
	// Skip the return type
	replace += lengthOfTypeEncoding(replace);
	// Skip the frame-offset digits that follow each type.
	while (isdigit(*replace)) { replace++; }
	// The first argument type should be @? (block), and we need to transform
	// it to @, so we have to delete the ?. Assert here because this isn't a
	// block encoding at all if the first argument is not a block, and since we
	// got it from block_getType_np(), this means something is badly wrong.
	assert('@' == *replace);
	replace++;
	assert('?' == *replace);
	// Use strlen(replace) not replace+1, because we want to copy the NULL
	// terminator as well.
	memmove(replace, replace+1, strlen(replace));
	// The next argument should be an object, and we want to replace it with a
	// selector
	while (isdigit(*replace)) { replace++; }
	if ('@' != *replace)
	{
		free(buffer);
		return NULL;
	}
	*replace = ':';
	return buffer;
}

@ -0,0 +1,232 @@
#include "common.S"
#include "asmconstants.h"
#
# This file defines some trampolines for calling blocks. A block function
# looks like this:
#
# retType blockFn(block*, ...)
#
# An IMP looks like this:
#
# retType imp(id, SEL,...)
#
# The trampoline must find the block pointer and then call the block function
# with the correct first argument, the self pointer moved to the second real
# argument (the first block argument) and the _cmd parameter excised
#
# Each trampoline's block_header lives one page (PAGE_SIZE bytes) below the
# trampoline's entry point, so every variant below reaches its {block, fnptr}
# pair by subtracting roughly a page from its own address (the exact literal
# differs per architecture because of instruction-relative addressing).
.file "block_trampolines.S"
#if __x86_64
////////////////////////////////////////////////////////////////////////////////
// x86-64 trampoline
////////////////////////////////////////////////////////////////////////////////
.macro trampoline arg0, arg1
	mov -0x1007(%rip), \arg1 # Load the block pointer into the second argument
	xchg \arg1, \arg0 # Swap the first and second arguments
	jmp *-0x1008(%rip) # Call the block function
.endm
// The Win64 and SysV x86-64 ABIs use different registers
# ifdef _WIN64
# define ARG0 %rcx
# define ARG1 %rdx
# define SARG1 %r8
# else
# define ARG0 %rdi
# define ARG1 %rsi
# define SARG1 %rdx
# endif
# define SARG0 ARG1
#elif __i386
////////////////////////////////////////////////////////////////////////////////
// x86-32 trampoline
////////////////////////////////////////////////////////////////////////////////
#ifdef _WIN32
// Mark this compilation unit as SEH-safe
.text
	.def @feat.00;
	.scl 3;
	.type 0;
	.endef
	.globl @feat.00
	.set @feat.00, 1
.data
#endif
.macro trampoline arg0, arg1
	call 1f # Store the instruction pointer on the stack
1:
	pop %eax # Load the old instruction pointer
	mov \arg0(%esp), %ebx # Load the self parameter
	mov %ebx, \arg1(%esp) # Store self as the second argument
	mov -0x1005(%eax), %ebx # Load the block pointer to %ebx
	mov %ebx, \arg0(%esp) # Store the block pointer in the first argument
	jmp *-0x1001(%eax) # Call the block function
.endm
// All arguments on i386 are passed on the stack. These values are stack
// offsets - on other platforms they're register values.
# define ARG0 4
# define ARG1 8
# define SARG0 8
# define SARG1 12
#elif __mips__
////////////////////////////////////////////////////////////////////////////////
// MIPS trampoline
////////////////////////////////////////////////////////////////////////////////
# ifdef _ABI64
.macro trampoline arg0, arg1
	move \arg1, \arg0
	ld \arg0, -4096($25)
	ld $25, -4088($25)
	jr $25
.endm
# else
// 32-bit variant. This ought to work with both n32 and o32, because they both
// use 32-bit pointers and both use the same registers for the first four
// arguments (and we only care about the first three).
.macro trampoline arg0, arg1
	move \arg1, \arg0
	lw \arg0, -4096($25)
	lw $25, -4092($25)
	jr $25
.endm
# endif
#define ARG0 $a0
#define ARG1 $a1
#define ARG2 $a2
#elif defined(__powerpc__)
////////////////////////////////////////////////////////////////////////////////
// PowerPC trampoline
////////////////////////////////////////////////////////////////////////////////
#if defined(__powerpc64__)
#define LOAD ld
#define OFFSET 8
#else
#define LOAD lwz
#define OFFSET 4
#endif
.macro trampoline arg0, arg1
	mfctr %r12 # The block trampoline is always called
	# via a function pointer. We can thus
	# assume that ctr contains the trampline
	# entry point address from the previous
	# branch to this trampoline (bctrl).
#if PAGE_SHIFT < 16
	addi %r12, %r12, -PAGE_SIZE # Substract page size from entry point
#else
	addis %r12, %r12, (-0x1 << (PAGE_SHIFT - 16))
#endif
	mr \arg1, \arg0
	LOAD \arg0, 0(%r12)
	LOAD %r12, OFFSET(%r12)
	mtctr %r12 # Move block function pointer into ctr
	bctr # Branch to block function
.endm
#define ARG0 %r3
#define ARG1 %r4
#define ARG2 %r5
#define SARG0 ARG1
#define SARG1 ARG2
#elif defined(__riscv) && (__riscv_xlen == 64)
////////////////////////////////////////////////////////////////////////////////
// RISC-V trampoline
////////////////////////////////////////////////////////////////////////////////
.macro trampoline arg0, arg1
	auipc t6, 0xFFFFF // pc + -0x1000
	mv \arg1, \arg0
	ld \arg0, 0(t6)
	ld t6, 8(t6)
	jr t6
.endm
#define ARG0 a0
#define ARG1 a1
#define ARG2 a2
#define SARG0 ARG1
#define SARG1 ARG2
#elif defined(__ARM_ARCH_ISA_A64)
////////////////////////////////////////////////////////////////////////////////
// AArch64 (ARM64) trampoline
////////////////////////////////////////////////////////////////////////////////
.macro trampoline arg0, arg1
	adr x17, #-4096
	mov \arg1, \arg0
	ldp \arg0, x17, [x17]
	br x17
.endm
#define ARG0 x0
#define ARG1 x1
#define SARG0 x0
#define SARG1 x1
#elif __arm__
////////////////////////////////////////////////////////////////////////////////
// AArch32 (ARM) trampoline
////////////////////////////////////////////////////////////////////////////////
# if (__ARM_ARCH_ISA_THUMB == 2)
// If we're on a target that supports Thumb 2, then we need slightly more
// instructions to support Thumb/ARM code for the IMP and so we need to make
// the trampolines thumb to be able to fit them in 16 bytes (they fit exactly
// when assembled as Thumb-2).
.thumb
.macro trampoline arg0, arg1
	sub r12, pc, #4095
	mov \arg1, \arg0 // Move self over _cmd
	ldr \arg0, [r12, #-5] // Load the block pointer over self
	ldr r12, [r12, #-1] // Jump to the block function
	bx r12
.endm
# else
.macro trampoline arg0, arg1
	sub r12, pc, #4096
	mov \arg1, \arg0 // Move self over _cmd
	ldr \arg0, [r12, #-8] // Load the block pointer over self
	ldr pc, [r12, #-4] // Jump to the block function
.endm
# endif // (__ARM_ARCH_ISA_THUMB == 2)
#define ARG0 r0
#define ARG1 r1
#define SARG0 r1
#define SARG1 r2
#else
#warning imp_implementationWithBlock() not implemented for your architecture
.macro trampoline arg0, arg1
.endm
#define ARG0 0
#define ARG1 0
#define SARG0 0
#define SARG1 0
#endif
// Instantiate the two template trampolines (normal and struct-return); the
// C code copies the bytes between each start/end symbol pair into every
// slot of the executable trampoline page.
.globl CDECL(__objc_block_trampoline)
CDECL(__objc_block_trampoline):
	trampoline ARG0, ARG1
.globl CDECL(__objc_block_trampoline_end)
CDECL(__objc_block_trampoline_end):
.globl CDECL(__objc_block_trampoline_sret)
CDECL(__objc_block_trampoline_sret):
	trampoline SARG0, SARG1
.globl CDECL(__objc_block_trampoline_end_sret)
CDECL(__objc_block_trampoline_end_sret):
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

@ -0,0 +1,324 @@
/*
* Copyright (c) 2009 Remy Demarest
* Portions Copyright (c) 2009 David Chisnall
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#import "objc/blocks_runtime.h"
#include "objc/blocks_private.h"
#import "objc/runtime.h"
#import "objc/objc-arc.h"
#include "blocks_runtime.h"
#include "gc_ops.h"
#include "visibility.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
// Sentinel isa value marking a __block (byref) structure that has been
// promoted to the heap.
static void *_HeapBlockByRef = (void*)1;

/**
 * Returns whether the block carries an embedded type signature
 * (BLOCK_HAS_SIGNATURE). NULL trivially has none.
 */
OBJC_PUBLIC bool _Block_has_signature(void *b)
{
	const struct Block_layout *block = (struct Block_layout*)b;
	if (NULL == block)
	{
		return false;
	}
	return (block->flags & BLOCK_HAS_SIGNATURE) != 0;
}
/**
 * Returns the Objective-C type encoding for the block, or NULL when the
 * block (or its signature flag) is absent.
 */
OBJC_PUBLIC const char * _Block_signature(void *b)
{
	const struct Block_layout *block = (struct Block_layout*)b;
	if ((NULL == block) || !(block->flags & BLOCK_HAS_SIGNATURE))
	{
		return NULL;
	}
	// Blocks with copy/dispose helpers use the full descriptor layout; the
	// encoding sits at a different offset in the basic layout.
	if (block->flags & BLOCK_HAS_COPY_DISPOSE)
	{
		return block->descriptor->encoding;
	}
	return ((struct Block_descriptor_basic*)block->descriptor)->encoding;
}
/**
 * Atomically increment the low 24 bits of *ref (a saturating reference
 * count packed alongside flag bits). Once saturated at BLOCK_REFCOUNT_MASK
 * the count is pinned. Returns the new count.
 */
static int increment24(int *ref)
{
	for (;;)
	{
		int old = *ref;
		int val = old & BLOCK_REFCOUNT_MASK;
		if (val == BLOCK_REFCOUNT_MASK)
		{
			// Saturated: never move past the mask.
			return val;
		}
		assert(val < BLOCK_REFCOUNT_MASK);
		// CAS on the whole word; retry from a fresh read on contention.
		if (__sync_bool_compare_and_swap(ref, old, old+1))
		{
			return val + 1;
		}
	}
}
/**
 * Atomically decrement the low 24 bits of *ref. A saturated count
 * (BLOCK_REFCOUNT_MASK) is pinned and never decremented. Returns the new
 * count.
 */
static int decrement24(int *ref)
{
	for (;;)
	{
		int old = *ref;
		int val = old & BLOCK_REFCOUNT_MASK;
		if (val == BLOCK_REFCOUNT_MASK)
		{
			// Saturated counts are immortal.
			return val;
		}
		assert(val > 0);
		// CAS on the whole word; retry from a fresh read on contention.
		if (__sync_bool_compare_and_swap(ref, old, old-1))
		{
			return val - 1;
		}
	}
}
// This is a really ugly hack that works around a buggy register allocator in
// GCC. Compiling nontrivial code using __sync_bool_compare_and_swap() with
// GCC (4.2.1, at least), causes the register allocator to run out of registers
// and fall over and die. We work around this by wrapping this CAS in a
// function, which means the register allocator can trivially handle it. Do
// not remove the noinline attribute - without it, gcc will inline it early on
// and then crash later.
#ifndef __clang__
__attribute__((noinline))
// Out-of-line CAS wrapper; see the comment above for why GCC needs this.
static int cas(void *ptr, void *old, void *new)
{
	return __sync_bool_compare_and_swap((void**)ptr, old, new);
}
#define __sync_bool_compare_and_swap cas
#endif
/* Certain field types require runtime assistance when being copied to the
* heap. The following function is used to copy fields of types: blocks,
* pointers to byref structures, and objects (including
* __attribute__((NSObject)) pointers. BLOCK_FIELD_IS_WEAK is orthogonal to
* the other choices which are mutually exclusive. Only in a Block copy helper
* will one see BLOCK_FIELD_IS_BYREF.
*/
/**
 * Compiler-invoked helper that copies one captured field into a heap block:
 * byref (__block) structures are promoted to the heap (or their refcount
 * incremented), nested blocks are Block_copy'd, and object captures are
 * retained.
 */
OBJC_PUBLIC void _Block_object_assign(void *destAddr, const void *object, const int flags)
{
	//printf("Copying %x to %x with flags %x\n", object, destAddr, flags);
	// FIXME: Needs to be implemented
	//if(flags & BLOCK_FIELD_IS_WEAK)
	{
	}
	//else
	{
		if (IS_SET(flags, BLOCK_FIELD_IS_BYREF))
		{
			struct block_byref_obj *src = (struct block_byref_obj *)object;
			struct block_byref_obj **dst = destAddr;
			// Always operate on the canonical (possibly already heap) copy.
			src = src->forwarding;
			if ((src->flags & BLOCK_REFCOUNT_MASK) == 0)
			{
				// Not yet promoted: build a heap copy.
				*dst = gc->malloc(src->size);
				memcpy(*dst, src, src->size);
				(*dst)->isa = _HeapBlockByRef;
				// Refcount must be two; one for the copy and one for the
				// on-stack version that will point to it.
				(*dst)->flags += 2;
				if (IS_SET(src->flags, BLOCK_HAS_COPY_DISPOSE))
				{
					src->byref_keep(*dst, src);
				}
				(*dst)->forwarding = *dst;
				// Concurrency. If we try copying the same byref structure
				// from two threads simultaneously, we could end up with two
				// versions on the heap that are unaware of each other. That
				// would be bad. So we first set up the copy, then try to do
				// an atomic compare-and-exchange to point the old version at
				// it. If the forwarding pointer in src has changed, then we
				// recover - clean up and then return the structure that the
				// other thread created.
				if (!__sync_bool_compare_and_swap(&src->forwarding, src, *dst))
				{
					// NOTE(review): this dispose is gated on a size check
					// rather than BLOCK_HAS_COPY_DISPOSE — confirm
					// byref_dispose is always valid here.
					if((size_t)src->size >= sizeof(struct block_byref_obj))
					{
						src->byref_dispose(*dst);
					}
					gc->free(*dst);
					*dst = src->forwarding;
				}
			}
			else
			{
				// Already on the heap: just take another reference.
				*dst = (struct block_byref_obj*)src;
				increment24(&(*dst)->flags);
			}
		}
		else if (IS_SET(flags, BLOCK_FIELD_IS_BLOCK))
		{
			struct Block_layout *src = (struct Block_layout*)object;
			struct Block_layout **dst = destAddr;
			*dst = Block_copy(src);
		}
		else if (IS_SET(flags, BLOCK_FIELD_IS_OBJECT) &&
			!IS_SET(flags, BLOCK_BYREF_CALLER))
		{
			id src = (id)object;
			void **dst = destAddr;
			// NOTE(review): the first store is redundant — it is
			// immediately overwritten by the retained value.
			*dst = src;
			*dst = objc_retain(src);
		}
	}
}
/* Similarly a compiler generated dispose helper needs to call back for each
* field of the byref data structure. (Currently the implementation only packs
* one field into the byref structure but in principle there could be more).
* The same flags used in the copy helper should be used for each call
* generated to this function:
*/
/**
 * Compiler-invoked counterpart of _Block_object_assign: releases one field
 * captured by a block being destroyed. Byref structures are reference
 * counted (only heap copies — marked with _HeapBlockByRef — are freed),
 * nested blocks are Block_release'd, and object captures released.
 */
OBJC_PUBLIC void _Block_object_dispose(const void *object, const int flags)
{
	// FIXME: Needs to be implemented
	//if(flags & BLOCK_FIELD_IS_WEAK)
	{
	}
	//else
	{
		if (IS_SET(flags, BLOCK_FIELD_IS_BYREF))
		{
			struct block_byref_obj *src =
				(struct block_byref_obj*)object;
			// Operate on the canonical copy via the forwarding pointer.
			src = src->forwarding;
			if (src->isa == _HeapBlockByRef)
			{
				int refcount = (src->flags & BLOCK_REFCOUNT_MASK) == 0 ? 0 : decrement24(&src->flags);
				if (refcount == 0)
				{
					if(IS_SET(src->flags, BLOCK_HAS_COPY_DISPOSE) && (0 != src->byref_dispose))
					{
						src->byref_dispose(src);
					}
					gc->free(src);
				}
			}
		}
		else if (IS_SET(flags, BLOCK_FIELD_IS_BLOCK))
		{
			struct Block_layout *src = (struct Block_layout*)object;
			Block_release(src);
		}
		else if (IS_SET(flags, BLOCK_FIELD_IS_OBJECT) &&
			!IS_SET(flags, BLOCK_BYREF_CALLER))
		{
			id src = (id)object;
			objc_release(src);
		}
	}
}
// Copy a block to the heap if it's still on the stack, or increment its
// retain count if it is already heap-allocated. Global blocks pass through
// unchanged. The `reserved` field is used as the heap block's retain count.
OBJC_PUBLIC void *_Block_copy(const void *src)
{
	if (NULL == src) { return NULL; }
	struct Block_layout *self = (struct Block_layout*)src;
	struct Block_layout *ret = self;
	extern void _NSConcreteStackBlock;
	extern void _NSConcreteMallocBlock;
	// If the block is Global, there's no need to copy it on the heap.
	if(self->isa == &_NSConcreteStackBlock)
	{
		// Promote a stack block: byte-copy it, rebrand the isa, then let the
		// compiler-generated copy helper fix up captured fields.
		ret = gc->malloc(self->descriptor->size);
		memcpy(ret, self, self->descriptor->size);
		ret->isa = &_NSConcreteMallocBlock;
		if(self->flags & BLOCK_HAS_COPY_DISPOSE)
		{
			self->descriptor->copy_helper(ret, self);
		}
		// We don't need any atomic operations here, because on-stack blocks
		// can not be aliased across threads (unless you've done something
		// badly wrong).
		ret->reserved = 1;
	}
	else if (self->isa == &_NSConcreteMallocBlock)
	{
		// We need an atomic increment for malloc'd blocks, because they may be
		// shared.
		__sync_fetch_and_add(&ret->reserved, 1);
	}
	return ret;
}
// Release a block and free the memory when the retain count hits zero.
// Stack blocks produce a warning (releasing them is a caller bug but
// harmless); global blocks are ignored entirely.
OBJC_PUBLIC void _Block_release(const void *src)
{
	if (NULL == src) { return; }
	struct Block_layout *self = (struct Block_layout*)src;
	extern void _NSConcreteStackBlock;
	extern void _NSConcreteMallocBlock;
	if (&_NSConcreteStackBlock == self->isa)
	{
		fprintf(stderr, "Block_release called upon a stack Block: %p, ignored\n", self);
	}
	else if (&_NSConcreteMallocBlock == self->isa)
	{
		// The thread that drops the count to zero runs the dispose helper,
		// clears any weak references to the block, and frees it.
		if (__sync_sub_and_fetch(&self->reserved, 1) == 0)
		{
			if(self->flags & BLOCK_HAS_COPY_DISPOSE)
				self->descriptor->dispose_helper(self);
			objc_delete_weak_refs((id)self);
			gc->free(self);
		}
	}
}
/**
 * Returns whether the block's retain count (stored in `reserved`) has
 * dropped to zero, i.e. deallocation has begun. The atomic no-op add is
 * used to get a coherent read of the counter.
 */
OBJC_PUBLIC bool _Block_isDeallocating(const void* arg)
{
	// The previous version also declared an unused `block` local; removed.
	int *refCountPtr = &((struct Block_layout*)arg)->reserved;
	int refCount = __sync_fetch_and_add(refCountPtr, 0);
	return refCount == 0;
}
OBJC_PUBLIC bool _Block_tryRetain(const void* arg)
{
	/* This is used by the weak reference management in ARC. The implementation
	 * follows the reasoning of `retain_fast()` in arc.mm: We want to abandon the
	 * retain operation if another thread has started deallocating the object between
	 * loading the weak pointer and executing the retain operation.
	 */
	struct Block_layout *block = (struct Block_layout*)arg;
	int *refCountPtr = &block->reserved;
	// Coherent initial read of the retain count.
	int refCountVal = __sync_fetch_and_add(refCountPtr, 0);
	int newVal = refCountVal;
	// CAS loop: retry until we either observe a non-positive count (the
	// block is deallocating — fail) or successfully install count + 1.
	do {
		refCountVal = newVal;
		if (refCountVal <= 0)
		{
			return false;
		}
		newVal = __sync_val_compare_and_swap(refCountPtr, refCountVal, newVal + 1);
	} while (newVal != refCountVal);
	return true;
}

@ -0,0 +1,56 @@
/*
* Copyright (c) 2009 Remy Demarest
* Portions Copyright (c) 2009 David Chisnall
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef EMBEDDED_BLOCKS_RUNTIME
#import "objc/blocks_runtime.h"
#include "blocks_runtime.h"
#else
#import <Block.h>
#import <Block_private.h>
#endif
#include "visibility.h"
// Returns the type encoding recorded for this block, as reported by
// _Block_signature().
OBJC_PUBLIC const char *block_getType_np(const void *b)
{
	const char *signature = _Block_signature((void*)b);
	return signature;
}
/**
* Returns the block pointer, or NULL if the block is already
* being deallocated. The implementation does not employ atomic
* operations, so this function must only be called by the ARC
* subsystem after obtaining the weak-reference lock.
*/
PRIVATE void* block_load_weak(void *block)
{
	struct Block_layout *self = block;
#ifdef EMBEDDED_BLOCKS_RUNTIME
	// Embedded runtime: the refcount lives in `reserved`; a positive value
	// means the block is still alive.
	return (self->reserved) > 0 ? block : 0;
#else
	// External Blocks runtime: the refcount is packed into the flags word.
	return (self->flags) & BLOCK_REFCOUNT_MASK ? block : 0;
#endif
}

@ -0,0 +1,42 @@
#include "objc/capabilities.h"
#include <stdint.h>
/**
* Bitmask of all of the capabilities compiled into this version of the
* runtime.
*/
static const int32_t caps =
	// Capabilities that are always present in this build.
	// (OBJC_CAP_SYNCRONIZE is spelled this way in objc/capabilities.h.)
	(1<<OBJC_CAP_EXCEPTIONS) |
	(1<<OBJC_CAP_SYNCRONIZE) |
	(1<<OBJC_CAP_PROPERTIES) |
	(1<<OBJC_CAP_PROPERTY_INTROSPECTION) |
	(1<<OBJC_CAP_OPTIONAL_PROTOCOLS) |
	(1<<OBJC_CAP_NONFRAGILE_IVARS) |
	(1<<OBJC_DEVELOPER_MODE) |
	(1<<OBJC_CAP_REGISTERED_COMPATIBILITY_ALIASES) |
	(1<<OBJC_CAP_ARC) |
	(1<<OBJC_CAP_ASSOCIATED_REFERENCES) |
	(1<<OBJC_CAP_PROTOTYPES) |
	// Capabilities that depend on build-time configuration:
#ifndef NO_OBJCXX
	(1<<OBJC_UNIFIED_EXCEPTION_MODEL) |
#endif
#ifdef TYPE_DEPENDENT_DISPATCH
	(1<<OBJC_CAP_TYPE_DEPENDENT_DISPATCH) |
#endif
#ifdef __OBJC_LOW_MEMORY__
	(1<<OBJC_CAP_LOW_MEMORY) |
#endif
#ifdef ENABLE_GC
	(1<<OBJC_CAP_GARBAGE_COLLECTION) |
#endif
#if defined(WITH_TRACING) && defined (__x86_64)
	(1<<OBJC_CAP_TRACING) |
#endif
	0;
/**
 * Returns 1 if this build of the runtime advertises capability `x`, 0
 * otherwise.  Capabilities outside [0, 31] are never set; rejecting negative
 * values also avoids the undefined behaviour of a negative shift count.
 */
OBJC_PUBLIC int objc_test_capability(int x)
{
	if ((x < 0) || (x >= 32)) { return 0; }
	if (caps & (1<<x)) { return 1; }
	return 0;
}

@ -0,0 +1,100 @@
#include <stdio.h>
#include "objc/runtime.h"
#include "visibility.h"
#include "loader.h"
#include "dtable.h"
#include "properties.h"
#define BUFFER_TYPE struct objc_category *
#include "buffer.h"
void objc_send_load_message(Class class);
/**
 * Prepends the method list `l` to `cls`'s chain of method lists and, when the
 * class already has a dispatch table, merges the new methods into it.
 */
static void register_methods(struct objc_class *cls, struct objc_method_list *l)
{
	if (NULL == l) { return; }
	// Add the method list at the head of the list of lists.
	l->next = cls->methods;
	cls->methods = l;
	// Update the dtable to catch the new methods, if the dtable has been
	// created (don't bother creating dtables for classes when categories are
	// loaded if the class hasn't received any messages yet).
	if (classHasDtable(cls))
	{
		add_method_list_to_class(cls, l);
	}
}
static void load_category(struct objc_category *cat, struct objc_class *class)
{
register_methods(class, cat->instance_methods);
register_methods(class->isa, cat->class_methods);
//fprintf(stderr, "Loading %s (%s)\n", cat->class_name, cat->name);
if (cat->protocols)
{
objc_init_protocols(cat->protocols);
cat->protocols->next = class->protocols;
class->protocols = cat->protocols;
}
if (cat->properties)
{
cat->properties->next = class->properties;
class->properties = cat->properties;
}
if (cat->class_properties)
{
cat->class_properties->next = class->isa->properties;
class->isa->properties = cat->class_properties;
}
}
static BOOL try_load_category(struct objc_category *cat)
{
Class class = (Class)objc_getClass(cat->class_name);
//fprintf(stderr, "Trying to load %s (%s)\n", cat->class_name, cat->name);
if (Nil != class)
{
load_category(cat, class);
return YES;
}
//fprintf(stderr, "waiting to load %s (%s)\n", cat->class_name, cat->name);
return NO;
}
/**
* Attaches a category to its class, if the class is already loaded. Buffers
* it for future resolution if not.
*/
PRIVATE void objc_try_load_category(struct objc_category *cat)
{
	if (try_load_category(cat))
	{
		return;
	}
	// Class not present yet: park the category until more classes are loaded.
	set_buffered_object_at_index(cat, buffered_objects++);
}
/**
 * Retries every buffered category now that more classes may have been
 * loaded, clearing the slots of any that attach successfully and compacting
 * the buffer afterwards.
 */
PRIVATE void objc_load_buffered_categories(void)
{
	BOOL removedAny = NO;
	for (unsigned idx=0 ; idx<buffered_objects ; idx++)
	{
		struct objc_category *cat = buffered_object_at_index(idx);
		if (NULL == cat)
		{
			continue;
		}
		if (!try_load_category(cat))
		{
			continue;
		}
		// Attached: empty the slot and remember to compact later.
		set_buffered_object_at_index(NULL, idx);
		removedAny = YES;
	}
	if (removedAny)
	{
		compact_buffer();
	}
}

@ -0,0 +1,602 @@
#include "objc/runtime.h"
#include "objc/hooks.h"
#include "objc/developer.h"
#include "alias.h"
#include "class.h"
#include "method.h"
#include "selector.h"
#include "lock.h"
#include "dtable.h"
#include "legacy.h"
#include "visibility.h"
#include <stdlib.h>
#include <assert.h>
void objc_init_protocols(struct objc_protocol_list *protos);
void objc_compute_ivar_offsets(Class class);
////////////////////////////////////////////////////////////////////////////////
// +load method hash table
////////////////////////////////////////////////////////////////////////////////
/**
 * Equality function for +load IMPs in the hash table: two entries match only
 * when they are the same function pointer.
 */
static int imp_compare(const void *i1, void *i2)
{
	return i1 == i2;
}
/**
 * Hash for an IMP.  The low bits are discarded because function pointers are
 * aligned and so carry little entropy there.
 */
static int32_t imp_hash(const void *imp)
{
	return (int32_t)(((uintptr_t)imp) >> 4);
}
#define MAP_TABLE_NAME load_messages
#define MAP_TABLE_COMPARE_FUNCTION imp_compare
#define MAP_TABLE_HASH_KEY imp_hash
#define MAP_TABLE_HASH_VALUE imp_hash
#include "hash_table.h"
static load_messages_table *load_table;
SEL loadSel;
/**
 * Creates the table that records which +load IMPs have already run, and
 * caches the `load` selector.  Called once during runtime initialisation.
 */
PRIVATE void objc_init_load_messages_table(void)
{
	load_messages_initialize(&load_table, 4096);
	loadSel = sel_registerName("load");
}
/**
 * Invokes every +load implementation found in `class`'s metaclass method
 * lists that has not run before.  IMPs are recorded in load_table so each
 * implementation fires at most once, even if it appears in several lists
 * (e.g. added by categories).
 */
PRIVATE void objc_send_load_message(Class class)
{
	Class meta = class->isa;
	for (struct objc_method_list *l=meta->methods ; NULL!=l ; l=l->next)
	{
		for (int i=0 ; i<l->count ; i++)
		{
			Method m = method_at_index(l, i);
			if (sel_isEqual(m->selector, loadSel))
			{
				// Only call IMPs we have not seen before.
				if (load_messages_table_get(load_table, m->imp) == 0)
				{
					m->imp((id)class, loadSel);
					load_messages_insert(load_table, m->imp);
				}
			}
		}
	}
}
// Get the functions for string hashing
#include "string_hash.h"
/**
 * Class-table key comparison: matches a lookup name against a stored class's
 * name.
 */
static int class_compare(const char *name, const Class class)
{
	return string_compare(name, class->name);
}
/**
 * Class-table value hash: hashes the class by its name so it agrees with the
 * string hash used for lookup keys.
 */
static int class_hash(const Class class)
{
	return string_hash(class->name);
}
#define MAP_TABLE_NAME class_table_internal
#define MAP_TABLE_COMPARE_FUNCTION class_compare
#define MAP_TABLE_HASH_KEY string_hash
#define MAP_TABLE_HASH_VALUE class_hash
// This defines the maximum number of classes that the runtime supports.
/*
#define MAP_TABLE_STATIC_SIZE 2048
#define MAP_TABLE_STATIC_NAME class_table
*/
#include "hash_table.h"
static class_table_internal_table *class_table;
#define unresolved_class_next subclass_list
#define unresolved_class_prev sibling_class
/**
* Linked list using the subclass_list pointer in unresolved classes.
*/
static Class unresolved_class_list;
static enum objc_developer_mode_np mode;
/**
 * Sets the runtime's developer mode, which controls how a second load of an
 * already-registered class is handled (see objc_load_class()).
 */
void objc_setDeveloperMode_np(enum objc_developer_mode_np newMode)
{
	mode = newMode;
}
////////////////////////////////////////////////////////////////////////////////
// Class table manipulation
////////////////////////////////////////////////////////////////////////////////
PRIVATE Class zombie_class;
/**
 * Inserts a class into the global class table.  Unresolved classes are also
 * pushed onto the doubly linked unresolved-class list so the resolver can
 * revisit them, and the NSZombie class is cached the first time it is seen.
 */
PRIVATE void class_table_insert(Class class)
{
	if (!objc_test_class_flag(class, objc_class_flag_resolved))
	{
		// Link at the head of the unresolved-class list.
		if (Nil != unresolved_class_list)
		{
			unresolved_class_list->unresolved_class_prev = class;
		}
		class->unresolved_class_next = unresolved_class_list;
		unresolved_class_list = class;
	}
	if ((0 == zombie_class) && (strcmp("NSZombie", class->name) == 0))
	{
		zombie_class = class;
	}
	class_table_internal_insert(class_table, class);
}
/**
 * Looks up a class by name.  Returns Nil for a NULL name or when no class
 * with that name is registered.
 */
PRIVATE Class class_table_get_safe(const char *class_name)
{
	if (class_name == NULL)
	{
		return Nil;
	}
	Class found = class_table_internal_table_get(class_table, class_name);
	return found;
}
/**
 * Steps an opaque enumerator over the class table, returning the next class
 * or Nil when exhausted.  `e` holds the enumeration state between calls.
 */
PRIVATE Class class_table_next(void **e)
{
	return class_table_internal_next(class_table,
			(struct class_table_internal_table_enumerator**)e);
}
/**
 * Allocates the global class table and the +load bookkeeping table.  Called
 * once during runtime start-up.
 */
PRIVATE void init_class_tables(void)
{
	class_table_internal_initialize(&class_table, 4096);
	objc_init_load_messages_table();
}
////////////////////////////////////////////////////////////////////////////////
// Loader functions
////////////////////////////////////////////////////////////////////////////////
/**
 * Resolves a class: makes sure its superclass chain is resolved, unlinks it
 * from the unresolved-class list, wires up the metaclass hierarchy and the
 * subclass/sibling links, computes ivar offsets, and delivers +load.
 * Returns YES if the class is (now) resolved, NO if its superclass is not
 * yet available.
 */
PRIVATE BOOL objc_resolve_class(Class cls)
{
	// Skip this if the class is already resolved.
	if (objc_test_class_flag(cls, objc_class_flag_resolved)) { return YES; }
	// We can only resolve the class if its superclass is resolved.
	if (cls->super_class)
	{
		Class super = cls->super_class;
		if (!objc_test_class_flag(super, objc_class_flag_resolved))
		{
			if (!objc_resolve_class(super))
			{
				return NO;
			}
		}
	}
#ifdef OLDABI_COMPAT
	else
	{
		// Old-ABI classes store the superclass *name* in the super_class
		// slot; translate it to a class pointer and retry.
		struct objc_class_gsv1 *ocls = objc_legacy_class_for_class(cls);
		if (ocls != NULL)
		{
			const char *super_name = (const char*)ocls->super_class;
			if (super_name)
			{
				Class super = (Class)objc_getClass(super_name);
				if (super == Nil)
				{
					return NO;
				}
				cls->super_class = super;
				return objc_resolve_class(cls);
			}
		}
	}
#endif
	// Remove the class from the unresolved class list
	if (Nil == cls->unresolved_class_prev)
	{
		unresolved_class_list = cls->unresolved_class_next;
	}
	else
	{
		cls->unresolved_class_prev->unresolved_class_next =
			cls->unresolved_class_next;
	}
	if (Nil != cls->unresolved_class_next)
	{
		cls->unresolved_class_next->unresolved_class_prev =
			cls->unresolved_class_prev;
	}
	cls->unresolved_class_prev = Nil;
	cls->unresolved_class_next = Nil;
	// The superclass for the metaclass.  This is the metaclass for the
	// superclass if one exists, otherwise it is the root class itself
	Class superMeta = Nil;
	// The metaclass for the metaclass.  This is always the root class's
	// metaclass.
	Class metaMeta = Nil;
	// Resolve the superclass pointer
	if (NULL == cls->super_class)
	{
		// Root class: the metaclass inherits from the class itself.
		superMeta = cls;
		metaMeta = cls->isa;
	}
	else
	{
		// Resolve the superclass if it isn't already resolved
		Class super = cls->super_class;
		if (!objc_test_class_flag(super, objc_class_flag_resolved))
		{
			objc_resolve_class(super);
		}
		superMeta = super->isa;
		// Walk up to the root class; its metaclass becomes the metaclass's
		// metaclass.
		do
		{
			metaMeta = super->isa;
			super = super->super_class;
		} while (Nil != super);
	}
	Class meta = cls->isa;
	// Make the root class the superclass of the metaclass (e.g. NSObject is
	// the superclass of all metaclasses in classes that inherit from NSObject)
	meta->super_class = superMeta;
	meta->isa = metaMeta;
	// Don't register root classes as children of anything
	if (Nil != cls->super_class)
	{
		// Set up the class links
		cls->sibling_class = cls->super_class->subclass_list;
		cls->super_class->subclass_list = cls;
	}
	// Set up the metaclass links
	meta->sibling_class = superMeta->subclass_list;
	superMeta->subclass_list = meta;
	// Mark this class (and its metaclass) as resolved
	objc_set_class_flag(cls, objc_class_flag_resolved);
	objc_set_class_flag(cls->isa, objc_class_flag_resolved);
	// Fix up the ivar offsets
	objc_compute_ivar_offsets(cls);
#ifdef OLDABI_COMPAT
	// Mirror the resolved superclass pointers into the legacy structure.
	struct objc_class_gsv1 *oldCls = objc_legacy_class_for_class(cls);
	if (oldCls)
	{
		oldCls->super_class = cls->super_class;
		oldCls->isa->super_class = cls->isa->super_class;
	}
#endif
	// Send the +load message, if required
	if (!objc_test_class_flag(cls, objc_class_flag_user_created))
	{
		objc_send_load_message(cls);
	}
	if (_objc_load_callback)
	{
		_objc_load_callback(cls, 0);
	}
	return YES;
}
/**
 * Repeatedly sweeps the unresolved-class list, resolving every class whose
 * superclass is now available.  Sweeps restart whenever a resolution mutates
 * the list underneath the iteration, and stop once a pass makes no progress.
 */
PRIVATE void objc_resolve_class_links(void)
{
	LOCK_RUNTIME_FOR_SCOPE();
	BOOL resolvedClass;
	do
	{
		Class class = unresolved_class_list;
		resolvedClass = NO;
		while ((Nil != class))
		{
			Class next = class->unresolved_class_next;
			// If the class has been resolved, then this means that the last
			// call to objc_resolve_class resolved it as part of resolving
			// superclasses and removed it from the list.  We now don't have a
			// pointer into the linked list, so abort and try again from the
			// start.
			if (objc_test_class_flag(class, objc_class_flag_resolved))
			{
				assert(resolvedClass);
				break;
			}
			objc_resolve_class(class);
			if (resolvedClass ||
				objc_test_class_flag(class, objc_class_flag_resolved))
			{
				resolvedClass = YES;
			}
			class = next;
		}
	} while (resolvedClass);
}
/**
 * Deprecated alias for objc_resolve_class_links().  Emits a one-time warning
 * and forwards to the supported implementation.
 */
PRIVATE void __objc_resolve_class_links(void)
{
	static BOOL warned = NO;
	if (NO == warned)
	{
		warned = YES;
		fprintf(stderr,
		        "Warning: Calling deprecated private ObjC runtime function %s\n", __func__);
	}
	objc_resolve_class_links();
}
/**
 * Developer-mode class reload: merges or replaces `old` with the freshly
 * loaded `class`.  If the ivar layouts are identical the new method lists
 * are simply prepended to the old class; otherwise the new class supersedes
 * the old one in the class table and future lookups return it.
 */
static void reload_class(struct objc_class *class, struct objc_class *old)
{
	// The freshly loaded class still carries the superclass *name*; resolve
	// it to a pointer for the comparison below.
	const char *superclassName = (char*)class->super_class;
	class->super_class = class_table_get_safe(superclassName);
	// Checking the instance sizes are equal here is a quick-and-dirty test.
	// It's not actually needed, because we're testing the ivars are at the
	// same locations next, but it lets us skip those tests if the total size
	// is different.
	BOOL equalLayouts = (class->super_class == old->super_class) &&
		(class->instance_size == old->instance_size);
	// If either of the classes has an empty ivar list, then the other one must too.
	if ((NULL == class->ivars) || (NULL == old->ivars))
	{
		equalLayouts &= (class->ivars == old->ivars);
	}
	else
	{
		// If the class sizes are the same, ensure that the ivars have the same
		// types, names, and offsets.  Note: Renaming an ivar is treated as a
		// conflict because name changes are often accompanied by semantic
		// changes.  For example, an object ivar at offset 16 goes from being
		// called 'delegate' to being called 'view' - we almost certainly don't
		// want methods that expect to be working with the delegate ivar to
		// work with the view ivar now!
		for (int i=0 ; equalLayouts && (i<old->ivars->count) ; i++)
		{
			struct objc_ivar *oldIvar = ivar_at_index(old->ivars, i);
			struct objc_ivar *newIvar = ivar_at_index(class->ivars, i);
			equalLayouts &= strcmp(oldIvar->name, newIvar->name) == 0;
			equalLayouts &= strcmp(oldIvar->type, newIvar->type) == 0;
			equalLayouts &= (oldIvar->offset == newIvar->offset);
		}
	}
	// If the layouts are equal, then we can simply tack the class's method
	// list on to the front of the old class and update the dtable.
	if (equalLayouts)
	{
		class->methods->next = old->methods;
		old->methods = class->methods;
		objc_update_dtable_for_class(old);
		return;
	}
	// If we get to here, then we are adding a new class.  This is where things
	// start to get a bit tricky...
	// Ideally, we'd want to capture the subclass list here.  Unfortunately,
	// this is not possible because the subclass will contain methods that
	// refer to ivars in the superclass.
	//
	// We can't use the non-fragile ABI's offset facility easily, because we'd
	// have to have two (or more) offsets for the same ivar.  This gets messy
	// very quickly.  Ideally, we'd want every class to include ivar offsets
	// for every single (public) ivar in its superclasses.  These could then be
	// updated by copies of the class.  Defining a development ABI is something
	// to consider for a future release.
	class->subclass_list = NULL;
	// Replace the old class with this one in the class table.  New lookups for
	// this class will now return this class.
	class_table_internal_table_set(class_table, (void*)class->name, class);
	// Set the uninstalled dtable.  The compiler could do this as well.
	class->dtable = uninstalled_dtable;
	class->isa->dtable = uninstalled_dtable;
	// If this is a root class, make the class into the metaclass's superclass.
	// This means that all instance methods will be available to the class.
	if (NULL == superclassName)
	{
		class->isa->super_class = class;
	}
	if (class->protocols)
	{
		objc_init_protocols(class->protocols);
	}
}
/**
* Loads a class. This function assumes that the runtime mutex is locked.
*/
PRIVATE void objc_load_class(struct objc_class *class)
{
	// A class with this name is already registered: either reload it
	// (developer mode) or warn and keep the first one.
	struct objc_class *existingClass = class_table_get_safe(class->name);
	if (Nil != existingClass)
	{
		if (objc_developer_mode_developer != mode)
		{
			fprintf(stderr,
				"Loading two versions of %s.  The class that will be used is undefined\n",
				class->name);
			return;
		}
		reload_class(class, existingClass);
		return;
	}
#ifdef _WIN32
	// On Windows, the super_class pointer may point to the local __imp_
	// symbol, rather than to the external symbol.  The runtime must remove the
	// extra indirection.
	if (class->super_class)
	{
		Class superMeta = class->super_class->isa;
		if (!class_isMetaClass(superMeta))
		{
			class->super_class = superMeta;
		}
	}
#endif
	// Work around a bug in some versions of GCC that don't initialize the
	// class structure correctly.
	class->subclass_list = NULL;
	// Insert the class into the class table
	class_table_insert(class);
	// Set the uninstalled dtable.  The compiler could do this as well.
	class->dtable = uninstalled_dtable;
	class->isa->dtable = uninstalled_dtable;
	// Mark constant string instances as never needing refcount manipulation.
	if (strcmp(class->name, "NSConstantString") == 0)
	{
		objc_set_class_flag(class, objc_class_flag_permanent_instances);
	}
	// If this is a root class, make the class into the metaclass's superclass.
	// This means that all instance methods will be available to the class.
	if (NULL == class->super_class)
	{
		class->isa->super_class = class;
	}
	if (class->protocols)
	{
		objc_init_protocols(class->protocols);
	}
}
/** Classes registered to receive messages for tagged-pointer (small object)
 * values, indexed by the pointer's low-bit mask. */
PRIVATE Class SmallObjectClasses[7];
/**
 * Registers `class` to handle small objects whose low bits match `mask`.
 * On 32-bit platforms (4-byte pointers) only a single small-object class is
 * supported, in slot 0.  Returns NO if the mask is invalid or the slot is
 * already taken, YES on success.
 */
BOOL objc_registerSmallObjectClass_np(Class class, uintptr_t mask)
{
	// Reject masks with bits outside the tagged-pointer bit range.
	if ((mask & OBJC_SMALL_OBJECT_MASK) != mask)
	{
		return NO;
	}
	if (sizeof(void*) == 4)
	{
		// 32-bit: only one small-object class, in slot 0.
		if (Nil == SmallObjectClasses[0])
		{
			SmallObjectClasses[0] = class;
			return YES;
		}
		return NO;
	}
	if (Nil != SmallObjectClasses[mask])
	{
		return NO;
	}
	SmallObjectClasses[mask] = class;
	return YES;
}
/**
 * Removes a class from the class table.  Only classes created at runtime
 * (objc_allocateClassPair and friends) may be removed; compiled-in classes
 * never are, hence the assertion.
 */
PRIVATE void class_table_remove(Class cls)
{
	assert(objc_test_class_flag(cls, objc_class_flag_user_created));
	class_table_internal_remove(class_table, (void*)cls->name);
}
////////////////////////////////////////////////////////////////////////////////
// Public API
////////////////////////////////////////////////////////////////////////////////
/**
 * Copies up to `bufferLen` registered classes into `buffer`, returning the
 * number copied.  With a NULL buffer or zero length, returns the total
 * number of registered classes instead.
 */
int objc_getClassList(Class *buffer, int bufferLen)
{
	if (buffer == NULL || bufferLen == 0)
	{
		return class_table->table_used;
	}
	int count = 0;
	struct class_table_internal_table_enumerator *e = NULL;
	Class next;
	while (count < bufferLen &&
		(next = class_table_internal_next(class_table, &e)))
	{
		buffer[count++] = next;
	}
	return count;
}
/**
 * Returns a newly allocated array of every registered class, storing the
 * element count into `outCount` when it is non-NULL.  The caller owns the
 * returned buffer and must free() it.  Returns NULL with a count of 0 when
 * allocation fails (the original passed an unchecked calloc result straight
 * to objc_getClassList).
 */
Class *objc_copyClassList(unsigned int *outCount)
{
	int count = class_table->table_used;
	Class *buffer = calloc(sizeof(Class), count);
	if (NULL == buffer)
	{
		if (NULL != outCount)
		{
			*outCount = 0;
		}
		return NULL;
	}
	if (NULL != outCount)
	{
		*outCount = count;
	}
	objc_getClassList(buffer, count);
	return buffer;
}
/**
 * Returns the superclass of `cls`, or Nil for a root class or a Nil
 * argument.  Resolves the class first, because an unresolved class may still
 * hold a superclass name rather than a pointer.
 */
Class class_getSuperclass(Class cls)
{
	if (Nil == cls) { return Nil; }
	if (!objc_test_class_flag(cls, objc_class_flag_resolved))
	{
		objc_resolve_class(cls);
	}
	return cls->super_class;
}
/**
 * Looks up a class by name: first the class table, then registered
 * @compatibility_alias entries, and finally the _objc_lookup_class hook
 * (which may load the class on demand).  Returns nil if nothing matches.
 */
id objc_getClass(const char *name)
{
	id class = (id)class_table_get_safe(name);
	if (nil != class) { return class; }
	// Second chance lookup via @compatibilty_alias:
	class = (id)alias_getClass(name);
	if (nil != class) { return class; }
	// Third chance lookup via the hook:
	if (0 != _objc_lookup_class)
	{
		class = (id)_objc_lookup_class(name);
	}
	return class;
}
id objc_lookUpClass(const char *name)
{
return (id)class_table_get_safe(name);
}
/**
 * Returns the metaclass of the named class, or nil when no such class
 * exists.
 */
id objc_getMetaClass(const char *name)
{
	Class cls = (Class)objc_getClass(name);
	if (Nil == cls)
	{
		return nil;
	}
	return (id)cls->isa;
}
// Legacy interface compatibility
/** Old-runtime name for objc_getClass(). */
id objc_get_class(const char *name)
{
	return objc_getClass(name);
}
/** Old-runtime name; note it forwards to objc_getClass(), not
 * objc_lookUpClass(), so aliases and the lookup hook are consulted. */
id objc_lookup_class(const char *name)
{
	return objc_getClass(name);
}
/** Old-runtime name for objc_getMetaClass(). */
id objc_get_meta_class(const char *name)
{
	return objc_getMetaClass(name);
}
/** Old-runtime class enumerator; `enum_state` carries opaque iteration
 * state between calls. */
Class objc_next_class(void **enum_state)
{
	return class_table_next ( enum_state);
}
/**
 * Class posing was removed from the runtime; this stub reports that and
 * aborts.  It never returns.
 */
Class class_pose_as(Class impostor, Class super_class)
{
	fprintf(stderr, "Class posing is no longer supported.\n");
	fprintf(stderr, "Please use class_replaceMethod() instead.\n");
	abort();
}

@ -0,0 +1,18 @@
/* CDECL(symbol): adds the leading underscore that Win32/Cygwin x86 and
 * Darwin prepend to C symbol names; a no-op elsewhere. */
#if ((defined(_WIN32) || defined(__CYGWIN__)) && defined(__i386__)) || defined(__APPLE__)
#define CDECL(symbol) _##symbol
#else
#define CDECL(symbol) symbol
#endif
/* TYPE_DIRECTIVE: emits a .type directive on ELF targets (other object
 * formats have no equivalent, so it expands to nothing). */
#if __ELF__
#define TYPE_DIRECTIVE(symbol, symboltype) .type symbol, symboltype
#else
#define TYPE_DIRECTIVE(symbol, symboltype)
#endif
/* EXPORT_SYMBOL: embeds a linker /EXPORT directive for PE targets; the
 * MSVC i386 variant adds the underscore via stringification. */
#if defined(_MSC_VER) && defined(__i386__)
#define STRINGIFY(a) #a
#define EXPORT_SYMBOL(symbol) .ascii " " STRINGIFY(/EXPORT:_##symbol)
#else
#define EXPORT_SYMBOL(symbol) .ascii " /EXPORT:" #symbol
#endif

@ -0,0 +1,861 @@
#define __BSD_VISIBLE 1
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include "objc/runtime.h"
#include "objc/hooks.h"
#include "sarray2.h"
#include "selector.h"
#include "class.h"
#include "lock.h"
#include "method.h"
#include "dtable.h"
#include "visibility.h"
#include "asmconstants.h"
_Static_assert(__builtin_offsetof(struct objc_class, dtable) == DTABLE_OFFSET,
"Incorrect dtable offset for assembly");
_Static_assert(__builtin_offsetof(SparseArray, shift) == SHIFT_OFFSET,
"Incorrect shift offset for assembly");
_Static_assert(__builtin_offsetof(SparseArray, data) == DATA_OFFSET,
"Incorrect data offset for assembly");
// Slots are now a public interface to part of the method structure, so make
// sure that it's safe to use method and slot structures interchangeably.
_Static_assert(__builtin_offsetof(struct objc_slot2, method) == SLOT_OFFSET,
"Incorrect slot offset for assembly");
_Static_assert(__builtin_offsetof(struct objc_method, imp) == SLOT_OFFSET,
"Incorrect slot offset for assembly");
PRIVATE dtable_t uninstalled_dtable;
#if defined(WITH_TRACING) && defined (__x86_64)
PRIVATE dtable_t tracing_dtable;
#endif
#ifndef ENOTSUP
# define ENOTSUP -1
#endif
/** Head of the list of temporary dtables. Protected by initialize_lock. */
PRIVATE InitializingDtable *temporary_dtables;
/** Lock used to protect the temporary dtables list. */
PRIVATE mutex_t initialize_lock;
/** The size of the largest dtable. This is a sparse array shift value, so is
* 2^x in increments of 8. */
static uint32_t dtable_depth = 8;
#ifndef NO_SAFE_CACHING
_Atomic(uint64_t) objc_method_cache_version;
#endif
/**
* Starting at `cls`, finds the class that provides the implementation of the
* method identified by `sel`.
*/
static Class ownerForMethod(Class cls, SEL sel)
{
	struct objc_slot2 *slot = objc_get_slot2(cls, sel, NULL);
	if (slot == NULL)
	{
		// No implementation anywhere in the hierarchy.
		return Nil;
	}
	if (cls->super_class == NULL)
	{
		// Root class and the slot exists: the root must provide it.
		return cls;
	}
	// If the superclass resolves the selector to the same slot, the
	// implementation is inherited — keep walking up until it diverges.
	if (objc_get_slot2(cls->super_class, sel, NULL) == slot)
	{
		return ownerForMethod(cls->super_class, sel);
	}
	return cls;
}
/**
* Returns YES if the class implements a method for the specified selector, NO
* otherwise.
*/
static BOOL ownsMethod(Class cls, SEL sel)
{
	// A class "owns" a method when it, rather than an ancestor, provides the
	// implementation.
	return cls == ownerForMethod(cls, sel);
}
#ifdef DEBUG_ARC_COMPAT
#define ARC_DEBUG_LOG(...) fprintf(stderr, __VA_ARGS__)
#else
#define ARC_DEBUG_LOG(...) do {} while(0)
#endif
/**
* Check whether this class pair implement or override `+alloc`,
* `+allocWithZone`, or `-init` in a way that requires the methods to be
* called.
*/
static void checkFastAllocInit(Class cls)
{
	// This needs to be called on the class, not the metaclass
	if (class_isMetaClass(cls))
	{
		return;
	}
	// Selectors are registered once and cached across calls.
	static SEL alloc, allocWithZone, init, isTrivialAllocInit;
	if (NULL == alloc)
	{
		alloc = sel_registerName("alloc");
		allocWithZone = sel_registerName("allocWithZone:");
		init = sel_registerName("init");
		isTrivialAllocInit = sel_registerName("_TrivialAllocInit");
	}
	Class metaclass = cls->isa;
	Class isTrivialOwner = ownerForMethod(metaclass, isTrivialAllocInit);
	// If nothing in this hierarchy opts in to trivial alloc / init behaviour, give up.
	if (isTrivialOwner == nil)
	{
		objc_clear_class_flag(cls, objc_class_flag_fast_alloc_init);
		objc_clear_class_flag(metaclass, objc_class_flag_fast_alloc_init);
		return;
	}
	// Check for overrides of alloc or allocWithZone:.
	// This check has some false negatives.  If you override only one of alloc
	// or allocWithZone, both will hit the slow path.  That's fine because the
	// fast path is an optimisation, not a guarantee.
	Class allocOwner = ownerForMethod(metaclass, alloc);
	Class allocWithZoneOwner = ownerForMethod(metaclass, allocWithZone);
	if (((allocOwner == nil) || (allocOwner == isTrivialOwner)) &&
		((allocWithZoneOwner == nil) || (allocWithZoneOwner == isTrivialOwner)))
	{
		objc_set_class_flag(metaclass, objc_class_flag_fast_alloc_init);
	}
	else
	{
		objc_clear_class_flag(metaclass, objc_class_flag_fast_alloc_init);
	}
	// -init is checked on the class itself; its owner's metaclass must be
	// the same class that opted in to trivial behaviour.
	Class initOwner = ownerForMethod(cls, init);
	if ((initOwner == nil) || (initOwner->isa == isTrivialOwner))
	{
		objc_set_class_flag(cls, objc_class_flag_fast_alloc_init);
	}
	else
	{
		objc_clear_class_flag(cls, objc_class_flag_fast_alloc_init);
	}
}
/**
* Checks whether the class implements memory management methods, and whether
* they are safe to use with ARC.
*/
static void checkARCAccessors(Class cls)
{
	checkFastAllocInit(cls);
	// Selectors are registered once and cached across calls.
	static SEL retain, release, autorelease, isARC;
	if (NULL == retain)
	{
		retain = sel_registerName("retain");
		release = sel_registerName("release");
		autorelease = sel_registerName("autorelease");
		isARC = sel_registerName("_ARCCompliantRetainRelease");
	}
	// For each of retain/release/autorelease: if some class in the hierarchy
	// provides a custom implementation and does not itself declare ARC
	// compliance, the fast-ARC path must be disabled.
	Class owner = ownerForMethod(cls, retain);
	if ((NULL != owner) && !ownsMethod(owner, isARC))
	{
		ARC_DEBUG_LOG("%s does not support ARC correctly (implements retain)\n", cls->name);
		objc_clear_class_flag(cls, objc_class_flag_fast_arc);
		return;
	}
	owner = ownerForMethod(cls, release);
	if ((NULL != owner) && !ownsMethod(owner, isARC))
	{
		ARC_DEBUG_LOG("%s does not support ARC correctly (implements release)\n", cls->name);
		objc_clear_class_flag(cls, objc_class_flag_fast_arc);
		return;
	}
	owner = ownerForMethod(cls, autorelease);
	if ((NULL != owner) && !ownsMethod(owner, isARC))
	{
		ARC_DEBUG_LOG("%s does not support ARC correctly (implements autorelease)\n", cls->name);
		objc_clear_class_flag(cls, objc_class_flag_fast_arc);
		return;
	}
	objc_set_class_flag(cls, objc_class_flag_fast_arc);
}
/**
 * Compares a selector against a possibly untyped one: equal when their
 * indexes match or (with type-dependent dispatch) when the expected
 * selector's untyped index matches.
 */
static BOOL selEqualUnTyped(SEL expected, SEL untyped)
{
	return (expected->index == untyped->index)
#ifdef TYPE_DEPENDENT_DISPATCH
		|| (get_untyped_idx(expected) == untyped->index)
#endif
		;
}
PRIVATE void checkARCAccessorsSlow(Class cls)
{
	// Only run this path for classes without an installed dtable; classes
	// with one are handled by checkARCAccessors().
	if (cls->dtable != uninstalled_dtable)
	{
		return;
	}
	// Selectors are registered once and cached across calls.
	static SEL retain, release, autorelease, isARC;
	if (NULL == retain)
	{
		retain = sel_registerName("retain");
		release = sel_registerName("release");
		autorelease = sel_registerName("autorelease");
		isARC = sel_registerName("_ARCCompliantRetainRelease");
	}
	// Fast ARC requires the whole superclass chain to be fast.
	BOOL superIsFast = YES;
	if (cls->super_class != Nil)
	{
		checkARCAccessorsSlow(cls->super_class);
		superIsFast = objc_test_class_flag(cls->super_class, objc_class_flag_fast_arc);
	}
	// Scan this class's own method lists for retain/release/autorelease
	// overrides or an explicit ARC-compliance marker.
	BOOL selfImplementsRetainRelease = NO;
	for (struct objc_method_list *l=cls->methods ; l != NULL ; l= l->next)
	{
		for (int i=0 ; i<l->count ; i++)
		{
			SEL s = method_at_index(l, i)->selector;
			if (selEqualUnTyped(s, retain) ||
			    selEqualUnTyped(s, release) ||
			    selEqualUnTyped(s, autorelease))
			{
				selfImplementsRetainRelease = YES;
			}
			else if (selEqualUnTyped(s, isARC))
			{
				// Explicit opt-in trumps everything else.
				objc_set_class_flag(cls, objc_class_flag_fast_arc);
				return;
			}
		}
	}
	if (superIsFast && !selfImplementsRetainRelease)
	{
		objc_set_class_flag(cls, objc_class_flag_fast_arc);
	}
}
/**
 * Flattens a chain of method lists into a sparse array keyed by selector
 * index.  With `recurse`, older lists (further down the chain) are inserted
 * first, so entries from the newest list — the head of the chain — win.
 */
static void collectMethodsForMethodListToSparseArray(
		struct objc_method_list *list,
		SparseArray *sarray,
		BOOL recurse)
{
	if (recurse && (NULL != list->next))
	{
		collectMethodsForMethodListToSparseArray(list->next, sarray, YES);
	}
	for (unsigned i=0 ; i<list->count ; i++)
	{
		SparseArrayInsert(sarray, method_at_index(list, i)->selector->index,
				(void*)method_at_index(list, i));
	}
}
/**
 * Initialises the dispatch-table machinery: the lock protecting temporary
 * dtables and the shared placeholder dtable used by uninitialised classes
 * (plus the tracing dtable when tracing is compiled in).
 */
PRIVATE void init_dispatch_tables ()
{
	INIT_LOCK(initialize_lock);
	uninstalled_dtable = SparseArrayNewWithDepth(dtable_depth);
#if defined(WITH_TRACING) && defined (__x86_64)
	tracing_dtable = SparseArrayNewWithDepth(dtable_depth);
#endif
}
#if defined(WITH_TRACING) && defined (__x86_64)
// Set once the per-thread trace-stack pthread key has been created.
static int init;
// Destructor for the per-thread trace stack at thread exit.
// NOTE(review): the key's value is the *current* stack position, and this
// frees the pointer loaded through it rather than the allocation base; it
// only looks correct if the trace stack is empty at thread exit — confirm.
static void free_thread_stack(void* x)
{
	free(*(void**)x);
}
static pthread_key_t thread_stack_key;
// One-time creation of the pthread key (run via pthread_once below).
static void alloc_thread_stack(void)
{
	pthread_key_create(&thread_stack_key, free_thread_stack);
	init = 1;
}
// Pushes a 5-pointer frame onto this thread's trace return stack, lazily
// allocating the stack on first use, and returns the frame's base.
PRIVATE void* pushTraceReturnStack(void)
{
	static pthread_once_t once_control = PTHREAD_ONCE_INIT;
	if (!init)
	{
		pthread_once(&once_control, alloc_thread_stack);
	}
	void **stack = pthread_getspecific(thread_stack_key);
	if (stack == 0)
	{
		// NOTE(review): 4096 pointer slots with no overflow check; a deep
		// enough trace nesting would run past the buffer — verify bounds.
		stack = malloc(4096*sizeof(void*));
	}
	// Advance the stored position past this frame's five slots.
	pthread_setspecific(thread_stack_key, stack + 5);
	return stack;
}
// Pops the most recent 5-pointer frame and returns its base.
PRIVATE void* popTraceReturnStack(void)
{
	void **stack = pthread_getspecific(thread_stack_key);
	stack -= 5;
	pthread_setspecific(thread_stack_key, stack);
	return stack;
}
#endif
/**
 * Registers a tracing hook for a selector.  For an untyped selector the hook
 * is installed for every typed variant as well.  Returns 0 on success, or
 * ENOTSUP when tracing support is not compiled in.
 */
int objc_registerTracingHook(SEL aSel, objc_tracing_hook aHook)
{
#if defined(WITH_TRACING) && defined (__x86_64)
	// If this is an untyped selector, register it for every typed variant
	if (sel_getType_np(aSel) == 0)
	{
		SEL buffer[16];
		int count = sel_copyTypedSelectors_np(sel_getName(aSel), buffer, 16);
		if (count > 16)
		{
			// More variants than fit in the on-stack buffer: fetch the full
			// set into a heap array.  (The original refilled `buffer` here
			// and then iterated the never-populated, zeroed `overflow`
			// array, dereferencing NULL selectors.)
			SEL *overflow = calloc(count, sizeof(SEL));
			if (overflow != NULL)
			{
				sel_copyTypedSelectors_np(sel_getName(aSel), overflow, count);
				for (int i=0 ; i<count ; i++)
				{
					SparseArrayInsert(tracing_dtable, overflow[i]->index, aHook);
				}
				free(overflow);
			}
		}
		else
		{
			for (int i=0 ; i<count ; i++)
			{
				SparseArrayInsert(tracing_dtable, buffer[i]->index, aHook);
			}
		}
	}
	SparseArrayInsert(tracing_dtable, aSel->index, aHook);
	return 0;
#else
	return ENOTSUP;
#endif
}
/**
* Installs a new method in the dtable for `class`. If `replaceMethod` is
* `YES` then this will replace any dtable entry where the original is
* `method_to_replace`. This is used when a superclass method is replaced, to
* replace all subclass dtable entries that are inherited, but not ones that
* are overridden.
*/
static BOOL installMethodInDtable(Class class,
                                  SparseArray *dtable,
                                  struct objc_method *method,
                                  struct objc_method *method_to_replace,
                                  BOOL replaceExisting)
{
	ASSERT(uninstalled_dtable != dtable);
	uint32_t sel_id = method->selector->index;
	struct objc_method *oldMethod = SparseArrayLookup(dtable, sel_id);
	// If we're being asked to replace an existing method, don't if it's the
	// wrong one.
	if ((replaceExisting) && (method_to_replace != oldMethod))
	{
		return NO;
	}
	// If we're not being asked to replace existing methods and there is an
	// existing one, don't replace it.
	if (!replaceExisting && (oldMethod != NULL))
	{
		return NO;
	}
	// If this method is the one already installed, pretend to install it again.
	if (NULL != oldMethod && (oldMethod->imp == method->imp))
	{
		return NO;
	}
	SparseArrayInsert(dtable, sel_id, method);
	// In TDD mode, we also register the first typed method that we
	// encounter as the untyped version.
	#ifdef TYPE_DEPENDENT_DISPATCH
	uint32_t untyped_idx = get_untyped_idx(method->selector);
	SparseArrayInsert(dtable, untyped_idx, method);
	#endif
	// Cache the C++ construct/destruct helpers directly on the class so they
	// can be found without a dtable lookup.
	static SEL cxx_construct, cxx_destruct;
	if (NULL == cxx_construct)
	{
		cxx_construct = sel_registerName(".cxx_construct");
		cxx_destruct = sel_registerName(".cxx_destruct");
	}
	if (selEqualUnTyped(method->selector, cxx_construct))
	{
		class->cxx_construct = method->imp;
	}
	else if (selEqualUnTyped(method->selector, cxx_destruct))
	{
		class->cxx_destruct = method->imp;
	}
	for (struct objc_class *subclass=class->subclass_list ;
	     Nil != subclass ; subclass = subclass->sibling_class)
	{
		// Don't bother updating dtables for subclasses that haven't been
		// initialized yet
		if (!classHasDtable(subclass)) { continue; }
		// Recursively install this method in all subclasses
		installMethodInDtable(subclass,
		                      dtable_for_class(subclass),
		                      method,
		                      oldMethod,
		                      YES);
	}
	// Invalidate the old slot, if there is one.
	if (NULL != oldMethod)
	{
#ifndef NO_SAFE_CACHING
		// Bump the global cache version so stale cached slots are rejected.
		objc_method_cache_version++;
#endif
	}
	return YES;
}
/**
 * Installs every method in `methods` into the dtable for `cls`.  Entries that
 * installMethodInDtable() declines to install are removed from `methods`, so
 * on return the array contains exactly the methods that were installed.
 */
static void installMethodsInClass(Class cls,
                                  SparseArray *methods_to_replace,
                                  SparseArray *methods,
                                  BOOL replaceExisting)
{
	SparseArray *dtable = dtable_for_class(cls);
	assert(uninstalled_dtable != dtable);
	uint32_t index = 0;
	struct objc_method *method;
	while ((method = SparseArrayNext(methods, &index)))
	{
		// Look up the method this one is expected to displace, if a
		// replacement map was supplied.
		struct objc_method *replaced = NULL;
		if (methods_to_replace)
		{
			replaced = SparseArrayLookup(methods_to_replace,
			                             method->selector->index);
		}
		if (!installMethodInDtable(cls, dtable, method, replaced, replaceExisting))
		{
			// Not installed: drop it from the pending set.
			SparseArrayInsert(methods, index, 0);
		}
	}
}
Class class_getSuperclass(Class);
/**
 * Rebuilds the dtable entries for `cls` from its method lists, replacing any
 * slots inherited from the superclass.  No-op for classes whose dtable has
 * not yet been created.
 */
PRIVATE void objc_update_dtable_for_class(Class cls)
{
	// Only update real dtables
	if (!classHasDtable(cls))
	{
		return;
	}
	LOCK_RUNTIME_FOR_SCOPE();
	SparseArray *collected = SparseArrayNewWithDepth(dtable_depth);
	collectMethodsForMethodListToSparseArray((void*)cls->methods, collected, YES);
	SparseArray *inherited = NULL;
	if (cls->super_class)
	{
		inherited = dtable_for_class(cls->super_class);
	}
	installMethodsInClass(cls, inherited, collected, YES);
	SparseArrayDestroy(collected);
	checkARCAccessors(cls);
}
/**
 * Rebuilds the dtable for `cls` as if its superclass were `newSuper`, then
 * recurses into every initialised subclass.  Slots that `cls` (or its
 * categories) defines directly are preserved; slots inherited from the old
 * superclass are replaced by `newSuper`'s or removed.
 */
static void rebaseDtableRecursive(Class cls, Class newSuper)
{
	dtable_t parentDtable = dtable_for_class(newSuper);
	// Collect all of the methods for this class:
	dtable_t temporaryDtable = SparseArrayNewWithDepth(dtable_depth);
	for (struct objc_method_list *list = cls->methods ; list != NULL ; list = list->next)
	{
		for (unsigned i=0 ; i<list->count ; i++)
		{
			struct objc_method *m = method_at_index(list, i);
			uint32_t idx = m->selector->index;
			// Don't replace existing methods - we're doing the traversal
			// pre-order so we'll see methods from categories first.
			if (SparseArrayLookup(temporaryDtable, idx) == NULL)
			{
				SparseArrayInsert(temporaryDtable, idx, m);
			}
		}
	}
	dtable_t dtable = dtable_for_class(cls);
	uint32_t idx = 0;
	struct objc_method *method;
	// Install all methods from the parent that aren't overridden here.
	while ((method = SparseArrayNext(parentDtable, &idx)))
	{
		if (SparseArrayLookup(temporaryDtable, idx) == NULL)
		{
			SparseArrayInsert(dtable, idx, method);
			// Record it in the temporary dtable too, so the removal pass
			// below knows this slot legitimately comes from newSuper.
			SparseArrayInsert(temporaryDtable, idx, method);
		}
	}
	idx = 0;
	// Now look at all of the methods in the dtable. If they're not ones from
	// the dtable that we've just created, then they must have come from the
	// original superclass, so remove them by replacing them with NULL.
	while ((method = SparseArrayNext(dtable, &idx)))
	{
		if (SparseArrayLookup(temporaryDtable, idx) == NULL)
		{
			SparseArrayInsert(dtable, idx, NULL);
		}
	}
	SparseArrayDestroy(temporaryDtable);
	// merge can make a class ARC-compatible.
	checkARCAccessors(cls);
	// Now visit all of our subclasses and propagate the changes downwards.
	for (struct objc_class *subclass=cls->subclass_list ;
	     Nil != subclass ; subclass = subclass->sibling_class)
	{
		// Don't bother updating dtables for subclasses that haven't been
		// initialized yet
		if (!classHasDtable(subclass)) { continue; }
		rebaseDtableRecursive(subclass, cls);
	}
}
/**
 * Updates the dtable hierarchy after `cls` has been given the new superclass
 * `newSuper` (e.g. via class_setSuperclass), and invalidates all slot caches.
 */
PRIVATE void objc_update_dtable_for_new_superclass(Class cls, Class newSuper)
{
	// Nothing to do until the class has a real dtable installed.
	if (!classHasDtable(cls))
	{
		return;
	}
	LOCK_RUNTIME_FOR_SCOPE();
	rebaseDtableRecursive(cls, newSuper);
#ifndef NO_SAFE_CACHING
	// Every cached slot may now be stale; bump the global cache version.
	objc_method_cache_version++;
#endif
}
/**
 * Merges a single method list (e.g. from a category) into the dtable for
 * `cls`, replacing inherited slots.  No-op for classes without a real dtable.
 */
PRIVATE void add_method_list_to_class(Class cls,
                                      struct objc_method_list *list)
{
	// Only update real dtables
	if (!classHasDtable(cls))
	{
		return;
	}
	LOCK_RUNTIME_FOR_SCOPE();
	SparseArray *added = SparseArrayNewWithDepth(dtable_depth);
	collectMethodsForMethodListToSparseArray(list, added, NO);
	SparseArray *parent = NULL;
	if (cls->super_class)
	{
		parent = dtable_for_class(cls->super_class);
	}
	installMethodsInClass(cls, parent, added, YES);
	// `added` now contains only the methods that were actually installed.
	SparseArrayDestroy(added);
	checkARCAccessors(cls);
}
/**
 * Creates and returns the dispatch table for `class` by copying the
 * superclass dtable and overlaying the class's own methods.  `root_dtable`
 * breaks the root-class/metaclass cycle while the root class's dtable is
 * still being installed.
 */
PRIVATE dtable_t create_dtable_for_class(Class class, dtable_t root_dtable)
{
	// Don't create a dtable for a class that already has one
	if (classHasDtable(class)) { return dtable_for_class(class); }
	LOCK_RUNTIME_FOR_SCOPE();
	// Make sure that another thread didn't create the dtable while we were
	// waiting on the lock.
	if (classHasDtable(class)) { return dtable_for_class(class); }
	Class super = class_getSuperclass(class);
	dtable_t dtable;
	dtable_t super_dtable = NULL;
	if (Nil == super)
	{
		// Root class: start from an empty table.
		dtable = SparseArrayNewWithDepth(dtable_depth);
	}
	else
	{
		super_dtable = dtable_for_class(super);
		if (super_dtable == uninstalled_dtable)
		{
			// The superclass's dtable is not installed yet.  The only
			// legitimate case is the metaclass of the root class (whose
			// superclass is the root class itself, currently mid-creation);
			// use the caller-supplied root dtable for it.
			if (super->isa == class)
			{
				super_dtable = root_dtable;
			}
			else
			{
				abort();
			}
		}
		dtable = SparseArrayCopy(super_dtable);
	}
	// When constructing the initial dtable for a class, we iterate along the
	// method list in forward-traversal order. The first method that we
	// encounter is always the one that we want to keep, so we instruct
	// installMethodInDtable() to replace only methods that are inherited from
	// the superclass.
	struct objc_method_list *list = (void*)class->methods;
	while (NULL != list)
	{
		for (unsigned i=0 ; i<list->count ; i++)
		{
			struct objc_method *super_method = super_dtable
				? SparseArrayLookup(super_dtable, method_at_index(list, i)->selector->index)
				: NULL;
			installMethodInDtable(class, dtable, method_at_index(list, i), super_method, YES);
		}
		list = list->next;
	}
	return dtable;
}
Class class_table_next(void **e);
/**
 * Grows every dtable (installed, uninstalled, tracing, and temporary) so that
 * `newSize` selector indexes fit.  Depth grows in steps of 8 bits.  Called
 * when selector registration outgrows the current sparse-array capacity.
 */
PRIVATE void objc_resize_dtables(uint32_t newSize)
{
	// If dtables already have enough space to store all registered selectors, do nothing
	if (1<<dtable_depth > newSize) { return; }
	LOCK_RUNTIME_FOR_SCOPE();
	// Re-check under the lock: another thread may have already resized.
	if (1<<dtable_depth > newSize) { return; }
	dtable_depth += 8;
	uint32_t oldShift = uninstalled_dtable->shift;
	dtable_t old_uninstalled_dtable = uninstalled_dtable;
	uninstalled_dtable = SparseArrayExpandingArray(uninstalled_dtable, dtable_depth);
#if defined(WITH_TRACING) && defined (__x86_64)
	tracing_dtable = SparseArrayExpandingArray(tracing_dtable, dtable_depth);
#endif
	{
		// Temporary (+initialize in-flight) dtables are guarded by the
		// initialize lock, not the runtime lock.
		LOCK_FOR_SCOPE(&initialize_lock);
		for (InitializingDtable *buffer = temporary_dtables ; NULL != buffer ; buffer = buffer->next)
		{
			buffer->dtable = SparseArrayExpandingArray(buffer->dtable, dtable_depth);
		}
	}
	// Resize all existing dtables
	void *e = NULL;
	struct objc_class *next;
	while ((next = class_table_next(&e)))
	{
		// Classes still pointing at the old placeholder are redirected to the
		// resized placeholder rather than being given real dtables.
		if (next->dtable == old_uninstalled_dtable)
		{
			next->dtable = uninstalled_dtable;
			next->isa->dtable = uninstalled_dtable;
			continue;
		}
		// Only expand tables that still have the old depth (shift), so a
		// class and metaclass sharing state are not expanded twice.
		if (NULL != next->dtable &&
		    ((SparseArray*)next->dtable)->shift == oldShift)
		{
			next->dtable = SparseArrayExpandingArray((void*)next->dtable, dtable_depth);
			next->isa->dtable = SparseArrayExpandingArray((void*)next->isa->dtable, dtable_depth);
		}
	}
}
/**
 * Returns a copy of `old` for use by `cls`.  The class argument is currently
 * unused; a plain sparse-array copy is sufficient.
 */
PRIVATE dtable_t objc_copy_dtable_for_class(dtable_t old, Class cls)
{
	dtable_t copy = SparseArrayCopy(old);
	return copy;
}
/**
 * Destroys a dtable previously created by create_dtable_for_class() or
 * objc_copy_dtable_for_class().
 */
PRIVATE void free_dtable(dtable_t dtable)
{
	SparseArrayDestroy(dtable);
}
/**
 * Deprecated shim kept for binary compatibility: warns once on first use and
 * forwards to objc_update_dtable_for_class().
 */
LEGACY void update_dispatch_table_for_class(Class cls)
{
	static BOOL warned = NO;
	if (NO == warned)
	{
		warned = YES;
		fprintf(stderr,
			"Warning: Calling deprecated private ObjC runtime function %s\n", __func__);
	}
	objc_update_dtable_for_class(cls);
}
void objc_resolve_class(Class);
/**
 * Cleanup handler used by LOCK_OBJECT_FOR_SCOPE(): releases the monitor that
 * objc_sync_enter() acquired on *object when the guarded scope exits.
 */
__attribute__((unused)) static void objc_release_object_lock(id *object)
{
	objc_sync_exit(*object);
}
/**
 * Macro that is equivalent to @synchronize, for use in C code.  Acquires the
 * object's monitor with objc_sync_enter() and registers a cleanup so that
 * objc_sync_exit() runs automatically when the enclosing scope exits (even on
 * early return).
 */
#define LOCK_OBJECT_FOR_SCOPE(obj) \
	__attribute__((cleanup(objc_release_object_lock)))\
	__attribute__((unused)) id lock_object_pointer = obj;\
	objc_sync_enter(obj);
/**
 * Remove a buffer from an entry in the initializing dtables list.  This is
 * called as a cleanup to ensure that it runs even if +initialize throws an
 * exception.  It also installs the now-final dtables on the class and its
 * metaclass.  Relies on the invariant (established in objc_send_initialize)
 * that the metaclass entry's `next` pointer is the class's own entry.
 */
static void remove_dtable(InitializingDtable* meta_buffer)
{
	LOCK(&initialize_lock);
	// The class's buffer entry is always chained directly after the
	// metaclass's entry.
	InitializingDtable *buffer = meta_buffer->next;
	// Install the dtable:
	meta_buffer->class->dtable = meta_buffer->dtable;
	buffer->class->dtable = buffer->dtable;
	// Remove the look-aside buffer entry.
	if (temporary_dtables == meta_buffer)
	{
		temporary_dtables = buffer->next;
	}
	else
	{
		// Walk the list to find the entry that precedes meta_buffer, then
		// unlink both paired entries in one step.
		InitializingDtable *prev = temporary_dtables;
		while (prev->next->class != meta_buffer->class)
		{
			prev = prev->next;
		}
		prev->next = buffer->next;
	}
	UNLOCK(&initialize_lock);
}
/**
 * Send a +initialize message to the receiver, if required.  Creates the
 * class's (and metaclass's) real dtables, publishes them via a temporary
 * look-aside buffer while +initialize runs, and guarantees +initialize is
 * invoked at most once per class, superclasses first.
 */
OBJC_PUBLIC void objc_send_initialize(id object)
{
	Class class = classForObject(object);
	// If the first message is sent to an instance (weird, but possible and
	// likely for things like NSConstantString), make sure +initialize goes to
	// the class not the metaclass.
	if (objc_test_class_flag(class, objc_class_flag_meta))
	{
		class = (Class)object;
	}
	Class meta = class->isa;
	// Make sure that the class is resolved.
	objc_resolve_class(class);
	// Make sure that the superclass is initialized first.
	if (Nil != class->super_class)
	{
		objc_send_initialize((id)class->super_class);
	}
	// Lock the runtime while we're creating dtables and before we acquire the
	// init lock.  This prevents a lock-order reversal when dtable_for_class is
	// called from something holding the runtime lock while we're still holding
	// the initialize lock.  We should ensure that we never acquire the runtime
	// lock after acquiring the initialize lock.
	LOCK_RUNTIME();
	// Superclass +initialize might possibly send a message to this class, in
	// which case this method would be called again.  See NSObject and
	// NSAutoreleasePool +initialize interaction in GNUstep.
	if (objc_test_class_flag(class, objc_class_flag_initialized))
	{
		// We know that initialization has started because the flag is set.
		// Check that it's finished by grabbing the class lock.  This will be
		// released once the class has been fully initialized.  The runtime
		// lock needs to be released first to prevent a deadlock between the
		// runtime lock and the class-specific lock.
		UNLOCK_RUNTIME();
		objc_sync_enter((id)meta);
		objc_sync_exit((id)meta);
		assert(dtable_for_class(class) != uninstalled_dtable);
		return;
	}
	// We should try to acquire the class lock before any runtime/init locks.
	// If another thread is in the middle of running `allocateHiddenClass()` it
	// has acquired a spinlock and will be trying to acquire the runtime lock.
	// When this happens there is a small chance we could hit the same spinlock
	// and deadlock the process (as any further attempts to acquire the runtime
	// will also block forever).
	UNLOCK_RUNTIME();
	LOCK_OBJECT_FOR_SCOPE((id)meta);
	LOCK_RUNTIME();
	LOCK(&initialize_lock);
	// Re-check under the locks: another thread may have initialised the class
	// while we were reacquiring.
	if (objc_test_class_flag(class, objc_class_flag_initialized))
	{
		UNLOCK(&initialize_lock);
		UNLOCK_RUNTIME();
		return;
	}
	BOOL skipMeta = objc_test_class_flag(meta, objc_class_flag_initialized);
	// Mark metaclasses as never needing refcount manipulation for their
	// instances (classes).
	if (!skipMeta)
	{
		objc_set_class_flag(meta, objc_class_flag_permanent_instances);
	}
	// Set the initialized flag on both this class and its metaclass, to make
	// sure that +initialize is only ever sent once.
	objc_set_class_flag(class, objc_class_flag_initialized);
	objc_set_class_flag(meta, objc_class_flag_initialized);
	dtable_t class_dtable = create_dtable_for_class(class, uninstalled_dtable);
	dtable_t dtable = skipMeta ? 0 : create_dtable_for_class(meta, class_dtable);
	// Now we've finished doing things that may acquire the runtime lock, so we
	// can hold onto the initialise lock to make anything doing
	// dtable_for_class block until we've finished updating temporary dtable
	// lists.
	// If another thread holds the runtime lock, it can now proceed until it
	// gets into a dtable_for_class call, and then block there waiting for us
	// to finish setting up the temporary dtable.
	UNLOCK_RUNTIME();
	// Lazily interned +initialize selector, shared by all invocations.
	static SEL initializeSel = 0;
	if (0 == initializeSel)
	{
		initializeSel = sel_registerName("initialize");
	}
	struct objc_method *initializeSlot = skipMeta ? 0 :
		objc_dtable_lookup(dtable, initializeSel->index);
	// If there's no initialize method, then don't bother installing and
	// removing the initialize dtable, just install both dtables correctly now
	if (0 == initializeSlot)
	{
		if (!skipMeta)
		{
			meta->dtable = dtable;
		}
		class->dtable = class_dtable;
		checkARCAccessors(class);
		UNLOCK(&initialize_lock);
		return;
	}
	// Create an entry in the dtable look-aside buffer for this.  When sending
	// a message to this class in future, the lookup function will check this
	// buffer if the receiver's dtable is not installed, and block if
	// attempting to send a message to this class.
	InitializingDtable buffer = { class, class_dtable, temporary_dtables };
	// remove_dtable() runs when this scope exits (even via an exception from
	// +initialize); it installs the final dtables and unlinks both entries.
	__attribute__((cleanup(remove_dtable)))
	InitializingDtable meta_buffer = { meta, dtable, &buffer };
	temporary_dtables = &meta_buffer;
	// We now release the initialize lock.  We'll reacquire it later when we do
	// the cleanup, but at this point we allow other threads to get the
	// temporary dtable and call +initialize in other threads.
	UNLOCK(&initialize_lock);
	// We still hold the class lock at this point.  dtable_for_class will block
	// there after acquiring the temporary dtable.
	checkARCAccessors(class);
	// Store the buffer in the temporary dtables list.  Note that it is safe to
	// insert it into a global list, even though it's a temporary variable,
	// because we will clean it up after this function.
	initializeSlot->imp((id)class, initializeSel);
}

/* ==== Objective-C exception personality implementation (eh_personality) ==== */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "dwarf_eh.h"
#include "objc/runtime.h"
#include "objc/hooks.h"
#include "objc/objc-exception.h"
#include "class.h"
#include "objcxx_eh.h"
#ifndef DEBUG_EXCEPTIONS
#define DEBUG_LOG(...)
#else
#define DEBUG_LOG(str, ...) fprintf(stderr, str, ## __VA_ARGS__)
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#if !__has_builtin(__builtin_unreachable)
#define __builtin_unreachable abort
#endif
void test_cxx_eh_implementation();
/**
 * The Itanium C++ public structure for in-flight exception status.
 * NOTE(review): layout is assumed to match the C++ runtime's definition of
 * __cxa_eh_globals (Itanium C++ ABI) — verify against the libc++abi /
 * libsupc++ in use.
 */
struct __cxa_eh_globals
{
	/**
	 * The head exception object.  By convention, this is actually the end of
	 * the `__cxa_exception` structure and points to the address of the thrown
	 * object.  This is either an `id*` or a pointer to a C++ type that we're
	 * not going to look at.
	 */
	struct __cxa_exception *caughtExceptions;
	/**
	 * The number of in-flight exceptions thrown.
	 */
	unsigned int uncaughtExceptions;
};
// Weak references to C++ runtime functions. We don't bother testing that
// these are 0 before calling them, because if they are not resolved then we
// should not be in a code path that involves a C++ exception.
__attribute__((weak)) void *__cxa_begin_catch(void *e);
__attribute__((weak)) void __cxa_end_catch(void);
__attribute__((weak)) void __cxa_rethrow(void);
__attribute__((weak)) struct __cxa_eh_globals *__cxa_get_globals(void);
/**
 * Class of exceptions to distinguish between this and other exception types.
 * Encodes the 8-byte vendor/language tag "GNUCOBJC" into the unwind header's
 * exception_class field.
 */
static const uint64_t objc_exception_class = EXCEPTION_CLASS('G','N','U','C','O','B','J','C');
/**
 * Structure used as a header on thrown exceptions.
 */
struct objc_exception
{
	/** The selector value to be returned when installing the catch handler.
	 * Used at the call site to determine which catch() block should execute.
	 * This is found in phase 1 of unwinding then installed in phase 2.*/
	int handlerSwitchValue;
	/** The cached landing pad for the catch handler.*/
	void *landingPad;
	/**
	 * Next pointer for chained exceptions (the per-thread stack of caught
	 * exceptions, see thread_data::caughtExceptions).
	 */
	struct objc_exception *next;
	/**
	 * The number of nested catches that may hold this exception.  This is
	 * negative while an exception is being rethrown.
	 */
	int catch_count;
	/** The language-agnostic part of the exception header. */
	struct _Unwind_Exception unwindHeader;
	/** Thrown object.  This is after the unwind header so that the C++
	 * exception handler can catch this as a foreign exception.  Do not move
	 * this field: the field order is part of the interop contract. */
	id object;
	/** C++ exception structure.  Used for mixed exceptions.  When we are in
	 * Objective-C++ code, we create this structure for passing to the C++
	 * exception personality function.  It will then handle installing
	 * exceptions for us. */
	struct _Unwind_Exception *cxx_exception;
};
/**
 * Recovers the enclosing objc_exception structure from a pointer to its
 * embedded language-agnostic unwind header.
 */
struct objc_exception *objc_exception_from_header(struct _Unwind_Exception *ex)
{
	char *base = (char*)ex - offsetof(struct objc_exception, unwindHeader);
	return (struct objc_exception*)base;
}
/** Kind of handler found when matching an LSDA action record. */
typedef enum
{
	handler_none,        /**< No matching handler in this frame. */
	handler_cleanup,     /**< Cleanup only (destructors / @finally). */
	handler_catchall_id, /**< An @catch(id) handler. */
	handler_catchall,    /**< A catch-anything handler. */
	handler_class        /**< A handler for a specific class (or superclass match). */
} handler_type;
/** Kind of exception a thread is currently handling. */
enum exception_type
{
	NONE,          /**< No exception in flight. */
	CXX,           /**< C++ exception, delegated to the __cxa_* runtime. */
	OBJC,          /**< Native Objective-C exception. */
	FOREIGN,       /**< Unknown-language unwinder exception. */
	BOXED_FOREIGN  /**< Foreign exception boxed into an ObjC object. */
};
/** Per-thread exception-handling state. */
struct thread_data
{
	/** Kind of the exception currently being handled on this thread. */
	enum exception_type current_exception_type;
	/** Set when the C++ personality routine installed the current handler. */
	BOOL cxxCaughtException;
	/** Head of the stack of caught ObjC exceptions (chained via
	 * objc_exception::next).  For FOREIGN / BOXED_FOREIGN exceptions this
	 * instead holds the foreign header / boxed object (see objc_begin_catch). */
	struct objc_exception *caughtExceptions;
};
/** Thread-local storage for the above. */
static __thread struct thread_data thread_data;
/** Returns this thread's exception-handling state. */
static struct thread_data *get_thread_data(void)
{
	return &thread_data;
}
/** NOTE(review): currently identical to get_thread_data(); presumably kept as
 * a separate name for call sites where the TLS slot is known to be hot. */
static struct thread_data *get_thread_data_fast(void)
{
	return &thread_data;
}
/**
 * Saves the result of the landing pad that we have found.  For ARM EHABI,
 * this is stored in the generic unwind structure's barrier cache, while on
 * other platforms it is stored in the Objective-C exception (and so cannot be
 * saved for foreign exceptions, where `ex` is NULL).
 */
static void saveLandingPad(struct _Unwind_Context *context,
                           struct _Unwind_Exception *ucb,
                           struct objc_exception *ex,
                           int selector,
                           dw_eh_ptr_t landingPad)
{
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
	// On ARM, we store the saved exception in the generic part of the structure
	ucb->barrier_cache.sp = _Unwind_GetGR(context, 13);
	ucb->barrier_cache.bitpattern[1] = (uint32_t)selector;
	ucb->barrier_cache.bitpattern[3] = (uint32_t)landingPad;
#else
	// Cache the results for the phase 2 unwind, if we found a handler
	// and this is not a foreign exception.  We can't cache foreign exceptions
	// because we don't know their structure (although we could cache C++
	// exceptions...)
	if (ex)
	{
		ex->handlerSwitchValue = selector;
		ex->landingPad = landingPad;
	}
#endif
}
/**
 * Loads the saved landing pad.  Returns 1 on success, 0 on failure.  On ARM
 * EHABI the values are recovered from the generic unwinder's barrier cache;
 * elsewhere they were cached on the Objective-C exception by saveLandingPad(),
 * so nothing can be loaded for a foreign exception (`ex` == NULL) and the
 * caller must re-scan the LSDA.
 */
static int loadLandingPad(struct _Unwind_Context *context,
                          struct _Unwind_Exception *ucb,
                          struct objc_exception *ex,
                          unsigned long *selector,
                          dw_eh_ptr_t *landingPad)
{
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
	*selector = ucb->barrier_cache.bitpattern[1];
	*landingPad = (dw_eh_ptr_t)ucb->barrier_cache.bitpattern[3];
	return 1;
#else
	if (ex)
	{
		*selector = ex->handlerSwitchValue;
		*landingPad = ex->landingPad;
		// Report success as documented.  This branch previously returned 0
		// even after filling in both out-parameters, contradicting the
		// contract above (the current caller ignores the result, so this is
		// a contract fix, not a behaviour change at existing call sites).
		return 1;
	}
	return 0;
#endif
}
/**
 * Tells the generic unwinder to continue with the next frame.  On ARM EHABI
 * the personality routine must explicitly unwind the current frame first.
 */
static inline _Unwind_Reason_Code continueUnwinding(struct _Unwind_Exception *ex,
                                                    struct _Unwind_Context *context)
{
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
	if (__gnu_unwind_frame(ex, context) != _URC_OK) { return _URC_FAILURE; }
#endif
	return _URC_CONTINUE_UNWIND;
}
/**
 * Unwinder cleanup callback installed on Objective-C exceptions (see
 * objc_exception_throw).  Currently a deliberate no-op — the commented-out
 * code shows the intended logic.  NOTE(review): the exception structure is
 * freed elsewhere (phase-2 handler / objc_exception_throw on unwind failure);
 * confirm nothing leaks if a foreign runtime destroys the exception through
 * this callback.
 */
static void cleanup(_Unwind_Reason_Code reason, struct _Unwind_Exception *e)
{
	/*
	if (header->exceptionDestructor)
		header->exceptionDestructor (e + 1);
	free((struct objc_exception*) ((char*)e - offsetof(struct objc_exception,
			unwindHeader)));
	*/
}
void objc_exception_rethrow(struct _Unwind_Exception *e);
/**
 * Throws an Objective-C exception.  This function is, unfortunately, used for
 * rethrowing caught exceptions too, even in @finally() blocks.  Unfortunately,
 * this means that we have some problems if the exception is boxed.
 *
 * Does not return: on success the unwinder transfers control to a landing
 * pad; if unwinding fails the unexpected-exception hook runs and we abort().
 */
void objc_exception_throw(id object)
{
	struct thread_data *td = get_thread_data();
	// Fixed: this DEBUG_LOG referenced td->lastThrownObject, which is not a
	// member of struct thread_data, breaking DEBUG_EXCEPTIONS builds.  Log
	// the head of the caught-exception stack instead.
	DEBUG_LOG("Throwing %p, caught exception list head: %p\n", object,
	          (void*)td->caughtExceptions);
	DEBUG_LOG("Exception caught by C++: %d\n", td->cxxCaughtException);
	// If C++ caught the exception, then we may need to make C++ rethrow it if
	// we want to preserve exception state.  Rethrows should be handled with
	// objc_exception_rethrow, but clang appears to do the wrong thing for some
	// cases.
	if (td->cxxCaughtException)
	{
		struct __cxa_eh_globals *globals = __cxa_get_globals();
		if ((globals->caughtExceptions != NULL) &&
		    (*(id*)globals->caughtExceptions == object))
		{
			__cxa_rethrow();
		}
	}
	// Objects may implement -rethrow to take over propagation (e.g. boxed
	// foreign exceptions rethrowing their original header).
	SEL rethrow_sel = sel_registerName("rethrow");
	if ((nil != object) &&
	    (class_respondsToSelector(classForObject(object), rethrow_sel)))
	{
		DEBUG_LOG("Rethrowing\n");
		IMP rethrow = objc_msg_lookup(object, rethrow_sel);
		rethrow(object, rethrow_sel);
		// Should not be reached!  If it is, then the rethrow method actually
		// didn't, so we throw it normally.
	}
	DEBUG_LOG("Throwing %p\n", object);
	struct objc_exception *ex = calloc(1, sizeof(struct objc_exception));
	ex->unwindHeader.exception_class = objc_exception_class;
	ex->unwindHeader.exception_cleanup = cleanup;
	ex->object = object;
	td->cxxCaughtException = NO;
	_Unwind_Reason_Code err = _Unwind_RaiseException(&ex->unwindHeader);
	// _Unwind_RaiseException only returns on failure; reclaim the header.
	free(ex);
	if (_URC_END_OF_STACK == err && 0 != _objc_unexpected_exception)
	{
		_objc_unexpected_exception(object);
	}
	DEBUG_LOG("Throw returned %d\n",(int) err);
	abort();
}
/**
 * Resolves entry `filter` of the LSDA type table to an Objective-C class.
 * Returns Nil for a catchall (or unresolvable) entry, the sentinel (Class)1
 * for the special "@id" entry, and otherwise the class registered under the
 * encoded name (which may be Nil if no such class is registered).
 */
static Class get_type_table_entry(struct _Unwind_Context *context,
                                  struct dwarf_eh_lsda *lsda,
                                  int filter)
{
	// Type table entries are indexed backwards from the table base.
	dw_eh_ptr_t record = lsda->type_table -
		dwarf_size_of_fixed_size_field(lsda->type_table_encoding)*filter;
	dw_eh_ptr_t start = record;
	int64_t offset = read_value(lsda->type_table_encoding, &record);
	// A zero offset denotes a catchall entry.
	if (0 == offset) { return Nil; }
	// The value is stored indirectly (e.g. via a data-relative reference)...
	// ...so we need to resolve it
	char *class_name = (char*)(intptr_t)resolve_indirect_value(context,
		lsda->type_table_encoding, offset, start);
	if (0 == class_name) { return Nil; }
	DEBUG_LOG("Class name: %s\n", class_name);
	// The compiler emits "@id" for @catch(id); report it with the sentinel.
	if (strcmp("@id", class_name) == 0) { return (Class)1; }
	return (Class)objc_getClass(class_name);
}
/**
 * Returns YES if `type` appears anywhere on `thrown`'s superclass chain
 * (including `thrown` itself).
 */
static BOOL isKindOfClass(Class thrown, Class type)
{
	Class cls = thrown;
	do
	{
		if (cls == type)
		{
			return YES;
		}
		cls = class_getSuperclass(cls);
	} while (Nil != cls);
	return NO;
}
/**
 * Walks the chain of LSDA action records for a call site, matching each type
 * filter against the thrown class.  On a match, *selector receives the filter
 * value (what the landing pad switches on) and the handler kind is returned;
 * a NULL action record means the landing pad is cleanup-only.
 */
static handler_type check_action_record(struct _Unwind_Context *context,
                                        BOOL foreignException,
                                        struct dwarf_eh_lsda *lsda,
                                        dw_eh_ptr_t action_record,
                                        Class thrown_class,
                                        unsigned long *selector)
{
	if (!action_record) { return handler_cleanup; }
	while (action_record)
	{
		int filter = read_sleb128(&action_record);
		// The next-record displacement is relative to its own position.
		dw_eh_ptr_t action_record_offset_base = action_record;
		int displacement = read_sleb128(&action_record);
		*selector = filter;
		DEBUG_LOG("Filter: %d\n", filter);
		if (filter > 0)
		{
			Class type = get_type_table_entry(context, lsda, filter);
			DEBUG_LOG("%p type: %d\n", type, !foreignException);
			// Catchall
			if (Nil == type)
			{
				return handler_catchall;
			}
			// We treat id catches as catchalls when an object is thrown and as
			// nothing when a foreign exception is thrown
			else if ((Class)1 == type)
			{
				DEBUG_LOG("Found id catch\n");
				if (!foreignException)
				{
					return handler_catchall_id;
				}
			}
			else if (!foreignException && isKindOfClass(thrown_class, type))
			{
				DEBUG_LOG("found handler for %s\n", type->name);
				return handler_class;
			}
			// Foreign (boxed) exceptions only match on exact class identity.
			else if (thrown_class == type)
			{
				return handler_class;
			}
		}
		else if (filter == 0)
		{
			DEBUG_LOG("0 filter\n");
			// Cleanup?  I think the GNU ABI doesn't actually use this, but it
			// would be a good way of indicating a non-id catchall...
			return handler_cleanup;
		}
		else
		{
			// Negative filters are exception specifications, which this
			// runtime does not emit — treat as a layout mismatch.
			DEBUG_LOG("Filter value: %d\n"
					"Your compiler and I disagree on the correct layout of EH data.\n",
					filter);
			abort();
		}
		// This record didn't match; clear the selector before trying the next.
		*selector = 0;
		action_record = displacement ?
			action_record_offset_base + displacement : 0;
	}
	return handler_none;
}
/**
 * The Objective-C exception personality function implementation.  This is
 * shared by the GCC-compatible and the new implementation.
 *
 * The key difference is that the new implementation always returns the
 * exception object and boxes it.
 */
static inline _Unwind_Reason_Code internal_objc_personality(int version,
                                                            _Unwind_Action actions,
                                                            uint64_t exceptionClass,
                                                            struct _Unwind_Exception *exceptionObject,
                                                            struct _Unwind_Context *context,
                                                            BOOL isNew)
{
	DEBUG_LOG("%s personality function called %p\n", isNew ? "New" : "Old", exceptionObject);
	// This personality function is for version 1 of the ABI.  If you use it
	// with a future version of the ABI, it won't know what to do, so it
	// reports a fatal error and gives up before it breaks anything.
	if (1 != version)
	{
		return _URC_FATAL_PHASE1_ERROR;
	}
	struct objc_exception *ex = 0;
#ifdef DEBUG_EXCEPTIONS
	char *cls = (char*)&exceptionClass;
#endif
	DEBUG_LOG("Class: %c%c%c%c%c%c%c%c\n", cls[7], cls[6], cls[5], cls[4], cls[3], cls[2], cls[1], cls[0]);
	// Check if this is a foreign exception.  If it is a C++ exception, then we
	// have to box it.  If it's something else, like a LanguageKit exception
	// then we ignore it (for now)
	BOOL foreignException = exceptionClass != objc_exception_class;
	// Is this a C++ exception containing an Objective-C++ object?
	BOOL objcxxException = NO;
	// The object to return
	void *object = NULL;
#ifndef NO_OBJCXX
	// Lazily discover the C++ runtime's exception class tag.
	if (cxx_exception_class == 0)
	{
		test_cxx_eh_implementation();
	}
	if (exceptionClass == cxx_exception_class)
	{
		int objcxx;
		id obj = objc_object_for_cxx_exception(exceptionObject, &objcxx);
		objcxxException = objcxx;
		if (objcxxException)
		{
			object = obj;
			DEBUG_LOG("ObjC++ object exception %p\n", object);
			// This is a foreign exception, but for the purposes of exception
			// matching, we pretend that it isn't.
			foreignException = NO;
		}
	}
#endif
	Class thrown_class = Nil;
	if (objcxxException)
	{
		thrown_class = (object == 0) ? Nil : classForObject((id)object);
	}
	// If it's not a foreign exception, then we know the layout of the
	// language-specific exception stuff.
	else if (!foreignException)
	{
		ex = objc_exception_from_header(exceptionObject);
		if (ex->object != nil)
		{
			thrown_class = classForObject(ex->object);
		}
	}
	else if (_objc_class_for_boxing_foreign_exception)
	{
		thrown_class = _objc_class_for_boxing_foreign_exception(exceptionClass);
		DEBUG_LOG("Foreign class: %p\n", thrown_class);
	}
	unsigned char *lsda_addr = (void*)_Unwind_GetLanguageSpecificData(context);
	DEBUG_LOG("LSDA: %p\n", lsda_addr);
	// No LSDA implies no landing pads - try the next frame
	if (0 == lsda_addr)
	{
		return continueUnwinding(exceptionObject, context);
	}
	// These two variables define how the exception will be handled.
	struct dwarf_eh_action action = {0};
	unsigned long selector = 0;
	if (actions & _UA_SEARCH_PHASE)
	{
		DEBUG_LOG("Search phase...\n");
		struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr);
		action = dwarf_eh_find_callsite(context, &lsda);
		handler_type handler = check_action_record(context, foreignException,
				&lsda, action.action_record, thrown_class, &selector);
		DEBUG_LOG("handler: %d\n", handler);
		// If there's no action record, we've only found a cleanup, so keep
		// searching for something real
		if (handler == handler_class ||
		    ((handler == handler_catchall_id) && !foreignException) ||
		    (handler == handler_catchall))
		{
			// Cache the result so phase 2 doesn't have to re-scan the LSDA.
			saveLandingPad(context, exceptionObject, ex, selector, action.landing_pad);
			DEBUG_LOG("Found handler! %d\n", handler);
			return _URC_HANDLER_FOUND;
		}
		return continueUnwinding(exceptionObject, context);
	}
	DEBUG_LOG("Phase 2: Fight!\n");
	// TODO: If this is a C++ exception, we can cache the lookup and cheat a
	// bit
	if (!(actions & _UA_HANDLER_FRAME))
	{
		DEBUG_LOG("Not the handler frame, looking up the cleanup again\n");
		struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr);
		action = dwarf_eh_find_callsite(context, &lsda);
		// If there's no cleanup here, continue unwinding.
		if (0 == action.landing_pad)
		{
			return continueUnwinding(exceptionObject, context);
		}
		handler_type handler = check_action_record(context, foreignException,
				&lsda, action.action_record, thrown_class, &selector);
		DEBUG_LOG("handler! %d %d\n", (int)handler, (int)selector);
		// On ARM, we occasionally get called to install a handler without
		// phase 1 running (no idea why, I suspect a bug in the generic
		// unwinder), so skip this check.
#if !(defined(__arm__) && !defined(__ARM_DWARF_EH__))
		// If this is not a cleanup, ignore it and keep unwinding.
		if ((handler != handler_cleanup) && !objcxxException)
		{
			DEBUG_LOG("Ignoring handler! %d\n",handler);
			return continueUnwinding(exceptionObject, context);
		}
#endif
		DEBUG_LOG("Installing cleanup...\n");
		// If there is a cleanup, we need to return the exception structure
		// (not the object) to the calling frame, so the cleanup code can
		// resume unwinding with the original header.
		object = exceptionObject;
	}
	else if (foreignException || objcxxException)
	{
		struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr);
		action = dwarf_eh_find_callsite(context, &lsda);
		// Re-run the match to recover the selector for the landing pad.
		check_action_record(context, foreignException, &lsda,
				action.action_record, thrown_class, &selector);
		// If it's a foreign exception, then box it.  If it's an Objective-C++
		// exception, then we need to delete the exception object.
		if (foreignException)
		{
			DEBUG_LOG("Doing the foreign exception thing...\n");
			//[thrown_class exceptionWithForeignException: exceptionObject];
			SEL box_sel = sel_registerName("exceptionWithForeignException:");
			IMP boxfunction = objc_msg_lookup((id)thrown_class, box_sel);
			if (!isNew)
			{
				object = boxfunction((id)thrown_class, box_sel, exceptionObject);
				DEBUG_LOG("Boxed as %p\n", object);
			}
		}
		else if (!isNew) // ObjCXX exception
		{
			_Unwind_DeleteException(exceptionObject);
		}
		// In the new EH ABI, we call objc_begin_catch() / and
		// objc_end_catch(), which will wrap their __cxa* versions.
	}
	else
	{
		// Restore the saved info if we saved some last time.
		loadLandingPad(context, exceptionObject, ex, &selector, &action.landing_pad);
		object = ex->object;
		if (!isNew)
		{
			// Old ABI: the header is consumed here; the handler receives
			// only the thrown object.
			free(ex);
		}
	}
	// Transfer control to the landing pad, passing the exception (or object)
	// in EH data register 0 and the switch selector in register 1.
	_Unwind_SetIP(context, (uintptr_t)action.landing_pad);
	_Unwind_SetGR(context, __builtin_eh_return_data_regno(0),
	              (uintptr_t)(isNew ? exceptionObject : object));
	_Unwind_SetGR(context, __builtin_eh_return_data_regno(1), selector);
	DEBUG_LOG("Installing context, selector %d\n", (int)selector);
	get_thread_data()->cxxCaughtException = NO;
	return _URC_INSTALL_CONTEXT;
}
/** GCC-compatible (old ABI) personality: hands the unboxed / pre-boxed object
 * to the landing pad (isNew == NO).  BEGIN_PERSONALITY_FUNCTION opens the
 * function definition, hence the bare closing brace. */
OBJC_PUBLIC
BEGIN_PERSONALITY_FUNCTION(__gnu_objc_personality_v0)
	return internal_objc_personality(version, actions, exceptionClass,
			exceptionObject, context, NO);
}
/** New-ABI personality: always passes the _Unwind_Exception to the landing
 * pad; objc_begin_catch() performs the boxing (isNew == YES). */
OBJC_PUBLIC
BEGIN_PERSONALITY_FUNCTION(__gnustep_objc_personality_v0)
	return internal_objc_personality(version, actions, exceptionClass,
			exceptionObject, context, YES);
}
/** Objective-C++ personality: wraps native ObjC exceptions in a C++ exception
 * header so __gxx_personality_v0 can match them in mixed-language frames; all
 * other exceptions are forwarded to the C++ personality unchanged. */
OBJC_PUBLIC
BEGIN_PERSONALITY_FUNCTION(__gnustep_objcxx_personality_v0)
#ifndef NO_OBJCXX
	// Lazily discover the C++ runtime's exception class tag.
	if (cxx_exception_class == 0)
	{
		test_cxx_eh_implementation();
	}
	if (exceptionClass == objc_exception_class)
	{
		struct objc_exception *ex = objc_exception_from_header(exceptionObject);
		// Create the companion C++ exception once per thrown object.
		if (0 == ex->cxx_exception)
		{
			ex->cxx_exception = objc_init_cxx_exception(ex->object);
		}
		// We now have two copies of the _Unwind_Exception object (which stores
		// state for the unwinder) in flight.  Make sure that they're in sync.
		COPY_EXCEPTION(ex->cxx_exception, exceptionObject);
		exceptionObject = ex->cxx_exception;
		exceptionClass = cxx_exception_class;
		int ret = CALL_PERSONALITY_FUNCTION(__gxx_personality_v0);
		COPY_EXCEPTION(exceptionObject, ex->cxx_exception);
		if (ret == _URC_INSTALL_CONTEXT)
		{
			// Remember that C++ installed this handler so a later throw can
			// route a rethrow through __cxa_rethrow (see objc_exception_throw).
			get_thread_data()->cxxCaughtException = YES;
		}
		return ret;
	}
#endif
	return CALL_PERSONALITY_FUNCTION(__gxx_personality_v0);
}
/**
 * Called by a landing pad when it begins handling an exception (new EH ABI).
 * Records the exception on the per-thread caught stack and returns the object
 * to bind in the @catch block, boxing foreign exceptions where possible.
 */
OBJC_PUBLIC id objc_begin_catch(struct _Unwind_Exception *exceptionObject)
{
	struct thread_data *td = get_thread_data();
	DEBUG_LOG("Beginning catch %p\n", exceptionObject);
	td->cxxCaughtException = NO;
	if (exceptionObject->exception_class == objc_exception_class)
	{
		td->current_exception_type = OBJC;
		struct objc_exception *ex = objc_exception_from_header(exceptionObject);
		if (ex->catch_count == 0)
		{
			// If this is the first catch, add it to the list.
			ex->catch_count = 1;
			ex->next = td->caughtExceptions;
			td->caughtExceptions = ex;
		}
		else if (ex->catch_count < 0)
		{
			// If this is being rethrown, mark it as caught again and increment
			// the refcount (catch_count is negated while a rethrow is in
			// flight).
			ex->catch_count = -ex->catch_count + 1;
		}
		else
		{
			// Otherwise, just increment the catch count
			ex->catch_count++;
		}
		DEBUG_LOG("objc catch\n");
		return ex->object;
	}
	// If we have a foreign exception while we have stacked exceptions, we have
	// a problem.  We can't chain them, so we follow the example of C++ and
	// just abort.
	if (td->caughtExceptions != 0)
	{
		// FIXME: Actually, we can handle a C++ exception if only ObjC
		// exceptions are in-flight
		abort();
	}
#ifndef NO_OBJCXX
	// If this is a C++ exception, let the C++ runtime handle it.
	if (exceptionObject->exception_class == cxx_exception_class)
	{
		DEBUG_LOG("c++ catch\n");
		td->current_exception_type = CXX;
		return __cxa_begin_catch(exceptionObject);
	}
#endif
	DEBUG_LOG("foreign exception catch\n");
	// Box if we have a boxing function.
	if (_objc_class_for_boxing_foreign_exception)
	{
		Class thrown_class =
			_objc_class_for_boxing_foreign_exception(exceptionObject->exception_class);
		SEL box_sel = sel_registerName("exceptionWithForeignException:");
		id(*boxfunction)(Class,SEL,struct _Unwind_Exception*) =
			(id(*)(Class,SEL,struct _Unwind_Exception*))objc_msg_lookup((id)thrown_class, box_sel);
		if (boxfunction != 0)
		{
			id boxed = boxfunction(thrown_class, box_sel, exceptionObject);
			// NOTE: caughtExceptions is overloaded here to hold the boxed
			// object; objc_end_catch checks current_exception_type first.
			td->caughtExceptions = (struct objc_exception*)boxed;
			td->current_exception_type = BOXED_FOREIGN;
			return boxed;
		}
	}
	td->current_exception_type = FOREIGN;
	// Overloaded again: store the raw foreign header for objc_end_catch.
	td->caughtExceptions = (struct objc_exception*)exceptionObject;
	// If this is some other kind of exception, then assume that the value is
	// at the end of the exception header.
	return (id)((char*)exceptionObject + sizeof(struct _Unwind_Exception));
}
OBJC_PUBLIC void objc_end_catch(void)
{
struct thread_data *td = get_thread_data_fast();
// If this is a boxed foreign exception then the boxing class is
// responsible for cleaning it up
if (td->current_exception_type == BOXED_FOREIGN)
{
td->caughtExceptions = 0;
td->current_exception_type = NONE;
return;
}
DEBUG_LOG("Ending catch\n");
// If this is a C++ exception, then just let the C++ runtime handle it.
if (td->current_exception_type == CXX)
{
__cxa_end_catch();
td->current_exception_type = OBJC;
return;
}
if (td->current_exception_type == FOREIGN)
{
struct _Unwind_Exception *e = ((struct _Unwind_Exception*)td->caughtExceptions);
e->exception_cleanup(_URC_FOREIGN_EXCEPTION_CAUGHT, e);
td->current_exception_type = NONE;
td->caughtExceptions = 0;
return;
}
// Otherwise we should do the cleanup thing. Nested catches are possible,
// so we only clean up the exception if this is the last reference.
assert(td->caughtExceptions != 0);
struct objc_exception *ex = td->caughtExceptions;
// If this is being rethrown decrement its (negated) catch count, but don't
// delete it even if its catch count would be 0.
if (ex->catch_count < 0)
{
ex->catch_count++;
return;
}
ex->catch_count--;
if (ex->catch_count == 0)
{
td->caughtExceptions = ex->next;
free(ex);
}
}
/**
 * Rethrows the currently-caught exception `e`.  For Objective-C exceptions
 * the catch count is negated so objc_end_catch can recognise the rethrow;
 * for C++ exceptions the C++ runtime rethrows; boxed foreign exceptions get
 * a chance to rethrow themselves via a -rethrow message.  Does not return.
 */
OBJC_PUBLIC void objc_exception_rethrow(struct _Unwind_Exception *e)
{
	struct thread_data *td = get_thread_data_fast();
	// If this is an Objective-C exception, then
	if (td->current_exception_type == OBJC)
	{
		struct objc_exception *ex = objc_exception_from_header(e);
		assert(e->exception_class == objc_exception_class);
		assert(ex == td->caughtExceptions);
		assert(ex->catch_count > 0);
		// Negate the catch count, so that we can detect that this is a
		// rethrown exception in objc_end_catch
		ex->catch_count = -ex->catch_count;
		// This returns only if unwinding fails (e.g. end of stack).
		_Unwind_Reason_Code err = _Unwind_Resume_or_Rethrow(e);
		// Bug fix: capture the thrown object BEFORE freeing the exception
		// header.  The previous code read ex->object after free(ex), a
		// use-after-free.
		id object = ex->object;
		free(ex);
		if (_URC_END_OF_STACK == err && 0 != _objc_unexpected_exception)
		{
			_objc_unexpected_exception(object);
		}
		abort();
	}
#ifndef NO_OBJCXX
	else if (td->current_exception_type == CXX)
	{
		assert(e->exception_class == cxx_exception_class);
		// Does not return; the C++ runtime rethrows its current exception.
		__cxa_rethrow();
	}
#endif
	if (td->current_exception_type == BOXED_FOREIGN)
	{
		SEL rethrow_sel = sel_registerName("rethrow");
		id object = (id)td->caughtExceptions;
		if ((nil != object) &&
		    (class_respondsToSelector(classForObject(object), rethrow_sel)))
		{
			DEBUG_LOG("Rethrowing boxed exception\n");
			IMP rethrow = objc_msg_lookup(object, rethrow_sel);
			// Expected not to return; falls through to the generic resume
			// below if the rethrow method returns after all.
			rethrow(object, rethrow_sel);
		}
	}
	assert(e == (struct _Unwind_Exception*)td->caughtExceptions);
	_Unwind_Resume_or_Rethrow(e);
	abort();
}
/**
 * Atomically installs `handler` as the hook called for exceptions that reach
 * the end of the stack uncaught, and returns the previously installed hook.
 */
objc_uncaught_exception_handler objc_setUncaughtExceptionHandler(objc_uncaught_exception_handler handler)
{
	objc_uncaught_exception_handler previous =
		__atomic_exchange_n(&_objc_unexpected_exception, handler, __ATOMIC_SEQ_CST);
	return previous;
}

@ -0,0 +1,118 @@
# Pre-generated x86-64 (GCC 11, SysV) assembly for eh_trampoline.cc.  It calls
# cxx_throw() in a frame that registers test_eh_personality as its personality
# routine and has a cleanup (X's destructor), so unwinding through it
# exercises the personality-routine machinery under test.
.file "eh_trampoline.cc"
.text
.align 2
# Local destructor X::~X() — deliberately empty; it exists only so the
# compiler emits a cleanup landing pad for the frame below.
.type _ZZ13eh_trampolinevEN1XD2Ev, @function
_ZZ13eh_trampolinevEN1XD2Ev:
.LFB2:
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
movq %rdi, -8(%rbp)
nop
popq %rbp
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE2:
.size _ZZ13eh_trampolinevEN1XD2Ev, .-_ZZ13eh_trampolinevEN1XD2Ev
.set _ZZ13eh_trampolinevEN1XD1Ev,_ZZ13eh_trampolinevEN1XD2Ev
.globl _Z13eh_trampolinev
.hidden _Z13eh_trampolinev
.type _Z13eh_trampolinev, @function
# int eh_trampoline(): personality is test_eh_personality, LSDA at .LLSDA0.
_Z13eh_trampolinev:
.LFB0:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.test_eh_personality
.cfi_lsda 0x1b,.LLSDA0
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %rbx
subq $24, %rsp
.cfi_offset 3, -24
# Stack-protector canary setup.
movq %fs:40, %rax
movq %rax, -24(%rbp)
xorl %eax, %eax
.LEHB0:
call _Z9cxx_throwv@PLT
.LEHE0:
movl $0, %ebx
leaq -25(%rbp), %rax
movq %rax, %rdi
call _ZZ13eh_trampolinevEN1XD1Ev
movl %ebx, %eax
movq -24(%rbp), %rdx
subq %fs:40, %rdx
je .L5
jmp .L7
# .L6: cleanup landing pad — run X's destructor, then resume unwinding.
.L6:
endbr64
movq %rax, %rbx
leaq -25(%rbp), %rax
movq %rax, %rdi
call _ZZ13eh_trampolinevEN1XD1Ev
movq %rbx, %rax
movq %rax, %rdi
.LEHB1:
call _Unwind_Resume@PLT
.LEHE1:
.L7:
call __stack_chk_fail@PLT
.L5:
movq -8(%rbp), %rbx
leave
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE0:
.globl test_eh_personality
# LSDA: call-site table mapping the .LEHB0-.LEHE0 range (the cxx_throw call)
# to the cleanup landing pad at .L6; no action (cleanup only).
.section .gcc_except_table,"a",@progbits
.LLSDA0:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE0-.LLSDACSB0
.LLSDACSB0:
.uleb128 .LEHB0-.LFB0
.uleb128 .LEHE0-.LEHB0
.uleb128 .L6-.LFB0
.uleb128 0
.uleb128 .LEHB1-.LFB0
.uleb128 .LEHE1-.LEHB1
.uleb128 0
.uleb128 0
.LLSDACSE0:
.text
.size _Z13eh_trampolinev, .-_Z13eh_trampolinev
# Weak indirect reference to the personality routine (PIC-friendly).
.hidden DW.ref.test_eh_personality
.weak DW.ref.test_eh_personality
.section .data.rel.local.DW.ref.test_eh_personality,"awG",@progbits,DW.ref.test_eh_personality,comdat
.align 8
.type DW.ref.test_eh_personality, @object
.size DW.ref.test_eh_personality, 8
DW.ref.test_eh_personality:
.quad test_eh_personality
.ident "GCC: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0"
.section .note.GNU-stack,"",@progbits
# CET/IBT property note emitted by GCC.
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:

@ -0,0 +1,9 @@
// Implemented elsewhere in the test suite; throws a C++ exception.
void cxx_throw();
// Test helper: calls cxx_throw() in a frame containing a local with a
// non-trivial destructor, forcing the compiler to emit a cleanup landing pad
// so unwinding through this frame exercises the personality routine.  This
// is the source for the pre-generated eh_trampoline assembly.
__attribute((visibility("hidden")))
int eh_trampoline()
{
struct X { ~X() {} } x;
cxx_throw();
return 0;
}

@ -0,0 +1,287 @@
#include <cstdlib>
#include <cstdio>
#include <string>
#include <vector>
#include "objc/runtime.h"
#include "objc/objc-exception.h"
#include "visibility.h"
#include <windows.h>
#define RtlAddGrowableFunctionTable ClangIsConfusedByTypedefReturnTypes
#include <rtlsupportapi.h>
// Fallbacks for compilers without __has_builtin / __builtin_unreachable.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#if !__has_builtin(__builtin_unreachable)
#define __builtin_unreachable abort
#endif
// SEH exception code used for Microsoft C++ exceptions: 'msc' | 0xE0000000.
#define EH_EXCEPTION_NUMBER ('msc' | 0xE0000000)
// Magic placed in ExceptionInformation[0] by the MSVC throw runtime.
#define EH_MAGIC_NUMBER1 0x19930520
#define EXCEPTION_NONCONTINUABLE 0x1
// Local mirror of MSVC's RTTI TypeDescriptor; `name` is a trailing
// NUL-terminated mangled type name.
struct _MSVC_TypeDescriptor
{
const void* pVFTable;
void* spare;
char name[0];
};
// Local mirror of MSVC's CatchableType: describes one type the thrown object
// may be caught as (type descriptor address plus this-adjustment triple).
struct _MSVC_CatchableType
{
unsigned int flags;
unsigned long type;
int mdisp;
int pdisp;
int vdisp;
int size;
unsigned long copyFunction;
};
struct _MSVC_CatchableTypeArray
{
int count;
unsigned long types[0];
};
// Local mirror of MSVC's ThrowInfo, referenced from ExceptionInformation[2].
struct _MSVC_ThrowInfo
{
unsigned int attributes;
unsigned long pfnUnwind;
unsigned long pfnForwardCompat;
unsigned long pCatchableTypeArray;
};
// Filter that was installed before ours; chained to from our filter.
static LPTOP_LEVEL_EXCEPTION_FILTER originalUnhandledExceptionFilter = nullptr;
// Hook invoked when an ObjC exception reaches the top level uncaught.
void (*_objc_unexpected_exception)(id exception);
LONG WINAPI _objc_unhandled_exception_filter(struct _EXCEPTION_POINTERS* exceptionInfo);
// On 64-bit Windows the catch metadata stores 32-bit image-relative
// addresses; this code uses a stack local as the "image base".  On 32-bit,
// plain absolute addresses are used.
#if defined(_WIN64)
#define IMAGE_RELATIVE(ptr, base) (static_cast<unsigned long>((ptr ? ((uintptr_t)ptr - (uintptr_t)base) : (uintptr_t)nullptr)))
#else
#define IMAGE_RELATIVE(ptr, base) reinterpret_cast<unsigned long>((ptr))
#endif
extern "C" void __stdcall _CxxThrowException(void*, _MSVC_ThrowInfo*);
namespace
{
static std::string mangleObjcObject()
{
	// MSVC RTTI type-descriptor name for `struct objc_object *` — the
	// __ptr64 ("E") variant on 64-bit targets.
#if defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ == 8
	static const char mangled[] = ".PEAUobjc_object@@";
#else
	static const char mangled[] = ".PAUobjc_object@@";
#endif
	return std::string(mangled);
}
static std::string mangleStructNamed(const char* className)
{
	// Build the MSVC RTTI type-descriptor name for `struct <className> *`:
	// 32-bit:
	//   .PAUxxx@@ = ?? struct xxx * `RTTI Type Descriptor'
	// 64-bit:
	//   .PEAUxxx@@ = ?? struct xxx * __ptr64 `RTTI Type Descriptor'
#if defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ == 8
	std::string mangled(".PEAU");
#else
	std::string mangled(".PAU");
#endif
	mangled += className;
	mangled += "@@";
	return mangled;
}
void fillCatchableType(_MSVC_CatchableType* exceptType)
{
exceptType->flags = 1;
exceptType->mdisp = 0;
exceptType->pdisp = -1;
exceptType->vdisp = 0;
exceptType->size = sizeof(id);
exceptType->copyFunction = 0;
}
} // <anonymous-namespace>
// NOTE(review): appears unused within this file — confirm before removal.
struct X {};
OBJC_PUBLIC extern "C" void objc_exception_rethrow(void* exc);
/**
 * Raises `object` as a Microsoft C++ exception so MSVC-style catch funclets
 * (and @catch blocks compiled to them) can match it by class.  Builds the
 * ThrowInfo / CatchableTypeArray metadata on the stack, with one catchable
 * entry per class in the object's superclass chain plus one for plain id.
 * Does not return.
 */
OBJC_PUBLIC extern "C" void objc_exception_throw(id object)
{
// Base used for image-relative addresses.
char x;
// This is the base vtable for all RTTI entries
static const void* typeid_vtable = *(void**)&typeid(void *);
// Give the object a chance to rethrow itself (boxed exceptions).
SEL rethrow_sel = sel_registerName("rethrow");
if ((nil != object) &&
(class_respondsToSelector(object_getClass(object), rethrow_sel)))
{
IMP rethrow = objc_msg_lookup(object, rethrow_sel);
rethrow(object, rethrow_sel);
// Should not be reached! If it is, then the rethrow method actually
// didn't, so we throw it normally.
}
// Optional pre-throw hook on the object.
SEL processException_sel = sel_registerName("_processException");
if ((nil != object) &&
(class_respondsToSelector(object_getClass(object), processException_sel)))
{
IMP processException = objc_msg_lookup(object, processException_sel);
processException(object, processException_sel);
}
// The 'id' base type will be taking up a spot in the list:
size_t typeCount = 1;
// Get count of all types in exception
for (Class cls = object_getClass(object); cls != Nil; cls = class_getSuperclass(cls), ++typeCount)
;
// Unfortunately we can't put this in a real function since the alloca has to be in this stack frame:
#define CREATE_TYPE_DESCRIPTOR(desc, symName) \
desc = reinterpret_cast<_MSVC_TypeDescriptor*>(alloca(sizeof(_MSVC_TypeDescriptor) + symName.size() + 1 /* null terminator */)); \
desc->pVFTable = typeid_vtable; \
desc->spare = nullptr; \
strcpy_s(desc->name, symName.size() + 1, symName.c_str());
auto exceptTypes =
(_MSVC_CatchableTypeArray*)_alloca(sizeof(_MSVC_CatchableTypeArray) + sizeof(_MSVC_CatchableType*) * typeCount);
exceptTypes->count = typeCount;
// Add exception type and all base types to throw information
size_t curTypeIndex = 0;
for (Class cls = object_getClass(object); cls != Nil; cls = class_getSuperclass(cls))
{
auto exceptType = (_MSVC_CatchableType*)_alloca(sizeof(_MSVC_CatchableType));
fillCatchableType(exceptType);
auto mangledName = mangleStructNamed(class_getName(cls));
_MSVC_TypeDescriptor *ty;
CREATE_TYPE_DESCRIPTOR(ty, mangledName);
exceptType->type = IMAGE_RELATIVE(ty, &x);
exceptTypes->types[curTypeIndex++] = IMAGE_RELATIVE(exceptType, &x);
}
// Add id (struct objc_object*)
auto exceptType = (_MSVC_CatchableType*)_alloca(sizeof(_MSVC_CatchableType));
fillCatchableType(exceptType);
auto idName = mangleObjcObject();
_MSVC_TypeDescriptor *ty;
CREATE_TYPE_DESCRIPTOR(ty, idName);
exceptType->type = IMAGE_RELATIVE(ty, &x);
exceptTypes->types[curTypeIndex++] = IMAGE_RELATIVE(exceptType, &x);
_MSVC_ThrowInfo ti = {
0, // attributes
0, // pfnUnwind
0, // pfnForwardCompat
IMAGE_RELATIVE(exceptTypes, &x) // pCatchableTypeArray
};
EXCEPTION_RECORD exception;
exception.ExceptionCode = EH_EXCEPTION_NUMBER;
exception.ExceptionFlags = EXCEPTION_NONCONTINUABLE;
exception.ExceptionRecord = nullptr;
exception.ExceptionAddress = nullptr;
// The fourth parameter is the base address of the image (for us, this stack
// frame), but we only use image-relative 32-bit addresses on 64-bit
// platforms. On 32-bit platforms, we use 32-bit absolute addresses.
exception.NumberParameters = sizeof(void*) == 4 ? 3 : 4;
exception.ExceptionInformation[0] = EH_MAGIC_NUMBER1;
exception.ExceptionInformation[1] = reinterpret_cast<ULONG_PTR>(&object);
exception.ExceptionInformation[2] = reinterpret_cast<ULONG_PTR>(&ti);
exception.ExceptionInformation[3] = reinterpret_cast<ULONG_PTR>(&x);
#ifdef _WIN64
RtlRaiseException(&exception);
#else
RaiseException(exception.ExceptionCode,
exception.ExceptionFlags,
exception.NumberParameters,
exception.ExceptionInformation);
#endif
__builtin_unreachable();
}
/**
 * Rethrows the exception currently being handled.  `exc` is ignored:
 * _CxxThrowException(nullptr, nullptr) is the MSVC runtime's idiom for
 * "rethrow the current exception".
 * NOTE(review): confirm this idiom against the vcruntime documentation.
 */
OBJC_PUBLIC extern "C" void objc_exception_rethrow(void* exc)
{
_CxxThrowException(nullptr, nullptr);
__builtin_unreachable();
}
// rebase_and_cast adds a constant offset to a U value, converting it into a T
// (used to turn image-relative RVAs back into pointers).
template <typename T, typename U>
static std::add_const_t<std::decay_t<T>> rebase_and_cast(intptr_t base, U value) {
// U value -> const T* (base+value)
return reinterpret_cast<std::add_const_t<std::decay_t<T>>>(base + (long)(value));
}
/**
 * Unhandled exception filter that we install to get called when an exception is
 * not otherwise handled in a process that is not being debugged. In here we
 * check if the exception is an Objective C exception raised by
 * objc_exception_throw() above, and if so call the _objc_unexpected_exception
 * hook with the Objective-C exception object.
 *
 * https://docs.microsoft.com/en-us/windows/win32/api/errhandlingapi/nf-errhandlingapi-setunhandledexceptionfilter
 */
LONG WINAPI _objc_unhandled_exception_filter(struct _EXCEPTION_POINTERS* exceptionInfo)
{
const EXCEPTION_RECORD* ex = exceptionInfo->ExceptionRecord;
// Only act when the record carries the MSVC C++ magic values written by
// objc_exception_throw and a hook is actually installed.
if (_objc_unexpected_exception != 0
&& ex->ExceptionCode == EH_EXCEPTION_NUMBER
&& ex->ExceptionInformation[0] == EH_MAGIC_NUMBER1
&& ex->NumberParameters >= 3)
{
// On 64-bit platforms, thrown exception catch data are relative virtual addresses off the module base.
intptr_t imageBase = ex->NumberParameters >= 4 ? (intptr_t)(ex->ExceptionInformation[3]) : 0;
auto throwInfo = reinterpret_cast<_MSVC_ThrowInfo*>(ex->ExceptionInformation[2]);
if (throwInfo && throwInfo->pCatchableTypeArray) {
auto catchableTypes = rebase_and_cast<_MSVC_CatchableTypeArray*>(imageBase, throwInfo->pCatchableTypeArray);
// Scan the catchable-type list for the objc_object* descriptor to
// distinguish ObjC throws from ordinary C++ exceptions.
bool foundobjc_object = false;
for (int i = 0; i < catchableTypes->count; ++i) {
const _MSVC_CatchableType* catchableType = rebase_and_cast<_MSVC_CatchableType*>(imageBase, catchableTypes->types[i]);
const _MSVC_TypeDescriptor* typeDescriptor = rebase_and_cast<_MSVC_TypeDescriptor*>(imageBase, catchableType->type);
if (strcmp(typeDescriptor->name, mangleObjcObject().c_str()) == 0) {
foundobjc_object = true;
break;
}
}
if (foundobjc_object) {
// ExceptionInformation[1] holds &object from objc_exception_throw.
id exception = *reinterpret_cast<id*>(ex->ExceptionInformation[1]);
_objc_unexpected_exception(exception);
}
}
}
// call original exception filter if any
if (originalUnhandledExceptionFilter) {
return originalUnhandledExceptionFilter(exceptionInfo);
}
// EXCEPTION_CONTINUE_SEARCH instructs the exception handler to continue searching for appropriate exception handlers.
// Since this is the last one, it is not likely to find any more.
return EXCEPTION_CONTINUE_SEARCH;
}
/**
 * Installs `handler` as the uncaught-exception hook and registers our
 * process-wide unhandled-exception filter (chaining to any pre-existing
 * filter).  Returns the previously installed hook.
 */
OBJC_PUBLIC extern "C" objc_uncaught_exception_handler objc_setUncaughtExceptionHandler(objc_uncaught_exception_handler handler)
{
objc_uncaught_exception_handler previousHandler = __atomic_exchange_n(&_objc_unexpected_exception, handler, __ATOMIC_SEQ_CST);
// set unhandled exception filter to support hook
LPTOP_LEVEL_EXCEPTION_FILTER previousExceptionFilter = SetUnhandledExceptionFilter(&_objc_unhandled_exception_filter);
// Avoid chaining to ourselves if called more than once.
if (previousExceptionFilter != &_objc_unhandled_exception_filter) {
originalUnhandledExceptionFilter = previousExceptionFilter;
}
return previousHandler;
}

@ -0,0 +1,639 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include "objc/runtime.h"
#include "objc/encoding.h"
#include "method.h"
#include "visibility.h"
#ifdef max
# undef max
#endif
size_t objc_alignof_type (const char *type);
// It would be so nice if this works, but in fact it returns nonsense:
//#define alignof(x) __alignof__(x)
//
#define alignof(type) __builtin_offsetof(struct { const char c; type member; }, member)
/**
 * Skips any leading method-qualifier characters (const/in/out/inout/bycopy/
 * oneway/byref/atomic) and returns a pointer to the first real type code.
 */
OBJC_PUBLIC
const char *objc_skip_type_qualifiers (const char *type)
{
	static const char *type_qualifiers = "rnNoORVA";
	for ( ; ('\0' != *type) && (NULL != strchr(type_qualifiers, *type)) ; type++)
	{
		// Nothing to do: the loop header consumes one qualifier per step.
	}
	return type;
}
static const char *sizeof_type(const char *type, size_t *size);
/**
 * Returns a pointer past the first complete type encoding in `type`
 * (the computed size is discarded).
 */
OBJC_PUBLIC
const char *objc_skip_typespec(const char *type)
{
size_t ignored = 0;
return sizeof_type(type, &ignored);
}
/**
 * Skips one argument spec: a type encoding followed by its decimal frame
 * offset digits.
 * NOTE(review): *type should be cast to unsigned char before isdigit to be
 * strictly conforming — confirm inputs are always ASCII encodings.
 */
OBJC_PUBLIC
const char *objc_skip_argspec(const char *type)
{
type = objc_skip_typespec(type);
while(isdigit(*type)) { type++; }
return type;
}
/**
 * Returns the length, in characters, of the first complete type encoding in
 * `types`, or 0 for NULL or empty input.
 */
PRIVATE size_t lengthOfTypeEncoding(const char *types)
{
if ((NULL == types) || ('\0' == types[0])) { return 0; }
const char *end = objc_skip_typespec(types);
size_t length = end - types;
return length;
}
/**
 * Returns a heap-allocated, NUL-terminated copy of the first complete type
 * encoding in `types`.  The caller owns the buffer and must free() it.
 * Returns NULL if allocation fails.
 */
static char* copyTypeEncoding(const char *types)
{
	size_t length = lengthOfTypeEncoding(types);
	char *copy = malloc(length + 1);
	// Bug fix: propagate allocation failure instead of writing through NULL.
	if (NULL == copy)
	{
		return NULL;
	}
	memcpy(copy, types, length);
	copy[length] = '\0';
	return copy;
}
/**
 * Returns a pointer to the start of the encoding for parameter `index` in a
 * method type string, or NULL if the string ends first.  The return type is
 * the first entry in the signature, so index+1 argspecs are skipped.
 */
static const char * findParameterStart(const char *types, unsigned int index)
{
	unsigned int skipped = 0;
	while (skipped <= index)
	{
		types = objc_skip_argspec(types);
		if ('\0' == *types)
		{
			return NULL;
		}
		skipped++;
	}
	return types;
}
// Callback used by the composite-type parsers: consumes one encoding from its
// first argument and accumulates into the opaque context (size/alignment).
typedef const char *(*type_parser)(const char*, void*);
/**
 * Parses "[<count><element>]": returns the element count and advances *type
 * past the closing ']'.  The callback consumes the element type encoding.
 */
static int parse_array(const char **type, type_parser callback, void *context)
{
// skip [
(*type)++;
int element_count = (int)strtol(*type, (char**)type, 10);
*type = callback(*type, context);
// skip ]
(*type)++;
return element_count;
}
/**
 * Parses "{name=fields}" or "(name=fields)" (endchar selects '}' or ')'),
 * invoking `callback` once per field.  Opaque types ("{name}") consume the
 * closing character and return immediately.
 */
static void parse_struct_or_union(const char **type, type_parser callback, void *context, char endchar)
{
// Skip the ( and structure name
do
{
(*type)++;
// Opaque type has no =definition
if (endchar == **type) { (*type)++; return; }
} while('=' != **type);
// Skip =
(*type)++;
while (**type != endchar)
{
// Structure elements sometimes have their names in front of each
// element, as in {NSPoint="x"f"y"f} - We need to skip the type name
// here.
//
// TODO: In a future version we should provide a callback that lets
// users of this code get the field name
if ('"'== **type)
{
do
{
(*type)++;
} while ('"' != **type);
// Skip the closing "
(*type)++;
}
*type = callback(*type, context);
}
// skip }
(*type)++;
}
// Convenience wrapper: union encodings are "(...)".
static void parse_union(const char **type, type_parser callback, void *context)
{
parse_struct_or_union(type, callback, context, ')');
}
// Convenience wrapper: struct encodings are "{...}".
static void parse_struct(const char **type, type_parser callback, void *context)
{
parse_struct_or_union(type, callback, context, '}');
}
/**
 * Rounds *v up to the next multiple of b.  A zero b (or *v already a
 * multiple) leaves the value unchanged.
 */
inline static void round_up(size_t *v, size_t b)
{
	if (0 == b)
	{
		return;
	}
	size_t remainder = *v % b;
	if (remainder != 0)
	{
		*v += b - remainder;
	}
}
// Returns the larger of two sizes.
inline static size_t max(size_t a, size_t b)
{
	if (a < b)
	{
		return b;
	}
	return a;
}
/**
 * Given an encoding starting at '@', skips the optional extended suffix:
 * "?<signature>" for blocks or "\"ClassName\"" for typed objects.  Returns a
 * pointer to the last character of the consumed span (callers add 1).
 */
static const char *skip_object_extended_qualifiers(const char *enc)
{
	const char next = enc[1];
	if (next == '?')
	{
		// Block: '@' then '?', optionally followed by '<signature>'.
		enc++;
		if (enc[1] == '<')
		{
			enc += 2;
			while ('>' != *enc)
			{
				enc++;
			}
		}
		return enc;
	}
	if (next == '"')
	{
		// Typed object: skip to the closing quote of the class name.
		enc += 2;
		while ('"' != *enc)
		{
			enc++;
		}
		return enc;
	}
	// Plain '@': nothing to skip.
	return enc;
}
static const char *sizeof_union_field(const char *type, size_t *size);
/**
 * Accumulates the size, in BITS, of the first type encoding in `type` into
 * *size (after rounding *size up to the type's alignment), and returns a
 * pointer past the consumed encoding.  Aborts on an unknown encoding.
 */
static const char *sizeof_type(const char *type, size_t *size)
{
type = objc_skip_type_qualifiers(type);
switch (*type)
{
// For all primitive types, we round up the current size to the
// required alignment of the type, then add the size
#define APPLY_TYPE(typeName, name, capitalizedName, encodingChar) \
case encodingChar:\
{\
round_up(size, (alignof(typeName) * 8));\
*size += (sizeof(typeName) * 8);\
return type + 1;\
}
#define SKIP_ID 1
#define NON_INTEGER_TYPES 1
#include "type_encoding_cases.h"
case '@':
{
// Objects are pointer-sized; skip any block-signature or class-name
// suffix attached to the '@'.
round_up(size, (alignof(id) * 8));
*size += (sizeof(id) * 8);
return skip_object_extended_qualifiers(type) + 1;
}
case '?':
case 'v': return type+1;
case 'j':
{
// _Complex: 'j' prefixes the element type code.
// NOTE(review): if the inner switch does not match, control falls
// through into the '{' case below — presumably unreachable for
// well-formed encodings; confirm.
type++;
switch (*type)
{
#define APPLY_TYPE(typeName, name, capitalizedName, encodingChar) \
case encodingChar:\
{\
round_up(size, (alignof(_Complex typeName) * 8));\
*size += (sizeof(_Complex typeName) * 8);\
return type + 1;\
}
#include "type_encoding_cases.h"
}
}
case '{':
{
const char *t = type;
parse_struct(&t, (type_parser)sizeof_type, size);
// Round the struct total up to the struct's own alignment.
size_t align = objc_alignof_type(type);
round_up(size, align * 8);
return t;
}
case '[':
{
const char *t = type;
size_t element_size = 0;
// FIXME: aligned size
int element_count = parse_array(&t, (type_parser)sizeof_type, &element_size);
(*size) += element_size * element_count;
return t;
}
case '(':
{
// Union size is the size of the largest member (see sizeof_union_field).
const char *t = type;
size_t union_size = 0;
parse_union(&t, (type_parser)sizeof_union_field, &union_size);
*size += union_size;
return t;
}
case 'b':
{
// Consume the b
type++;
// Ignore the offset
strtol(type, (char**)&type, 10);
// Consume the element type
type++;
// Read the number of bits
*size += strtol(type, (char**)&type, 10);
return type;
}
case '^':
{
// All pointers look the same to me.
// NOTE(review): unlike other cases, the pointer size is added without
// first rounding up to pointer alignment — confirm this is intended.
*size += sizeof(void*) * 8;
size_t ignored = 0;
// Skip the definition of the pointeee type.
return sizeof_type(type+1, &ignored);
}
}
abort();
return NULL;
}
/**
 * Union-member callback for parse_union: measures one member and widens the
 * running union size to the largest member seen so far.
 */
static const char *sizeof_union_field(const char *type, size_t *size)
{
	size_t member_bits = 0;
	const char *next = sizeof_type(type, &member_bits);
	if (member_bits > *size)
	{
		*size = member_bits;
	}
	return next;
}
/**
 * Folds the alignment (in bits) of the first type encoding in `type` into
 * *align as a running maximum, and returns a pointer past the consumed
 * encoding.  Aborts on an unknown encoding.
 */
static const char *alignof_type(const char *type, size_t *align)
{
	type = objc_skip_type_qualifiers(type);
	switch (*type)
	{
		// For all primitive types, we return the maximum of the new alignment
		// and the old one
#define APPLY_TYPE(typeName, name, capitalizedName, encodingChar) \
		case encodingChar:\
		{\
			*align = max((alignof(typeName) * 8), *align);\
			return type + 1;\
		}
#define NON_INTEGER_TYPES 1
#define SKIP_ID 1
#include "type_encoding_cases.h"
		case '@':
		{
			// Objects are pointer-aligned; skip any block-signature or
			// class-name suffix attached to the '@'.
			*align = max((alignof(id) * 8), *align);
			return skip_object_extended_qualifiers(type) + 1;
		}
		case '?':
		case 'v': return type+1;
		case 'j':
		{
			// _Complex: 'j' prefixes the element type code.
			type++;
			switch (*type)
			{
#define APPLY_TYPE(typeName, name, capitalizedName, encodingChar) \
				case encodingChar:\
				{\
					*align = max((alignof(_Complex typeName) * 8), *align);\
					return type + 1;\
				}
#include "type_encoding_cases.h"
			}
		}
		case '{':
		{
			// Struct alignment is the maximum alignment of its fields.
			const char *t = type;
			parse_struct(&t, (type_parser)alignof_type, align);
			return t;
		}
		case '(':
		{
			const char *t = type;
			parse_union(&t, (type_parser)alignof_type, align);
			return t;
		}
		case '[':
		{
			const char *t = type;
			// Bug fix: pass the accumulator itself, matching sizeof_type.
			// The old code passed &align (a size_t**), so the element's
			// alignment was written over this frame's local pointer instead
			// of into the caller's accumulator, leaving *align untouched for
			// array encodings.
			parse_array(&t, (type_parser)alignof_type, align);
			return t;
		}
		case 'b':
		{
			// Consume the b
			type++;
			// Ignore the offset
			strtol(type, (char**)&type, 10);
			// Alignment of a bitfield is the alignment of the type that
			// contains it
			type = alignof_type(type, align);
			// Ignore the number of bits
			strtol(type, (char**)&type, 10);
			return type;
		}
		case '^':
		{
			*align = max((alignof(void*) * 8), *align);
			// All pointers look the same to me.
			size_t ignored = 0;
			// Skip the definition of the pointeee type.
			return alignof_type(type+1, &ignored);
		}
	}
	abort();
	return NULL;
}
/**
 * Returns the size, in bytes, of the first type encoding in `type`
 * (sizes are accumulated internally in bits).
 */
OBJC_PUBLIC
size_t objc_sizeof_type(const char *type)
{
size_t size = 0;
sizeof_type(type, &size);
return size / 8;
}
/**
 * Returns the alignment requirement, in bytes, of the first type encoding.
 */
OBJC_PUBLIC
size_t objc_alignof_type (const char *type)
{
size_t align = 0;
alignof_type(type, &align);
return align / 8;
}
/**
 * Returns the size padded for alignment.
 * NOTE(review): `size + (size % align)` is not a round-up to a multiple of
 * align (e.g. size 9, align 8 yields 10, not 16) — presumably kept for
 * compatibility with historical runtime behaviour; confirm before changing.
 */
OBJC_PUBLIC
size_t objc_aligned_size(const char *type)
{
size_t size = objc_sizeof_type(type);
size_t align = objc_alignof_type(type);
return size + (size % align);
}
/**
 * Returns the size padded for promotion to a word-sized argument slot.
 * NOTE(review): same non-round-up formula as objc_aligned_size — confirm.
 */
OBJC_PUBLIC
size_t objc_promoted_size(const char *type)
{
size_t size = objc_sizeof_type(type);
return size + (size % sizeof(void*));
}
/**
 * Copies the method's return-type encoding (the first entry of the signature)
 * into dst.  If it fits in dst_len, it is NUL-terminated; otherwise exactly
 * dst_len bytes are copied and dst is NOT NUL-terminated.  No-op for NULL.
 */
OBJC_PUBLIC
void method_getReturnType(Method method, char *dst, size_t dst_len)
{
if (NULL == method) { return; }
//TODO: Copied and pasted code. Factor it out.
const char *types = method_getTypeEncoding(method);
size_t length = lengthOfTypeEncoding(types);
if (length < dst_len)
{
memcpy(dst, types, length);
dst[length] = '\0';
}
else
{
// Truncated: the caller receives dst_len bytes with no terminator.
memcpy(dst, types, dst_len);
}
}
/**
 * Returns the type-encoding string registered with the method's (typed)
 * selector, or NULL for a NULL method.
 */
OBJC_PUBLIC
const char *method_getTypeEncoding(Method method)
{
	return (NULL == method) ? NULL : sel_getType_np(method->selector);
}
/**
 * Copies the encoding of argument `index` into dst.  Writes an empty string
 * when the argument does not exist (and dst_len > 0).  If the encoding fits
 * it is NUL-terminated; otherwise exactly dst_len bytes are copied with no
 * terminator.  No-op for a NULL method.
 */
OBJC_PUBLIC
void method_getArgumentType(Method method,
unsigned int index,
char *dst,
size_t dst_len)
{
if (NULL == method) { return; }
const char *types = findParameterStart(method_getTypeEncoding(method), index);
if (NULL == types)
{
if (dst_len > 0)
{
*dst = '\0';
}
return;
}
size_t length = lengthOfTypeEncoding(types);
if (length < dst_len)
{
memcpy(dst, types, length);
dst[length] = '\0';
}
else
{
// Truncated: the caller receives dst_len bytes with no terminator.
memcpy(dst, types, dst_len);
}
}
/**
 * Returns the number of arguments the method takes (each entry in the type
 * encoding minus one for the return type).  Returns 0 for a NULL method or
 * a missing/empty type encoding.
 */
OBJC_PUBLIC
unsigned method_getNumberOfArguments(Method method)
{
	if (NULL == method) { return 0; }
	const char *types = method_getTypeEncoding(method);
	// Bug fix: the old code dereferenced a NULL encoding and returned
	// UINT_MAX (0 - 1) for an empty one.
	if ((NULL == types) || ('\0' == *types))
	{
		return 0;
	}
	unsigned int count = 0;
	while('\0' != *types)
	{
		types = objc_skip_argspec(types);
		count++;
	}
	return count - 1;
}
/**
 * Legacy GCC-runtime-compatible alias for method_getNumberOfArguments().
 */
OBJC_PUBLIC
unsigned method_get_number_of_arguments(struct objc_method *method)
{
return method_getNumberOfArguments(method);
}
/**
 * Returns a freshly malloc'd copy of the encoding for argument `index`, or
 * NULL when the method or argument does not exist.  Caller must free().
 */
OBJC_PUBLIC
char* method_copyArgumentType(Method method, unsigned int index)
{
	if (NULL == method)
	{
		return NULL;
	}
	const char *argStart = findParameterStart(method_getTypeEncoding(method), index);
	return (NULL == argStart) ? NULL : copyTypeEncoding(argStart);
}
/**
 * Returns a freshly malloc'd copy of the method's return-type encoding (the
 * first entry of the signature), or NULL for a NULL method.  Caller frees.
 */
OBJC_PUBLIC
char* method_copyReturnType(Method method)
{
	return (NULL == method) ? NULL : copyTypeEncoding(method_getTypeEncoding(method));
}
/**
 * Returns the _F_* qualifier flags encoded by the leading qualifier
 * characters of `type`, stopping at the first non-qualifier character.
 */
OBJC_PUBLIC
unsigned objc_get_type_qualifiers (const char *type)
{
	unsigned flags = 0;
	for (;;)
	{
		switch (*(type++))
		{
			case 'r': flags |= _F_CONST; break;
			case 'n': flags |= _F_IN; break;
			case 'o': flags |= _F_OUT; break;
			case 'N': flags |= _F_INOUT; break;
			case 'O': flags |= _F_BYCOPY; break;
			case 'V': flags |= _F_ONEWAY; break;
			case 'R': flags |= _F_BYREF; break;
			default: return flags;
		}
	}
}
// Note: The implementations of these functions is horrible.
/**
 * Begins iterating the members of a struct type encoding.  Initialises
 * `layout`; advance with objc_layout_structure_next_member().
 */
OBJC_PUBLIC
void objc_layout_structure (const char *type,
struct objc_struct_layout *layout)
{
layout->original_type = type;
// NULL marks "no current member yet".
layout->type = 0;
}
/**
 * parse_struct callback used by the iterator.  Members at or before the
 * previous position accumulate their aligned sizes (so record_size becomes
 * the next member's bit offset); the first member past the previous position
 * is recorded as the current one.
 * NOTE(review): the branches compare raw pointers into a single encoding
 * string and are asymmetric — confirm behaviour against callers before any
 * refactoring.
 */
static const char *layout_structure_callback(const char *type, struct objc_struct_layout *layout)
{
size_t align = 0;
size_t size = 0;
const char *end = sizeof_type(type, &size);
alignof_type(type, &align);
//printf("Callback called with %s\n", type);
if (layout->prev_type < type)
{
if (layout->record_align == 0)
{
layout->record_align = align;
layout->type = type;
}
}
else
{
size_t rsize = (size_t)layout->record_size;
round_up(&rsize, align);
layout->record_size = rsize + size;
}
return end;
}
/**
 * Advances to the next member by re-parsing the whole struct encoding.
 * Returns NO when the recorded member pointer no longer moves (iteration
 * complete).
 */
OBJC_PUBLIC
BOOL objc_layout_structure_next_member(struct objc_struct_layout *layout)
{
const char *end = layout->type;
layout->record_size = 0;
layout->record_align = 0;
layout->prev_type = layout->type;
const char *type = layout->original_type;
parse_struct(&type, (type_parser)layout_structure_callback, layout);
//printf("Calculated: (%s) %s %d %d\n", layout->original_type, layout->type, layout->record_size, layout->record_align);
//printf("old start %s, new start %s\n", end, layout->type);
return layout->type != end;
}
/**
 * Reports the current member's type encoding, byte offset (rounded up to the
 * member's alignment), and byte alignment.
 */
OBJC_PUBLIC
void objc_layout_structure_get_info (struct objc_struct_layout *layout,
unsigned int *offset,
unsigned int *align,
const char **type)
{
//printf("%p\n", layout);
*type = layout->type;
size_t off = layout->record_size / 8;
*align= layout->record_align / 8;
round_up(&off, (size_t)*align);
*offset = (unsigned int)off;
}
#ifdef ENCODING_TESTS
// Compares the runtime's computed size/alignment of a type against the
// compiler's own sizeof/alignof, via the compiler-produced @encode string.
#define TEST(type) do {\
if (alignof(type) != objc_alignof_type(@encode(type)))\
printf("Incorrect alignment for %s: %d != %d\n", @encode(type), objc_alignof_type(@encode(type)), alignof(type));\
if (sizeof(type) != objc_sizeof_type(@encode(type)))\
printf("Incorrect size for %s: %d != %d\n", @encode(type), objc_sizeof_type(@encode(type)), sizeof(type));\
} while(0)
// NOTE(review): the printf formats use %d for size_t values; %zu would be the
// portable spelling — test-only code, left as-is.
// Deliberately awkward layout: arrays, bitfields, nested struct/union,
// complex types — exercises every branch of the encoding parser.
struct foo
{
int a[2];
int b:5;
struct
{
double d;
const char *str;
float e;
}c;
long long **g;
union { const char c; long long b; } h;
long long f;
_Complex int z;
_Complex double y;
char v;
};
typedef struct
{
float x,y;
} Point;
typedef struct
{
Point a, b;
} Rect;
int main(void)
{
TEST(int);
TEST(const char);
TEST(unsigned long long);
TEST(_Complex int);
TEST(struct foo);
// Walk Rect's members (and nested Point members) with the layout iterator,
// printing offsets/alignments for manual comparison with offsetof below.
struct objc_struct_layout layout;
objc_layout_structure(@encode(Rect), &layout);
while (objc_layout_structure_next_member (&layout))
{
unsigned offset;
unsigned align;
const char *ftype;
struct objc_struct_layout layout2;
objc_layout_structure_get_info (&layout, &offset, &align, &ftype);
printf("%s: offset: %d, alignment: %d\n", ftype, offset, align);
objc_layout_structure(ftype, &layout2);
while (objc_layout_structure_next_member (&layout2))
{
objc_layout_structure_get_info (&layout2, &offset, &align, &ftype);
printf("%s: offset: %d, alignment: %d\n", ftype, offset, align);
}
}
printf("%d\n", offsetof(Rect, a.x));
printf("%d\n", offsetof(Rect, a.y));
printf("%d\n", offsetof(Rect, b.x));
printf("%d\n", offsetof(Rect, b.y));
}
#endif

@ -0,0 +1,62 @@
#include "objc/runtime.h"
#include "class.h"
// Opaque forward declaration: NSZone is only passed through, never used.
typedef struct _NSZone NSZone;
// Informal declarations of the root-class methods that the fast paths below
// fall back to when a class has not opted in to the fast path.
@interface RootMethods
- (id)alloc;
- (id)allocWithZone: (NSZone*)aZone;
- (id)init;
@end
#include <stdio.h>
/**
 * Equivalent to [cls alloc]. If there's a fast path opt-in, then this skips the message send.
 */
OBJC_PUBLIC
id
objc_alloc(Class cls)
{
// Ensure +initialize has run: the fast path may bypass the message send
// that would normally trigger it.
if (UNLIKELY(!objc_test_class_flag(cls->isa, objc_class_flag_initialized)))
{
objc_send_initialize(cls);
}
if (objc_test_class_flag(cls->isa, objc_class_flag_fast_alloc_init))
{
return class_createInstance(cls, 0);
}
return [cls alloc];
}
/**
 * Equivalent to [cls allocWithZone: null]. If there's a fast path opt-in, then this skips the message send.
 */
OBJC_PUBLIC
id
objc_allocWithZone(Class cls)
{
if (UNLIKELY(!objc_test_class_flag(cls->isa, objc_class_flag_initialized)))
{
objc_send_initialize(cls);
}
if (objc_test_class_flag(cls->isa, objc_class_flag_fast_alloc_init))
{
return class_createInstance(cls, 0);
}
return [cls allocWithZone: NULL];
}
/**
 * Equivalent to [[cls alloc] init]. If there's a fast path opt-in, then this
 * skips the message send.
 */
OBJC_PUBLIC
id
objc_alloc_init(Class cls)
{
id instance = objc_alloc(cls);
// NOTE(review): the flag is tested on cls here, but on cls->isa in
// objc_alloc/objc_allocWithZone above — confirm which is intended.
if (objc_test_class_flag(cls, objc_class_flag_fast_alloc_init))
{
return instance;
}
return [instance init];
}

@ -0,0 +1,121 @@
#include "visibility.h"
#include "objc/runtime.h"
#include "gc_ops.h"
#include "class.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/**
 * Allocates zeroed storage for an instance of `cls` plus `extraBytes`,
 * prepending one hidden intptr_t word reserved for the runtime (free_object
 * steps back over it), and returns a pointer just past that word.
 */
static id allocate_class(Class cls, size_t extraBytes)
{
size_t size = cls->instance_size + extraBytes + sizeof(intptr_t);
intptr_t *addr =
#ifdef _WIN32
// Malloc on Windows doesn't guarantee 32-byte alignment, but we
// require this for any class that may contain vectors
_aligned_malloc(size, 32);
// NOTE(review): the _aligned_malloc result is not checked before memset,
// so an out-of-memory condition crashes here — confirm this is acceptable.
memset(addr, 0, size);
#else
calloc(size, 1);
#endif
return (id)(addr + 1);
}
/**
 * Frees an object allocated by allocate_class, stepping back over the hidden
 * intptr_t word to recover the pointer the allocator actually returned.
 */
static void free_object(id obj)
{
	intptr_t *base = ((intptr_t*)obj) - 1;
#ifdef _WIN32
	_aligned_free((void*)base);
#else
	free((void*)base);
#endif
}
// Zero-initialised heap allocation used for runtime metadata when no
// collector is present.
static void *alloc(size_t nbytes)
{
	return calloc(1, nbytes);
}
void objc_registerThreadWithCollector(void) {}
void objc_unregisterThreadWithCollector(void) {}
void objc_assertRegisteredThreadWithCollector() {}
PRIVATE struct gc_ops gc_ops_none =
{
.allocate_class = allocate_class,
.free_object = free_object,
.malloc = alloc,
.free = free
};
PRIVATE struct gc_ops *gc = &gc_ops_none;
void objc_set_collection_threshold(size_t threshold) {}
void objc_set_collection_ratio(size_t ratio) {}
void objc_collect(unsigned long options) {}
BOOL objc_collectingEnabled(void) { return NO; }
BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation)
{
return __sync_bool_compare_and_swap(objectLocation, predicate, replacement);
}
BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation)
{
return __sync_bool_compare_and_swap(objectLocation, predicate, replacement);
}
BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation)
{
return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation)
{
return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation)
{
return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation)
{
return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
/* GC write-barrier functions.  Without a collector a "barriered" store is
 * just a plain store; each function returns the value it stored, matching
 * the Apple GC API contract. */
id objc_assign_strongCast(id val, id *ptr)
{
	*ptr = val;
	return val;
}
id objc_assign_global(id val, id *ptr)
{
	*ptr = val;
	return val;
}
// Stores `val` into the ivar at byte `offset` inside instance `dest`.
id objc_assign_ivar(id val, id dest, ptrdiff_t offset)
{
	*(id*)((char*)dest+offset) = val;
	return val;
}
// Barrier-free bulk copy; memmove so overlapping regions are handled.
void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
	return memmove(dst, src, size);
}
/* GC weak-reference accessors.  With no collector, "weak" degrades to a
 * plain unretained load/store (these are the GC-era entry points, distinct
 * from the ARC objc_loadWeak/objc_storeWeak machinery). */
id objc_read_weak(id *location)
{
	return *location;
}
id objc_assign_weak(id value, id *location)
{
	*location = value;
	return value;
}
// GC-era allocation entry point: defers to the normal instance constructor,
// with `extra` indexed-ivar bytes appended.
id objc_allocate_object(Class cls, int extra)
{
	return class_createInstance(cls, extra);
}
// Remaining GC API surface: collection never runs, so these all answer
// "no collector" or do nothing.
BOOL objc_collecting_enabled(void) { return NO; }
void objc_startCollectorThread(void) {}
void objc_clear_stack(unsigned long options) {}
BOOL objc_is_finalized(void *ptr) { return NO; }
void objc_finalizeOnMainThread(Class cls) {}

@ -0,0 +1,3 @@
#include "objc/runtime.h"
#define OBJC_HOOK OBJC_PUBLIC
#include "objc/hooks.h"

@ -0,0 +1,190 @@
#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "objc/runtime.h"
#include "objc/objc-arc.h"
#include "class.h"
#include "visibility.h"
#include "gc_ops.h"
#include "legacy.h"
ptrdiff_t objc_alignof_type(const char *);
ptrdiff_t objc_sizeof_type(const char *);
/* Computes the runtime offset of every instance variable in `class` and
 * fixes up class->instance_size.  For non-fragile-ABI classes the compiler
 * emits offsets relative to the start of this class's own ivars; this
 * function rebases them on the (recursively computed) superclass size and
 * re-inserts alignment padding. */
PRIVATE void objc_compute_ivar_offsets(Class class)
{
	if (class->ivars == NULL)
	{
		// No ivars of our own: the instance size is the superclass's.
		Class super_class = class_getSuperclass(class);
		if (super_class != Nil)
		{
			class->instance_size = super_class->instance_size;
		}
		return;
	}
	// Only the current objc_ivar layout is supported here.
	if (class->ivars->size != sizeof(struct objc_ivar))
	{
		fprintf(stderr, "Downgrading ivar struct not yet implemented");
		abort();
	}
	int i = 0;
	/* If this class was compiled with support for late-bound ivars, the
	 * instance_size field will contain 0 - {the size of the instance variables
	 * declared for just this class}. The individual instance variable offset
	 * fields will then be the offsets from the start of the class, and so must
	 * have the size of the parent class prepended. */
	if (class->instance_size <= 0)
	{
		Class super = class_getSuperclass(class);
		long ivar_start = 0;
		if (Nil != super)
		{
			// Ensure the superclass's layout is final before we build on it.
			if (super->instance_size <= 0)
			{
				objc_compute_ivar_offsets(super);
			}
			ivar_start = super->instance_size;
		}
		class->instance_size = ivar_start;
		/* For each instance variable, we add the offset if required (it will be zero
		 * if this class is compiled with a static ivar layout). We then set the
		 * value of a global variable to the offset value.
		 *
		 * Any class compiled with support for the non-fragile ABI, but not actually
		 * using it, will export the ivar offset field as a symbol.
		 *
		 * Note that using non-fragile ivars breaks @defs(). If you need equivalent
		 * functionality, provide an alternative @interface with all variables
		 * declared @public.
		 */
		if (class->ivars)
		{
			// If the first instance variable had any alignment padding, then we need
			// to discard it. We will recompute padding ourself later.
			long next_ivar = ivar_start;
			long last_offset = LONG_MIN;
			long last_size = 0;
			long last_computed_offset = -1;
			// The reference count word the runtime stores before the object.
			size_t refcount_size = sizeof(uintptr_t);
			for (i = 0 ; i < class->ivars->count ; i++)
			{
				struct objc_ivar *ivar = ivar_at_index(class->ivars, i);
				// Clang 7 and 8 have a bug where the size of _Bool is encoded
				// as 0, not 1. Silently fix this up when we see it.
				if (ivar->size == 0 && ivar->type[0] == 'B')
				{
					ivar->size = 1;
				}
				// We are going to be allocating an extra word for the reference count
				// in front of the object. This doesn't matter for aligment most of
				// the time, but if we have an instance variable that is a vector type
				// then we will need to ensure that we are properly aligned again.
				long ivar_size = ivar->size;
				// Bitfields have the same offset - the base of the variable
				// that contains them. If we are in a bitfield, then we need
				// to make sure that we don't add any displacement from the
				// previous value.
				if (*ivar->offset < last_offset + last_size)
				{
					// Rebase this bitfield member on the container's new
					// offset and consume no additional space.
					*ivar->offset = last_computed_offset + (*ivar->offset - last_offset);
					ivar_size = 0;
					continue;
				}
				last_offset = *ivar->offset;
				*ivar->offset = next_ivar;
				last_computed_offset = *ivar->offset;
				next_ivar += ivar_size;
				last_size = ivar->size;
				// Re-pad so that (offset + refcount word) is aligned for this
				// ivar's type.
				size_t align = ivarGetAlign(ivar);
				if ((*ivar->offset + refcount_size) % align != 0)
				{
					long padding = align - ((*ivar->offset + refcount_size) % align);
					*ivar->offset += padding;
					class->instance_size += padding;
					next_ivar += padding;
				}
				assert((*ivar->offset + sizeof(uintptr_t)) % ivarGetAlign(ivar) == 0);
				class->instance_size += ivar_size;
			}
#ifdef OLDABI_COMPAT
			// If we have a legacy ivar list, update the offset in it too -
			// code from older compilers may access this directly!
			struct objc_class_gsv1* legacy = objc_legacy_class_for_class(class);
			if (legacy)
			{
				for (i = 0 ; i < class->ivars->count ; i++)
				{
					legacy->ivars->ivar_list[i].offset = *ivar_at_index(class->ivars, i)->offset;
				}
			}
#endif
		}
	}
}
////////////////////////////////////////////////////////////////////////////////
// Public API functions
////////////////////////////////////////////////////////////////////////////////
/**
 * Stores `value` into `ivar` of `object`, honouring the ivar's declared
 * ARC ownership (strong stores retain/release, weak stores go through the
 * weak-reference table, anything else is a plain unretained store).
 */
void object_setIvar(id object, Ivar ivar, id value)
{
	id *slot = (id*)((char*)object + ivar_getOffset(ivar));
	objc_ivar_ownership ownership = ivarGetOwnership(ivar);
	if (ownership == ownership_strong)
	{
		objc_storeStrong(slot, value);
	}
	else if (ownership == ownership_weak)
	{
		objc_storeWeak(slot, value);
	}
	else
	{
		// ownership_unsafe / ownership_invalid: unretained assignment.
		*slot = value;
	}
}
/**
 * Looks up the instance variable called `name` on `obj`'s class and copies
 * the data at `value` into it.  Object-typed ivars go through
 * object_setIvar() so ownership is respected; everything else is a raw
 * memcpy of the encoded type's size.  Returns the Ivar, or NULL if the
 * class declares no such instance variable.
 */
Ivar object_setInstanceVariable(id obj, const char *name, void *value)
{
	Ivar ivar = class_getInstanceVariable(object_getClass(obj), name);
	// Previously a missing ivar fell through to ivar_getTypeEncoding(NULL)
	// and crashed; fail gracefully instead.
	if (NULL == ivar)
	{
		return NULL;
	}
	if (ivar_getTypeEncoding(ivar)[0] == '@')
	{
		object_setIvar(obj, ivar, *(id*)value);
	}
	else
	{
		size_t size = objc_sizeof_type(ivar_getTypeEncoding(ivar));
		memcpy((char*)obj + ivar_getOffset(ivar), value, size);
	}
	return ivar;
}
/**
 * Reads `ivar` from `object`, honouring its ARC ownership: strong values
 * are returned retained+autoreleased, weak values are loaded safely via
 * objc_loadWeak(), and unsafe/invalid ivars are read directly.
 *
 * The original body had dead statements (a `break` and a `return nil` after
 * returns) and could fall off the end of a non-void function if the
 * ownership value was out of range — undefined behaviour.  The switch now
 * returns in every case, with a defensive nil return after it.
 */
id object_getIvar(id object, Ivar ivar)
{
	id *addr = (id*)((char*)object + ivar_getOffset(ivar));
	switch (ivarGetOwnership(ivar))
	{
		case ownership_strong:
			return objc_retainAutoreleaseReturnValue(*addr);
		case ownership_weak:
			return objc_loadWeak(addr);
		case ownership_unsafe:
		case ownership_invalid:
			return *addr;
	}
	// Unreachable for valid ownership values; keeps the function total.
	return nil;
}
/**
 * Looks up the instance variable called `name` on `obj`'s class.  On
 * success, stores the address of the ivar's storage in *outValue (if
 * non-NULL) and returns the Ivar.  Returns NULL — and clears *outValue —
 * when the class has no such instance variable, instead of calling
 * ivar_getOffset() on a NULL Ivar as the original did.
 */
Ivar object_getInstanceVariable(id obj, const char *name, void **outValue)
{
	Ivar ivar = class_getInstanceVariable(object_getClass(obj), name);
	if (NULL == ivar)
	{
		if (NULL != outValue)
		{
			*outValue = NULL;
		}
		return NULL;
	}
	if (NULL != outValue)
	{
		*outValue = (((char*)obj) + ivar_getOffset(ivar));
	}
	return ivar;
}

@ -0,0 +1,461 @@
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "objc/runtime.h"
#include "objc/encoding.h"
#include "legacy.h"
#include "properties.h"
#include "class.h"
#include "loader.h"
PRIVATE size_t lengthOfTypeEncoding(const char *types);
// Flag bits stored in the legacy (GCC / GSv1 ABI) class structure's `info`
// field.  Only the bits this file inspects are declared here.
enum objc_class_flags_gsv1
{
	/** This class structure represents a class. */
	objc_class_flag_class_gsv1 = (1<<0),
	/** This class structure represents a metaclass. */
	objc_class_flag_meta_gsv1 = (1<<1),
	/**
	 * The class uses the new, Objective-C 2, runtime ABI. This ABI defines an
	 * ABI version field inside the class, and so will be used for all
	 * subsequent versions that retain some degree of compatibility.
	 */
	objc_class_flag_new_abi_gsv1 = (1<<4)
};
// True iff every bit of `flag` is set in the legacy class's info field.
static inline BOOL objc_test_class_flag_gsv1(struct objc_class_gsv1 *aClass,
                                             enum objc_class_flags_gsv1 flag)
{
	unsigned long mask = (unsigned long)flag;
	return (aClass->info & mask) == mask;
}
/**
* Checks the version of a class. Return values are:
* 0. Legacy GCC ABI compatible class.
* 1. First release of GNUstep ABI.
* 2. Second release of the GNUstep ABI, adds strong / weak ivar bitmaps.
* 3. Third release of the GNUstep ABI. Many cleanups.
*/
static inline int objc_get_class_version_gsv1(struct objc_class_gsv1 *aClass)
{
	// Classes without the new-ABI flag predate the abi_version field, so
	// they are reported as version 0; otherwise shift the stored version
	// up by one so the GCC ABI keeps slot 0.
	return objc_test_class_flag_gsv1(aClass, objc_class_flag_new_abi_gsv1)
	       ? aClass->abi_version + 1
	       : 0;
}
// Derives the ARC ownership of ivar `idx` from the legacy class's
// strong/weak pointer bitmaps.  Classes older than the second GNUstep ABI
// revision carry no bitmaps, so everything defaults to unsafe-unretained.
static objc_ivar_ownership ownershipForIvar(struct objc_class_gsv1 *cls, int idx)
{
	if (objc_get_class_version_gsv1(cls) < 2)
	{
		return ownership_unsafe;
	}
	return objc_bitfield_test(cls->strong_pointers, idx)
	       ? ownership_strong
	       : (objc_bitfield_test(cls->weak_pointers, idx)
	              ? ownership_weak
	              : ownership_unsafe);
}
/* Converts a legacy (GCC-ABI) ivar list into the modern objc_ivar_list.
 * The legacy list has no per-ivar size, alignment, or ownership, so those
 * are reconstructed: sizes from the gaps between consecutive offsets (with
 * special handling for bitfields, which share an offset), alignment from
 * the type encoding, and ownership from the class's strong/weak bitmaps.
 * Returns a newly calloc'd list, or NULL if the class has no ivars. */
static struct objc_ivar_list *upgradeIvarList(struct objc_class_gsv1 *cls)
{
	struct objc_ivar_list_gcc *l = cls->ivars;
	if (l == NULL)
	{
		return NULL;
	}
	struct objc_ivar_list *n = calloc(1, sizeof(struct objc_ivar_list) +
			l->count*sizeof(struct objc_ivar));
	n->size = sizeof(struct objc_ivar);
	n->count = l->count;
	for (int i=0 ; i<l->count ; i++)
	{
		BOOL isBitfield = NO;
		int bitfieldSize = 0;
		int nextOffset;
		// Bitfields have the same offset, but should have their size set to
		// the size of the bitfield. We calculate the size of the bitfield by
		// looking for the next ivar after the current one that has a different
		// offset.
		if (i+1 < l->count)
		{
			nextOffset = l->ivar_list[i+1].offset;
			if (l->ivar_list[i].offset == l->ivar_list[i+1].offset)
			{
				isBitfield = YES;
				for (int j=i+2 ; j<l->count ; j++)
				{
					if (l->ivar_list[i].offset != l->ivar_list[j].offset)
					{
						bitfieldSize = l->ivar_list[j].offset - l->ivar_list[i].offset;
						break;
					}
				}
				// Bitfield container runs to the end of the instance.
				if (bitfieldSize == 0)
				{
					bitfieldSize = cls->instance_size - l->ivar_list[i].offset;
				}
			}
		}
		else
		{
			// Last ivar: its extent ends at the end of the instance.
			nextOffset = cls->instance_size;
		}
		// Non-fragile classes store a negative instance size (see
		// objc_compute_ivar_offsets); normalise the bound.
		if (nextOffset < 0)
		{
			nextOffset = -nextOffset;
		}
		const char *type = l->ivar_list[i].type;
		int size = nextOffset - l->ivar_list[i].offset;
		n->ivar_list[i].name = l->ivar_list[i].name;
		n->ivar_list[i].type = type;
		n->ivar_list[i].size = isBitfield ? bitfieldSize : size;
		// New-ABI classes keep offsets in a separate exported array; old
		// ones store the offset inline, so point at the legacy slot.
		if (objc_test_class_flag_gsv1(cls, objc_class_flag_new_abi_gsv1))
		{
			n->ivar_list[i].offset = cls->ivar_offsets[i];
		}
		else
		{
			n->ivar_list[i].offset = &l->ivar_list[i].offset;
		}
		ivarSetAlign(&n->ivar_list[i], ((type == NULL) || type[0] == 0) ? __alignof__(void*) : objc_alignof_type(type));
		// Unknown (empty) type encodings: fall back to size as alignment.
		if (type[0] == '\0')
		{
			ivarSetAlign(&n->ivar_list[i], size);
		}
		ivarSetOwnership(&n->ivar_list[i], ownershipForIvar(cls, i));
	}
	return n;
}
/* Converts a legacy (GCC-ABI) method list, and recursively its chained
 * `next` lists, into the modern objc_method_list layout.  Returns NULL for
 * an absent or empty input list.  The new list is heap-allocated; the old
 * one is left untouched. */
static struct objc_method_list *upgradeMethodList(struct objc_method_list_gcc *old)
{
	if (old == NULL)
	{
		return NULL;
	}
	if (old->count == 0)
	{
		return NULL;
	}
	struct objc_method_list *l = calloc(sizeof(struct objc_method_list) + old->count * sizeof(struct objc_method), 1);
	l->count = old->count;
	if (old->next)
	{
		l->next = upgradeMethodList(old->next);
	}
	l->size = sizeof(struct objc_method);
	// Copy each entry field-by-field; the struct layouts differ.
	for (int i=0 ; i<old->count ; i++)
	{
		l->methods[i].imp = old->methods[i].imp;
		l->methods[i].selector = old->methods[i].selector;
		l->methods[i].types = old->methods[i].types;
	}
	return l;
}
// True iff all bits of `attr` are present in the property's attribute byte.
static inline BOOL checkAttribute(char field, int attr)
{
	return attr == (field & attr);
}
/* Upgrades one legacy (GSv1) property descriptor `o` into the modern
 * objc_property `n`.  Two legacy encodings exist:
 *  - name[0] == '\0': the "new-style" legacy form, where `name` actually
 *    points at {'\0', offset-to-name, attribute-string...}.  The attribute
 *    string already carries the canonical T<type>,... encoding, so we only
 *    extract the type and register the accessor selectors, then return.
 *  - otherwise: the old form, where the attribute string must be
 *    synthesised here from the attribute bit-fields and accessor names. */
static void upgradeProperty(struct objc_property *n, struct objc_property_gsv1 *o)
{
	char *typeEncoding;
	ptrdiff_t typeSize;
	if (o->name[0] == '\0')
	{
		n->name = o->name + o->name[1];
		n->attributes = o->name + 2;
		// If we have an attribute string, then it will contain a more accurate
		// version of the types than we'll find in the getter (qualifiers such
		// as _Atomic and volatile may be dropped)
		assert(n->attributes[0] == 'T');
		const char *type_start = &n->attributes[1];
		const char *type_end = strchr(type_start, ',');
		if (type_end == NULL)
		{
			type_end = type_start + strlen(type_start);
		}
		typeSize = type_end - type_start;
		typeEncoding = malloc(typeSize + 1);
		memcpy(typeEncoding, type_start, typeSize);
		typeEncoding[typeSize] = 0;
	}
	else
	{
		// Old form: recover the type from the getter's type encoding.
		typeSize = (ptrdiff_t)lengthOfTypeEncoding(o->getter_types);
		typeEncoding = malloc(typeSize + 1);
		memcpy(typeEncoding, o->getter_types, typeSize);
		typeEncoding[typeSize] = 0;
	}
	n->type = typeEncoding;
	if (o->getter_name)
	{
		n->getter = sel_registerTypedName_np(o->getter_name, o->getter_types);
	}
	if (o->setter_name)
	{
		n->setter = sel_registerTypedName_np(o->setter_name, o->setter_types);
	}
	// New-style legacy properties already have a canonical attribute
	// string; nothing more to build.
	if (o->name[0] == '\0')
	{
		return;
	}
	n->name = o->name;
	const char *name = o->name;
	size_t nameSize = (NULL == name) ? 0 : strlen(name);
	// Encoding is T{type},V{name}, so 4 bytes for the "T,V" that we always
	// need. We also need two bytes for the leading null and the length.
	size_t encodingSize = typeSize + nameSize + 6;
	char flags[20];
	size_t i = 0;
	// Flags that are a comma then a character
	if (checkAttribute(o->attributes, OBJC_PR_readonly))
	{
		flags[i++] = ',';
		flags[i++] = 'R';
	}
	if (checkAttribute(o->attributes, OBJC_PR_retain))
	{
		flags[i++] = ',';
		flags[i++] = '&';
	}
	if (checkAttribute(o->attributes, OBJC_PR_copy))
	{
		flags[i++] = ',';
		flags[i++] = 'C';
	}
	if (checkAttribute(o->attributes2, OBJC_PR_weak))
	{
		flags[i++] = ',';
		flags[i++] = 'W';
	}
	if (checkAttribute(o->attributes2, OBJC_PR_dynamic))
	{
		flags[i++] = ',';
		flags[i++] = 'D';
	}
	if ((o->attributes & OBJC_PR_nonatomic) == OBJC_PR_nonatomic)
	{
		flags[i++] = ',';
		flags[i++] = 'N';
	}
	encodingSize += i;
	flags[i] = '\0';
	size_t setterLength = 0;
	size_t getterLength = 0;
	if ((o->attributes & OBJC_PR_getter) == OBJC_PR_getter)
	{
		getterLength = strlen(o->getter_name);
		encodingSize += 2 + getterLength;
	}
	if ((o->attributes & OBJC_PR_setter) == OBJC_PR_setter)
	{
		setterLength = strlen(o->setter_name);
		encodingSize += 2 + setterLength;
	}
	// Build the canonical attribute string in place.
	unsigned char *encoding = malloc(encodingSize);
	// Set the leading 0 and the offset of the name
	unsigned char *insert = encoding;
	BOOL needsComma = NO;
	*(insert++) = 0;
	*(insert++) = 0;
	// Set the type encoding
	*(insert++) = 'T';
	memcpy(insert, typeEncoding, typeSize);
	insert += typeSize;
	needsComma = YES;
	// Set the flags
	memcpy(insert, flags, i);
	insert += i;
	if ((o->attributes & OBJC_PR_getter) == OBJC_PR_getter)
	{
		if (needsComma)
		{
			*(insert++) = ',';
		}
		// NOTE(review): `i` is not read again after the flags memcpy above,
		// so this increment appears vestigial.
		i++;
		needsComma = YES;
		*(insert++) = 'G';
		memcpy(insert, o->getter_name, getterLength);
		insert += getterLength;
	}
	if ((o->attributes & OBJC_PR_setter) == OBJC_PR_setter)
	{
		if (needsComma)
		{
			*(insert++) = ',';
		}
		i++;
		needsComma = YES;
		*(insert++) = 'S';
		memcpy(insert, o->setter_name, setterLength);
		insert += setterLength;
	}
	if (needsComma)
	{
		*(insert++) = ',';
	}
	// Trailing V<name> component, then NUL-terminate.
	*(insert++) = 'V';
	memcpy(insert, name, nameSize);
	insert += nameSize;
	*(insert++) = '\0';
	n->attributes = (const char*)encoding;
}
/* Converts a legacy property list into the modern objc_property_list by
 * upgrading each entry via upgradeProperty().  Returns NULL for NULL input;
 * the result is heap-allocated. */
static struct objc_property_list *upgradePropertyList(struct objc_property_list_gsv1 *l)
{
	if (l == NULL)
	{
		return NULL;
	}
	size_t data_size = l->count * sizeof(struct objc_property);
	struct objc_property_list *n = calloc(1, sizeof(struct objc_property_list) + data_size);
	n->count = l->count;
	n->size = sizeof(struct objc_property);
	for (int i=0 ; i<l->count ; i++)
	{
		upgradeProperty(&n->properties[i], &l->properties[i]);
	}
	return n;
}
// Address used as the associated-object key mapping an upgraded class back
// to its original legacy class structure (set in objc_upgrade_class).
static int legacy_key;
/* Returns the legacy (GSv1) class structure that `cls` was upgraded from,
 * or NULL if the class was not loaded through the legacy path. */
PRIVATE struct objc_class_gsv1* objc_legacy_class_for_class(Class cls)
{
	return (struct objc_class_gsv1*)objc_getAssociatedObject((id)cls, &legacy_key);
}
/* Builds a modern objc_class from a legacy (GSv1) class structure,
 * upgrading its ivar, method, and property lists.  For a non-metaclass the
 * metaclass is upgraded recursively and the legacy structure is remembered
 * via an associated object so later fix-ups (e.g. legacy ivar offsets) can
 * find it.  The superclass link is deliberately left nil here. */
PRIVATE Class objc_upgrade_class(struct objc_class_gsv1 *oldClass)
{
	Class cls = calloc(sizeof(struct objc_class), 1);
	cls->isa = oldClass->isa;
	// super_class is left nil and we upgrade it later.
	cls->name = oldClass->name;
	cls->version = oldClass->version;
	cls->info = objc_class_flag_meta;
	cls->instance_size = oldClass->instance_size;
	cls->ivars = upgradeIvarList(oldClass);
	cls->methods = upgradeMethodList(oldClass->methods);
	cls->protocols = oldClass->protocols;
	cls->abi_version = oldClass->abi_version;
	cls->properties = upgradePropertyList(oldClass->properties);
	objc_register_selectors_from_class(cls);
	if (!objc_test_class_flag_gsv1(oldClass, objc_class_flag_meta_gsv1))
	{
		// This is a plain class: clear the provisional meta flag, upgrade
		// the metaclass it points at, and remember the legacy structure.
		cls->info = 0;
		cls->isa = objc_upgrade_class((struct objc_class_gsv1*)cls->isa);
		objc_setAssociatedObject((id)cls, &legacy_key, (id)oldClass, OBJC_ASSOCIATION_ASSIGN);
	}
	else
	{
		// Metaclass instances are class structures.
		cls->instance_size = sizeof(struct objc_class);
	}
	return cls;
}
/* Builds a modern objc_category from a legacy (GCC-ABI) category: the
 * shared leading fields are copied wholesale, the method lists are
 * upgraded and their selectors registered, and the category's protocol
 * list (if any) is initialised. */
PRIVATE struct objc_category *objc_upgrade_category(struct objc_category_gcc *old)
{
	struct objc_category *cat = calloc(1, sizeof(struct objc_category));
	memcpy(cat, old, sizeof(struct objc_category_gcc));
	cat->instance_methods = upgradeMethodList(old->instance_methods);
	cat->class_methods = upgradeMethodList(old->class_methods);
	if (cat->instance_methods != NULL)
	{
		objc_register_selectors_from_list(cat->instance_methods);
	}
	if (cat->class_methods != NULL)
	{
		objc_register_selectors_from_list(cat->class_methods);
	}
	// The previous loop dereferenced cat->protocols->count unconditionally —
	// crashing for categories that declare no protocols — and invoked
	// objc_init_protocols() once per protocol even though it takes the whole
	// list.  Guard against NULL and initialise the list once.
	if (cat->protocols != NULL)
	{
		objc_init_protocols(cat->protocols);
	}
	return cat;
}
/* Converts a legacy protocol method-description list into the modern form,
 * registering a typed selector for each entry.  Returns NULL for an absent
 * or empty list; the result is heap-allocated. */
static struct objc_protocol_method_description_list*
upgrade_protocol_method_list_gcc(struct objc_protocol_method_description_list_gcc *l)
{
	if ((l == NULL) || (l->count == 0))
	{
		return NULL;
	}
	struct objc_protocol_method_description_list *n =
		malloc(sizeof(struct objc_protocol_method_description_list) +
				l->count * sizeof(struct objc_protocol_method_description));
	n->count = l->count;
	n->size = sizeof(struct objc_protocol_method_description);
	for (int i=0 ; i<n->count ; i++)
	{
		// Legacy lists store untyped names; register the typed selector.
		n->methods[i].selector = sel_registerTypedName_np(l->methods[i].name, l->methods[i].types);
		n->methods[i].types = l->methods[i].types;
	}
	return n;
}
/* Upgrades a GCC-ABI protocol to the modern representation.  The original
 * structure's isa is retargeted to the ProtocolGCC class as an
 * "already upgraded" marker so repeat loads return the registered copy. */
PRIVATE struct objc_protocol *objc_upgrade_protocol_gcc(struct objc_protocol_gcc *p)
{
	// If the protocol has already been upgraded, the don't try to upgrade it twice.
	if (p->isa == objc_getClass("ProtocolGCC"))
	{
		return objc_getProtocol(p->name);
	}
	p->isa = objc_getClass("ProtocolGCC");
	Protocol *proto =
		(Protocol*)class_createInstance((Class)objc_getClass("Protocol"),
				sizeof(struct objc_protocol) - sizeof(id));
	proto->name = p->name;
	// Aliasing of this between the new and old structures means that when this
	// returns these will all be updated.
	proto->protocol_list = p->protocol_list;
	proto->instance_methods = upgrade_protocol_method_list_gcc(p->instance_methods);
	proto->class_methods = upgrade_protocol_method_list_gcc(p->class_methods);
	assert(proto->isa);
	return proto;
}
/* Upgrades a GSv1 protocol to the modern representation.  Unlike the GCC
 * path, the upgraded method/property lists are also written back into the
 * legacy structure (whose isa becomes ProtocolGSv1 as the upgrade marker),
 * because old code may still reference those fields directly. */
PRIVATE struct objc_protocol *objc_upgrade_protocol_gsv1(struct objc_protocol_gsv1 *p)
{
	// If the protocol has already been upgraded, the don't try to upgrade it twice.
	if (p->isa == objc_getClass("ProtocolGSv1"))
	{
		return objc_getProtocol(p->name);
	}
	Protocol *n =
		(Protocol*)class_createInstance((Class)objc_getClass("Protocol"),
				sizeof(struct objc_protocol) - sizeof(id));
	n->instance_methods = upgrade_protocol_method_list_gcc(p->instance_methods);
	// Aliasing of this between the new and old structures means that when this
	// returns these will all be updated.
	n->name = p->name;
	n->protocol_list = p->protocol_list;
	n->class_methods = upgrade_protocol_method_list_gcc(p->class_methods);
	n->properties = upgradePropertyList(p->properties);
	n->optional_properties = upgradePropertyList(p->optional_properties);
	n->isa = objc_getClass("Protocol");
	// We do in-place upgrading of these, because they might be referenced
	// directly
	p->instance_methods = (struct objc_protocol_method_description_list_gcc*)n->instance_methods;
	p->class_methods = (struct objc_protocol_method_description_list_gcc*)n->class_methods;
	p->properties = (struct objc_property_list_gsv1*)n->properties;
	p->optional_properties = (struct objc_property_list_gsv1*)n->optional_properties;
	p->isa = objc_getClass("ProtocolGSv1");
	assert(p->isa);
	return n;
}

@ -0,0 +1,43 @@
#include <stdlib.h>
void *valloc(size_t);
// Stubs that just call the libc implementations when you call these.
// These exist only for API compatibility with code expecting the runtime's
// own allocator entry points.
void *objc_malloc(size_t size)
{
	return malloc(size);
}
// "Atomic" here is GC terminology (pointer-free memory); with no collector
// it is identical to objc_malloc.
void *objc_atomic_malloc(size_t size)
{
	return malloc(size);
}
#ifdef __MINGW32__
// NOTE(review): MinGW has no valloc, so this falls back to malloc — the
// result is NOT page-aligned there; confirm no caller relies on alignment.
void *objc_valloc(size_t size)
{
	return malloc(size);
}
#else
void *objc_valloc(size_t size)
{
	return valloc(size);
}
#endif
void *objc_realloc(void *mem, size_t size)
{
	return realloc(mem, size);
}
void * objc_calloc(size_t nelem, size_t size)
{
	return calloc(nelem, size);
}
void objc_free(void *mem)
{
	free(mem);
}

@ -0,0 +1,404 @@
#include <stdlib.h>
#include <assert.h>
#include "objc/runtime.h"
#include "objc/objc-auto.h"
#include "objc/objc-arc.h"
#include "lock.h"
#include "loader.h"
#include "visibility.h"
#include "legacy.h"
#ifdef ENABLE_GC
#include <gc/gc.h>
#endif
#include <stdio.h>
#include <string.h>
/**
 * Runtime lock.  Exposed to legacy (GCC-ABI) code through the
 * __objc_runtime_mutex pointer below.
 */
PRIVATE mutex_t runtime_mutex;
/* The original guard was `#ifndef __MINGW32__ || __MINGW64__`, which is
 * ill-formed: #ifndef takes exactly one identifier, so the `|| __MINGW64__`
 * tokens were at best ignored with a warning and at worst a hard error.
 * Spell the intended "not MinGW" condition out explicitly. */
#if !defined(__MINGW32__) && !defined(__MINGW64__)
LEGACY void *__objc_runtime_mutex = &runtime_mutex;
#else
void *__objc_runtime_mutex = &runtime_mutex;
#endif
void init_alias_table(void);
void init_arc(void);
void init_class_tables(void);
void init_dispatch_tables(void);
void init_gc(void);
void init_protocol_table(void);
void init_selector_tables(void);
void init_trampolines(void);
void init_early_blocks(void);
void objc_send_load_message(Class class);
void log_selector_memory_usage(void);
// atexit() handler installed when LIBOBJC_MEMORY_PROFILE is set; dumps the
// selector tables' memory usage at process exit.
static void log_memory_stats(void)
{
	log_selector_memory_usage();
}
/* Number of threads that are alive. */
int __objc_runtime_threads_alive = 1; /* !T:MUTEX */
// libdispatch hooks for registering threads
__attribute__((weak)) void (*dispatch_begin_thread_4GC)(void);
__attribute__((weak)) void (*dispatch_end_thread_4GC)(void);
__attribute__((weak)) void *(*_dispatch_begin_NSAutoReleasePool)(void);
__attribute__((weak)) void (*_dispatch_end_NSAutoReleasePool)(void *);
// NOTE(review): appears never to be called — marked used so the linker
// keeps link_protocol_classes() (and what it references) in the binary.
__attribute__((used))
static void link_protos(void)
{
	link_protocol_classes();
}
/* One-shot runtime bootstrap, called at the top of every module-load entry
 * point.  Creates the global lock and internal tables, optionally installs
 * the memory-profile atexit hook, and wires up libdispatch's weak hook
 * pointers when libdispatch is present. */
static void init_runtime(void)
{
	static BOOL first_run = YES;
	if (first_run)
	{
#if ENABLE_GC
		init_gc();
#endif
		// Create the main runtime lock. This is not safe in theory, but in
		// practice the first time that this function is called will be in the
		// loader, from the main thread. Future loaders may run concurrently,
		// but that is likely to break the semantics of a lot of languages, so
		// we don't have to worry about it for a long time.
		//
		// The only case when this can potentially go badly wrong is when a
		// pure-C main() function spawns two threads which then, concurrently,
		// call dlopen() or equivalent, and the platform's implementation of
		// this does not perform any synchronization.
		INIT_LOCK(runtime_mutex);
		// Create the various tables that the runtime needs.
		init_selector_tables();
		init_dispatch_tables();
		init_protocol_table();
		init_class_tables();
		init_alias_table();
		init_early_blocks();
		init_arc();
		init_trampolines();
		first_run = NO;
		if (getenv("LIBOBJC_MEMORY_PROFILE"))
		{
			atexit(log_memory_stats);
		}
		// If libdispatch is linked, its weak hook pointers resolve non-NULL;
		// point them at our implementations.
		if (dispatch_begin_thread_4GC != 0) {
			dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
		}
		if (dispatch_end_thread_4GC != 0) {
			dispatch_end_thread_4GC = objc_unregisterThreadWithCollector;
		}
		if (_dispatch_begin_NSAutoReleasePool != 0) {
			_dispatch_begin_NSAutoReleasePool = objc_autoreleasePoolPush;
		}
		if (_dispatch_end_NSAutoReleasePool != 0) {
			_dispatch_end_NSAutoReleasePool = objc_autoreleasePoolPop;
		}
	}
}
/**
* Structure for a class alias.
*/
struct objc_alias
{
/**
* The name by which this class is referenced.
*/
const char *alias_name;
/**
* A pointer to the indirection variable for this class.
*/
Class *alias;
};
/**
* Type of the NSConstantString structure.
*/
struct nsstr
{
/** Class pointer. */
id isa;
/**
* Flags. Low 2 bits store the encoding:
* 0: ASCII
* 1: UTF-8
* 2: UTF-16
* 3: UTF-32
*
* Low 16 bits are reserved for the compiler, high 32 bits are reserved for
* the Foundation framework.
*/
uint32_t flags;
/**
* Number of UTF-16 code units in the string.
*/
uint32_t length;
/**
* Number of bytes in the string.
*/
uint32_t size;
/**
* Hash (Foundation framework defines the hash algorithm).
*/
uint32_t hash;
/**
* Character data.
*/
const char *data;
};
// begin: objc_init
struct objc_init
{
uint64_t version;
SEL sel_begin;
SEL sel_end;
Class *cls_begin;
Class *cls_end;
Class *cls_ref_begin;
Class *cls_ref_end;
struct objc_category *cat_begin;
struct objc_category *cat_end;
struct objc_protocol *proto_begin;
struct objc_protocol *proto_end;
struct objc_protocol **proto_ref_begin;
struct objc_protocol **proto_ref_end;
struct objc_alias *alias_begin;
struct objc_alias *alias_end;
struct nsstr *strings_begin;
struct nsstr *strings_end;
};
// end: objc_init
#ifdef DEBUG_LOADING
#include <dlfcn.h>
#endif
// Which compiler ABI generation has been loaded into this process.  Mixing
// the legacy (GCC / GSv1) ABI with the v2 ABI is a fatal error, enforced by
// the load entry points below.
static enum {
	LegacyABI,
	NewABI,
	UnknownABI
} CurrentABI = UnknownABI;
void registerProtocol(Protocol *proto);
/* Module-load entry point for the v2 ABI.  Called by compiler-emitted
 * constructors with a section-boundary table; registers (in dependency
 * order) selectors, protocols, protocol references, classes, and
 * categories, then resolves class links, delivers +load to classes touched
 * by categories, and registers class aliases.  Marks the module done by
 * setting init->version to ULONG_MAX. */
OBJC_PUBLIC void __objc_load(struct objc_init *init)
{
	init_runtime();
#ifdef DEBUG_LOADING
	Dl_info info;
	if (dladdr(init, &info))
	{
		fprintf(stderr, "Loading %p from object: %s (%p)\n", init, info.dli_fname, __builtin_return_address(0));
	}
	else
	{
		fprintf(stderr, "Loading %p from unknown object\n", init);
	}
#endif
	LOCK_RUNTIME_FOR_SCOPE();
	BOOL isFirstLoad = NO;
	// Enforce that legacy- and new-ABI modules are never mixed.
	switch (CurrentABI)
	{
		case LegacyABI:
			fprintf(stderr, "Version 2 Objective-C ABI may not be mixed with earlier versions.\n");
			abort();
		case UnknownABI:
			isFirstLoad = YES;
			CurrentABI = NewABI;
			break;
		case NewABI:
			break;
	}
	// If we've already loaded this module, don't load it again.
	// NOTE(review): version is uint64_t but is compared/assigned with
	// ULONG_MAX, which is 32-bit on LLP64 (Windows) — confirm intended.
	if (init->version == ULONG_MAX)
	{
		return;
	}
	assert(init->version == 0);
	// Sanity-check that each section is a whole number of entries.
	assert((((uintptr_t)init->sel_end-(uintptr_t)init->sel_begin) % sizeof(*init->sel_begin)) == 0);
	assert((((uintptr_t)init->cls_end-(uintptr_t)init->cls_begin) % sizeof(*init->cls_begin)) == 0);
	assert((((uintptr_t)init->cat_end-(uintptr_t)init->cat_begin) % sizeof(*init->cat_begin)) == 0);
	// Selectors first: everything else refers to them.
	for (SEL sel = init->sel_begin ; sel < init->sel_end ; sel++)
	{
		if (sel->name == 0)
		{
			continue;
		}
		objc_register_selector(sel);
	}
	for (struct objc_protocol *proto = init->proto_begin ; proto < init->proto_end ;
	     proto++)
	{
		if (proto->name == NULL)
		{
			continue;
		}
		registerProtocol((struct objc_protocol*)proto);
	}
	// Rewrite protocol references to point at the canonical registered copy.
	for (struct objc_protocol **proto = init->proto_ref_begin ; proto < init->proto_ref_end ;
	     proto++)
	{
		if (*proto == NULL)
		{
			continue;
		}
		struct objc_protocol *p = objc_getProtocol((*proto)->name);
		assert(p);
		*proto = p;
	}
	for (Class *cls = init->cls_begin ; cls < init->cls_end ; cls++)
	{
		if (*cls == NULL)
		{
			continue;
		}
		// As a special case, allow using legacy ABI code with a new runtime.
		if (isFirstLoad && (strcmp((*cls)->name, "Protocol") == 0))
		{
			CurrentABI = UnknownABI;
		}
#ifdef DEBUG_LOADING
		fprintf(stderr, "Loading class %s\n", (*cls)->name);
#endif
		objc_load_class(*cls);
	}
#if 0
	// We currently don't do anything with these pointers. They exist to
	// provide a level of indirection that will permit us to completely change
	// the `objc_class` struct without breaking the ABI (again)
	for (Class *cls = init->cls_ref_begin ; cls < init->cls_ref_end ; cls++)
	{
	}
#endif
	for (struct objc_category *cat = init->cat_begin ; cat < init->cat_end ;
	     cat++)
	{
		if ((cat == NULL) || (cat->class_name == NULL))
		{
			continue;
		}
		objc_try_load_category(cat);
#ifdef DEBUG_LOADING
		fprintf(stderr, "Loading category %s (%s)\n", cat->class_name, cat->name);
#endif
	}
	// Load categories and statics that were deferred.
	objc_load_buffered_categories();
	// Fix up the class links for loaded classes.
	objc_resolve_class_links();
	// Deliver +load to every resolved class that gained a category.
	for (struct objc_category *cat = init->cat_begin ; cat < init->cat_end ;
	     cat++)
	{
		Class class = (Class)objc_getClass(cat->class_name);
		if ((Nil != class) &&
		    objc_test_class_flag(class, objc_class_flag_resolved))
		{
			objc_send_load_message(class);
		}
	}
	// Register aliases
	for (struct objc_alias *alias = init->alias_begin ; alias < init->alias_end ;
	     alias++)
	{
		if (alias->alias_name)
		{
			class_registerAlias_np(*alias->alias, alias->alias_name);
		}
	}
#if 0
	// If future versions of the ABI need to do anything with constant strings,
	// they may do so here.
	for (struct nsstr *string = init->strings_begin ; string < init->strings_end ;
	     string++)
	{
		if (string->isa)
		{
		}
	}
#endif
	// Mark the module as loaded so a second call becomes a no-op.
	init->version = ULONG_MAX;
}
#ifdef OLDABI_COMPAT
/* Module-load entry point for the legacy (GCC / GSv1) ABI.  Upgrades each
 * class and category to the modern structures as it registers them, loads
 * static instances, then resolves class links and delivers +load to classes
 * touched by categories. */
OBJC_PUBLIC void __objc_exec_class(struct objc_module_abi_8 *module)
{
	init_runtime();
	// Enforce that legacy- and new-ABI modules are never mixed.
	switch (CurrentABI)
	{
		case UnknownABI:
			CurrentABI = LegacyABI;
			break;
		case LegacyABI:
			break;
		case NewABI:
			fprintf(stderr, "Version 2 Objective-C ABI may not be mixed with earlier versions.\n");
			abort();
	}
	// Check that this module uses an ABI version that we recognise.
	// In future, we should pass the ABI version to the class / category load
	// functions so that we can change various structures more easily.
	assert(objc_check_abi_version(module));
	// The runtime mutex is held for the entire duration of a load. It does
	// not need to be acquired or released in any of the called load functions.
	LOCK_RUNTIME_FOR_SCOPE();
	struct objc_symbol_table_abi_8 *symbols = module->symbol_table;
	// Register all of the selectors used in this module.
	if (symbols->selectors)
	{
		objc_register_selector_array(symbols->selectors,
				symbols->selector_count);
	}
	// `defs` walks the definitions array: classes first, then categories,
	// then the static-instance list pointer.
	unsigned short defs = 0;
	// Load the classes from this module
	for (unsigned short i=0 ; i<symbols->class_count ; i++)
	{
		objc_load_class(objc_upgrade_class(symbols->definitions[defs++]));
	}
	unsigned int category_start = defs;
	// Load the categories from this module
	for (unsigned short i=0 ; i<symbols->category_count; i++)
	{
		objc_try_load_category(objc_upgrade_category(symbols->definitions[defs++]));
	}
	// Load the static instances
	struct objc_static_instance_list **statics = (void*)symbols->definitions[defs];
	while (NULL != statics && NULL != *statics)
	{
		objc_init_statics(*(statics++));
	}
	// Load categories and statics that were deferred.
	objc_load_buffered_categories();
	objc_init_buffered_statics();
	// Fix up the class links for loaded classes.
	objc_resolve_class_links();
	// Deliver +load to every resolved class that gained a category.
	for (unsigned short i=0 ; i<symbols->category_count; i++)
	{
		struct objc_category *cat = (struct objc_category*)
			symbols->definitions[category_start++];
		Class class = (Class)objc_getClass(cat->class_name);
		if ((Nil != class) &&
		    objc_test_class_flag(class, objc_class_flag_resolved))
		{
			objc_send_load_message(class);
		}
	}
}
#endif

@ -0,0 +1,13 @@
#include <stdio.h>
#include <stdlib.h>
#include "objc/runtime.h"
// This function is exported as a weak symbol to enable GNUstep or some other
// framework to replace it trivially
OBJC_PUBLIC
void __attribute__((weak)) objc_enumerationMutation(id obj)
{
	// Default fast-enumeration mutation handler: report and abort.  Weak so
	// GNUstep (or another framework) can substitute its own handler.
	fputs("Mutation occurred during enumeration.", stderr);
	abort();
}

@ -0,0 +1,20 @@
#include "common.S"
#include "asmconstants.h"
#if __x86_64
#include "objc_msgSend.x86-64.S"
#elif __i386
#include "objc_msgSend.x86-32.S"
#elif __arm__
#include "objc_msgSend.arm.S"
#elif defined(__ARM_ARCH_ISA_A64)
#include "objc_msgSend.aarch64.S"
#elif defined(__riscv) && (__riscv_xlen == 64) && defined(__riscv_float_abi_double)
#include "objc_msgSend.riscv64.S"
#elif defined(__mips_n64) || defined(__mips_n32)
#include "objc_msgSend.mips.S"
#else
#warning objc_msgSend() not implemented for your architecture
#endif
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

@ -0,0 +1,250 @@
#define ARGUMENT_SPILL_SIZE (8*10 + 8*16)
/* Windows ARM64 Exception Handling
*
* Structured Exception Handling (SEH) on Windows ARM64 differs from the x64
* implementation. Functions consist of a single prologue and zero or more
* epilogues. Instead of using offsets for the .seh* directives to manipulate the
* stack frame, each directive corresponds to a single instruction.
*
* This presents a challenge for our objc_msgSend function, which only modifies
* the stack when a slow lookup is needed (see label "5").
*
* To address this, we move the directive marking the start of a function deep
* into the msgSend body to prevent marking every instruction as ".seh_nop."
*
* For Windows:
* - EH_START(x): Start of function (no effect on Windows)
* - EH_END(x): End of function (no effect on Windows)
* - EH_START_AT_OFFSET(x): Mark Start of function (Delayed)
* - EH_END_AT_OFFSET(x): Mark End of function (Delayed)
* - EH_END_PROLOGUE: End of function prologue
* - EH_START_EPILOGUE: Start of function epilogue
* - EH_END_EPILOGUE: End of function epilogue
* - EH_SAVE_FP_LR(x): Save Frame Pointer and Link Register
* - EH_STACK_ALLOC(x): Stack allocation (inside prologue)
* - EH_ADD_FP(x): Add to Frame Pointer
* - EH_NOP: Mark instruction with no unwinding relevance
*
* For non-64-bit Windows systems or other platforms, these macros have no effect and can be used without causing issues.
*/
#ifdef _WIN32
# define EH_START
# define EH_END
# define EH_START_AT_OFFSET(x) .seh_proc x
# define EH_END_AT_OFFSET(x) .seh_endproc x
# define EH_END_PROLOGUE .seh_endprologue
# define EH_START_EPILOGUE .seh_startepilogue
# define EH_END_EPILOGUE .seh_endepilogue
# define EH_SAVE_FP_LR(x) .seh_save_fplr x
# define EH_STACK_ALLOC(x) .seh_stackalloc x
# define EH_ADD_FP(x) .seh_add_fp x
# define EH_NOP .seh_nop
#else
// Marks the real start and end of the function
# define EH_START .cfi_startproc
# define EH_END .cfi_endproc
// The following directives are either not
// needed or not available with CFI
# define EH_START_AT_OFFSET(x)
# define EH_END_AT_OFFSET(x)
# define EH_END_PROLOGUE
# define EH_START_EPILOGUE
# define EH_END_EPILOGUE
# define EH_SAVE_FP_LR(x)
# define EH_STACK_ALLOC(x)
# define EH_ADD_FP(x)
# define EH_NOP
#endif
// MSGSEND: body of an objc_msgSend-family entry point (AArch64).
//   fnname   - symbol name used to bracket the SEH unwind region
//   receiver - register holding self (x0; x1 for the Windows stret2 variant)
//   sel      - register holding the selector
// Fast path (labels 1-3): dtable walk entirely in scratch registers x9-x11,
// ending in a tail-call of the IMP. Slow path (label 5) spills all argument
// registers and calls the C lookup. Label 4 handles nil, label 6 small objects.
.macro MSGSEND fnname receiver, sel
EH_START
cbz \receiver, 4f // Skip everything if the receiver is nil
// Jump to 6: if this is a small object
ubfx x9, \receiver, #0, #SMALLOBJ_BITS
cbnz x9, 6f
ldr x9, [\receiver] // Load class to x9 if not a small int
1:
ldr x9, [x9, #DTABLE_OFFSET] // Dtable -> x9
ldr w10, [\sel] // selector->index -> x10
ldr w11, [x9, #SHIFT_OFFSET] // dtable->shift -> x11
cmp x11, #8 // If this is a small dtable, jump to the
// small dtable handlers
b.eq 2f
cbz x11, 3f
ubfx x11, x10, #16, #8 // Put byte 3 of the sel id in x11
add x11, x9, x11, lsl #3 // x11 = dtable address + dtable data offset
ldr x9, [x11, #DATA_OFFSET] // Load, adding in the data offset
2: // dtable16
ubfx x11, x10, #8, #8 // Put byte 2 of the sel id in x11
add x11, x9, x11, lsl #3 // x11 = dtable address + dtable data offset
ldr x9, [x11, #DATA_OFFSET] // Load, adding in the data offset
3: // dtable8
ubfx x11, x10, #0, #8 // Put low byte of the sel id in x11
add x11, x9, x11, lsl #3 // x11 = dtable address + dtable data offset
ldr x9, [x11, #DATA_OFFSET] // Load, adding in the data offset.
// Slot pointer is now in x9
cbz x9, 5f // If the slot is nil, go to the C path
ldr x9, [x9, #SLOT_OFFSET] // Load the method from the slot
br x9 // Tail-call the method
4: // Nil receiver
// Zero both the integer and the vector return registers so that a message
// to nil returns 0 / 0.0 regardless of the declared return type.
mov \receiver, #0
mov v0.d[0], \receiver
mov v0.d[1], \receiver
br lr
5: // Slow lookup
EH_START_AT_OFFSET(\fnname)
// Save anything that will be clobbered by
// the call.
// Note that we pre-index (see "!"), meaning
// that we adjust the sp before storing the pair
// of registers.
stp x0, x1, [sp, #-(ARGUMENT_SPILL_SIZE)]!
EH_STACK_ALLOC((ARGUMENT_SPILL_SIZE))
stp x2, x3, [sp, #16]
EH_NOP // The following instructions can be ignored by SEH
stp x4, x5, [sp, #32]
EH_NOP
stp x6, x7, [sp, #48]
EH_NOP
stp q0, q1, [sp, #64]
EH_NOP
stp q2, q3, [sp, #96]
EH_NOP
stp q4, q5, [sp, #128]
EH_NOP
stp q6, q7, [sp, #160]
EH_NOP
stp fp, lr, [sp, #192] // The order is arbitrary, except that
EH_SAVE_FP_LR(192) // fp and lr must be spilled together
add fp, sp, 192 // Adjust frame pointer
EH_ADD_FP(192)
stp \receiver, x8, [sp, #-16]! // it's convenient if \receiver is spilled at sp
EH_STACK_ALLOC(16) // stp performed pre-indexing by sp-16
EH_END_PROLOGUE
#ifndef _WIN32
.cfi_def_cfa fp, 16
.cfi_offset fp, -16
.cfi_offset lr, -8
#endif
// We now have all argument registers, the link
// register and the receiver spilled on the
// stack, with sp containing
// the address of the receiver
mov x0, sp // &self, _cmd in arguments
mov x1, \sel
bl CDECL(slowMsgLookup) // This is the only place where the EH directives
// have to be accurate...
mov x9, x0 // IMP -> x9
EH_START_EPILOGUE
// Reload offsets are the spill offsets plus 16, because of the second
// pre-indexed push of \receiver/x8 above.
ldp x0, x1, [sp, #16] // Reload spilled argument registers
EH_NOP
ldp x2, x3, [sp, #32]
EH_NOP
ldp x4, x5, [sp, #48]
EH_NOP
ldp x6, x7, [sp, #64]
EH_NOP
ldp q0, q1, [sp, #80]
EH_NOP
ldp q2, q3, [sp, #112]
EH_NOP
ldp q4, q5, [sp, #144]
EH_NOP
ldp q6, q7, [sp, #176]
EH_NOP
ldp fp, lr, [sp, #208]
EH_SAVE_FP_LR(208)
// Post-increment sp += ARGUMENT_SPILL_SIZE +16
ldp \receiver, x8, [sp], #(ARGUMENT_SPILL_SIZE + 16)
EH_STACK_ALLOC((ARGUMENT_SPILL_SIZE + 16))
EH_END_EPILOGUE
EH_END_AT_OFFSET(\fnname)
br x9
6:
// Load 63:12 of SmallObjectClasses address
// We use the CDECL macro as Windows prefixes
// cdecl conforming symbols with "_".
adrp x10, CDECL(SmallObjectClasses) // The macro handles this transparently.
// Add lower 12-bits of SmallObjectClasses address to x10
add x10, x10, :lo12:CDECL(SmallObjectClasses)
ldr x9, [x10, x9, lsl #3]
b 1b
EH_END
.endm
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), %function)
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), %function)
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), %function)
// On AArch64 AAPCS the integer, FP and POD-sret variants all receive self
// in x0 and _cmd in x1 (the sret buffer travels in x8), so all three entry
// points can share a single MSGSEND expansion.
CDECL(objc_msgSend):
CDECL(objc_msgSend_fpret):
CDECL(objc_msgSend_stret):
MSGSEND objc_msgSend, x0, x1
/*
In AAPCS, an SRet is passed in x8, not x0 like a normal pointer parameter.
On Windows, this is only the case for POD (plain old data) types. Non trivial
types with constructors and destructors are passed in x0 on sret.
We thus need two objc_msgSend functions on Windows on ARM64 for Sret:
1. objc_msgSend_stret for POD Sret
2. objc_msgSend_stret2 for non-trivial Sret (like C++ class instances)
*/
#ifdef _WIN32
.globl CDECL(objc_msgSend_stret2)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret2), %function)
// Non-trivial sret: buffer in x0, so self/_cmd shift to x1/x2.
CDECL(objc_msgSend_stret2):
MSGSEND objc_msgSend_stret2, x1, x2
// COFF symbol records (.def/.scl 2/.type 32 marks external functions)
// and linker /EXPORT directives for the DLL export table.
.text
.def objc_msgSend;
.scl 2;
.type 32;
.endef
.def objc_msgSend_fpret;
.scl 2;
.type 32;
.endef
.def objc_msgSend_stret;
.scl 2;
.type 32;
.endef
.def objc_msgSend_stret2;
.scl 2;
.type 32;
.endef
.section .drectve,"yn"
.ascii " /EXPORT:objc_msgSend"
.ascii " /EXPORT:objc_msgSend_fpret"
.ascii " /EXPORT:objc_msgSend_stret"
.ascii " /EXPORT:objc_msgSend_stret2"
#endif

@ -0,0 +1,146 @@
.syntax unified
.fpu neon
#if ((__ARM_ARCH >= 7) || defined (__ARM_ARCH_6T2__))
#define RELOC_OFFSET 4
// If we're using a CPU that supports Thumb-2, use it. This makes the
// objc_msgSend function 130 bytes instead of 176. The fast path drops from 108
// bytes to 82, meaning that it will fit in 3 32-byte i-cache lines, rather
// than 4. For comparison, the i386 version is 119 for objc_msgSend and
// another 117 for objc_msgSend_fpret (the two are the same on ARM), with 70
// bytes for the fast path..
.thumb
// byte1/byte2/byte3: extract byte 0/1/2 of \src into \dst. Thumb-2 has
// single-instruction bitfield extracts; the ARM-mode fallbacks below use
// and+shift pairs instead.
.macro byte1 dst, src
uxtb \dst, \src
.endm
.macro byte2 dst, src
ubfx \dst, \src, #8, #8
.endm
.macro byte3 dst, src
ubfx \dst, \src, #16, #8
.endm
#else
// RELOC_OFFSET accounts for the pc-read offset (8 in ARM mode, 4 in Thumb)
// used by the GOT_PREL relocation arithmetic at label 7 in MSGSEND.
#define RELOC_OFFSET 8
.macro byte1 dst, src
and \dst, \src, #0xff
.endm
.macro byte2 dst, src
and \dst, \src, #0xff00
lsr \dst, \dst, 8
.endm
.macro byte3 dst, src
// NOTE(review): mask is #0xff00 but the shift is 16 — byte3 in ARM mode
// looks like it always produces 0 (#0xff0000 seems intended); confirm
// whether any supported target actually assembles this branch.
and \dst, \src, #0xff00
lsr \dst, \dst, 16
.endm
#endif
// Macro for testing: logs a register value to standard error
.macro LOG reg
push {r0-r3, ip,lr}
mov r0, \reg
bl logInt(PLT)
pop {r0-r3, ip,lr}
.endm
// MSGSEND: body of an objc_msgSend-family entry point (ARM32).
//   receiver - register holding self (r0, or r1 for stret)
//   sel      - register holding the selector
// r4-r6 are used as scratch and saved/restored around the lookup; the IMP
// is tail-called through ip so no argument register is disturbed.
.macro MSGSEND receiver, sel
.fnstart
teq \receiver, 0
beq 4f // Skip everything if the receiver is nil
push {r4-r6} // We're going to use these three as
.save {r4-r6}
// scratch registers, so save them now.
// These are callee-save, so the unwind library
// must be able to restore them, so we need CFI
// directives for them, but not for any other pushes
tst \receiver, SMALLOBJ_MASK // Sets Z if this is not a small int
ldr r4, 7f
6:
add r4, pc
itte ne
ldrne r4, [r4]
ldrne r4, [r4] // Small Int class -> r4 if this is a small int
ldreq r4, [\receiver] // Load class to r4 if not a small int
ldr r4, [r4, #DTABLE_OFFSET] // Dtable -> r4
ldr r5, [\sel] // selector->index -> r5
ldr r6, [r4, #SHIFT_OFFSET] // dtable->shift -> r6
teq r6, #8 // If this is a small dtable, jump to the small dtable handlers
beq 1f
teq r6, #0
beq 2f
byte3 r6, r5 // Put byte 3 of the sel id in r6
add r6, r4, r6, lsl #2 // r6 = dtable address + dtable data offset
ldr r4, [r6, #DATA_OFFSET] // Load, adding in the data offset
1: // dtable16
byte2 r6, r5 // Put byte 2 of the sel id in r6
add r6, r4, r6, lsl #2 // r6 = dtable address + dtable data offset
ldr r4, [r6, #DATA_OFFSET] // Load, adding in the data offset
2: // dtable8
byte1 r6, r5 // Low byte of sel id into r6
add r6, r4, r6, lsl #2 // r6 = dtable address + dtable data offset
ldr ip, [r6, #DATA_OFFSET] // Load, adding in the data offset
cmp ip, #0 // If the slot is nil
ittt ne
ldrne ip, [ip, #SLOT_OFFSET] // Load the method from the slot
popne {r4-r6} // Restore the saved callee-save registers
bxne ip
5: // Slow lookup
push {r0-r4, lr} // Save anything that will be clobbered by the call
.save {r0-r4, lr}
#ifndef __SOFTFP__
vpush {q0-q3}
.vsave {q0-q3}
#endif
push {\receiver} // &self, _cmd in arguments
.save {\receiver}
mov r0, sp
mov r1, \sel
bl CDECL(slowMsgLookup)(PLT) // This is the only place where the CFI directives have to be accurate...
mov ip, r0 // IMP -> ip
pop {r5} // restore (modified) self to r5
#ifndef __SOFTFP__
vpop {q0-q3}
#endif
pop {r0-r4, lr} // Load clobbered registers
mov \receiver, r5
pop {r4-r6} // Restore the saved callee-save registers
bx ip
4: // Nil receiver
mov r0, 0
mov r1, 0
#ifndef __SOFTFP__
# ifdef __ARM_NEON__
vmov.i64 d0, #0 // Return 0 as a float / double
# else
fmdrr d0, r0, r1
# endif
#endif
bx lr
7:
// GOT-relative address of SmallObjectClasses; the (6b+RELOC_OFFSET)-7b
// term compensates for the pc value read at label 6 above.
.long SmallObjectClasses(GOT_PREL)-((6b+RELOC_OFFSET)-7b)
.align 2
.fnend
.endm
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), %function)
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), %function)
// On ARM the integer and FP variants are identical (soft-float style return
// in r0/r1), so they share one expansion; stret shifts self/_cmd to r1/r2.
CDECL(objc_msgSend):
CDECL(objc_msgSend_fpret):
MSGSEND r0, r1
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), %function)
CDECL(objc_msgSend_stret):
MSGSEND r1, r2

@ -0,0 +1,207 @@
.set noreorder
# Some macros for n32 / n64 compatibility
#ifdef _ABI64
#define LP ld
#define SP sd
#else
#warning N32 is untested, O32 is unsupported.
#define LP lw
#define SP sw
#endif
# Debug-only helper: logs \reg via logInt and then deliberately faults by
# loading from address 0.
.macro dump_and_crash reg
nop
move $a0, \reg
ld $25, %got_disp(logInt)($t8)
jalr $25
nop
lw $zero, ($zero)
.endm
// FIXME: CHERI needs (or, at least, strongly encourages) 32-byte aligned
// stacks.
# Bytes reserved on the slow path for spilling $a0-$a7, $ra and (when
# hard-float) $f12-$f19.
#ifndef __mips_soft_float
#define SAVE_SIZE 136
#else
#define SAVE_SIZE 72
#endif
# MSGSEND: body of an objc_msgSend-family entry point (MIPS n32/n64).
#   receiver - register holding self ($a0, or $a1 for stret)
#   sel      - register holding the selector
# Branch-delay slots are filled manually (.set noreorder is in effect).
.macro MSGSEND receiver, sel
0:
.cfi_startproc # Start emitting unwind data. We
# don't actually care about any of
# the stuff except the slow call,
# because that's the only one that
# can throw.
beq \receiver, $0, 4f # If the receiver is nil, return nil
nop
lui $t8, %hi(%neg(%gp_rel(0b))) # Load the GOT address that we use for relocations into $t8
daddu $t8, $t8, $t9
daddiu $t8, $t8, %lo(%neg(%gp_rel(0b)))
andi $t0, \receiver, SMALLOBJ_MASK # Check if the receiver is a small object
bne $t0, $0, 6f # Get the small object class
nop
LP $t1, (\sel)
# By this point, we have a non-nil
# receiver that is a real pointer
LP $t0, (\receiver) # Load the class
1: # class loaded, stored in $t0
LP $t0, DTABLE_OFFSET($t0) # Load the dtable from the class
lw $t2, SHIFT_OFFSET($t0) # Load the shift (dtable size)
# $t0 = dtable, $t1 = sel index
daddi $t3, $t0, DATA_OFFSET # Compute the address of the start of the array
beq $0, $t2, 3f # If this is a small dtable, jump to the small dtable handlers
daddi $v0, $t2, -8
beq $0, $v0, 2f
lui $t2, 0x00ff # The mask for a big dtable won't fit in an and immediate
and $t2, $t2, $t1 # mask the selector
#ifdef _ABI64
dsrl $t2, $t2, 13 # Right shift 16, but then left shift by pointer size
#else
srl $t2, $t2, 14
#endif
dadd $t2, $t2, $t3
LP $t3, ($t2)
daddi $t3, $t3, DATA_OFFSET # Compute the address of the start of the array
2: # dtable16:
andi $t2, $t1, 0xff00 # mask the selector
#ifdef _ABI64
dsrl $t2, $t2, 5 # Right shift 8, but then left shift by pointer size
#else
srl $t2, $t2, 6
#endif
dadd $t2, $t2, $t3
LP $t3, ($t2)
daddi $t3, $t3, DATA_OFFSET # Compute the address of the start of the array
3: # dtable8:
andi $t2, $t1, 0xff # mask the selector
#ifdef _ABI64
dsll $t2, $t2, 3 # Left shift by pointer size
#else
sll $t2, $t2, 2
#endif
dadd $t2, $t2, $t3
LP $t3, ($t2)
beq $0, $t3, 5f # Nil slot - invoke some kind of forwarding mechanism
nop
LP $25, SLOT_OFFSET($t3)
jr $25
nop
4: # returnNil:
# All of the return registers are
# callee-save, so we can
# return 0 in both in the same code:
#ifndef __mips_soft_float
dmtc1 $0, $f0 # Return 0 as a floating point value (only if we're not a soft-float target)
dmtc1 $0, $f2
#endif
daddi $v0, $0, 0 # Return 0 as an integer
jr $ra
daddi $v1, $0, 0
5: # slowSend:
# Load the address of the slow lookup function now, so that we don't get
# pipeline stalls on the jump. This is more important on CHERI than proper
# MIPS implementations.
# Note: A better linker ought to be able to turn this into a single
# jump-immediate, so revisit this decision later...
LP $25, %got_disp(CDECL(slowMsgLookup))($t8)
daddiu $sp, $sp, -SAVE_SIZE # We need to preserve all registers that may contain arguments:
SP $a0, ($sp)
SP $a1, 8($sp)
SP $a2, 16($sp)
SP $a3, 24($sp)
SP $a4, 32($sp)
SP $a5, 40($sp)
SP $a6, 48($sp)
SP $a7, 56($sp)
SP $ra, 64($sp)
#ifndef __mips_soft_float
sdc1 $f12, 72($sp)
sdc1 $f13, 80($sp)
sdc1 $f14, 88($sp)
sdc1 $f15, 96($sp)
sdc1 $f16, 104($sp)
sdc1 $f17, 112($sp)
sdc1 $f18, 120($sp)
sdc1 $f19, 128($sp)
#endif
# We're (potentially) modifying the self argument with the lookup. Use the
# address of the stack save slot for the address so that when we reload it
# we get the old or new version automatically. Note that we must reload it
# anyway, because argument registers are not guaranteed to be preserved
# across calls.
.ifc "\receiver", "$a0"
daddiu $a0, $sp, 0 # replace self with &self in $a0
.else
daddiu $a0, $sp, 8 # replace sret pointer with &self in $a0
daddiu $a1, $a2, 0 # replace self with _cmd in $a1
.endif
.cfi_def_cfa_offset SAVE_SIZE
.cfi_offset 31, (64 - SAVE_SIZE)
jalr $25 # Call the slow lookup function
nop
move $25, $v0 # Move the return value to $25 for use with the call
LP $a0, ($sp) # Restore all of the arguments. Note
LP $a1, 8($sp) # that the receiver may have been
LP $a2, 16($sp) # modified during the call
LP $a3, 24($sp)
LP $a4, 32($sp)
LP $a5, 40($sp)
LP $a6, 48($sp)
LP $a7, 56($sp)
LP $ra, 64($sp)
#ifndef __mips_soft_float
ldc1 $f12, 72($sp)
ldc1 $f13, 80($sp)
ldc1 $f14, 88($sp)
ldc1 $f15, 96($sp)
ldc1 $f16, 104($sp)
ldc1 $f17, 112($sp)
ldc1 $f18, 120($sp)
ldc1 $f19, 128($sp)
#endif
jr $25
daddiu $sp, $sp, SAVE_SIZE
6: # smallObject:
#if _ABI64
dsll $t0, $t0, 3 # Convert tag to pointer offset
LP $t2, %got_disp(CDECL(SmallObjectClasses))($t8) # Load small object classes array address
daddu $t0, $t0, $t2 # Add the base address to the offset
b 1b # Return to the normal path
LP $t0, ($t0) # Load the class (in delay slot)
#else
b 1b
LP $t0, %got_disp(CDECL(SmallIntClass))($t8)
#endif
.cfi_endproc
.endm
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), @function)
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), @function)
# Integer and FP return share one body; stret shifts self/_cmd to $a1/$a2
# (the sret buffer pointer stays in $a0).
CDECL(objc_msgSend_fpret):
CDECL(objc_msgSend):
MSGSEND $a0, $a1
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), @function)
CDECL(objc_msgSend_stret):
MSGSEND $a1, $a2

@ -0,0 +1,141 @@
// Slow-path spill area: 8 integer argument registers + fp + ra (10*8)
// plus 8 FP argument registers (8*8).
#define ARGUMENT_SPILL_SIZE (10*8 + 8*8)
// MSGSEND: body of an objc_msgSend-family entry point (RV64).
//   receiver - register holding self (a0, or a1 for stret)
//   sel      - register holding the selector
.macro MSGSEND receiver, sel
.cfi_startproc
beqz \receiver, 3f // Skip everything if receiver is nil
andi t0, \receiver, SMALLOBJ_MASK
bnez t0, 5f
ld t0, 0(\receiver) // Load class into t0
0:
ld t0, DTABLE_OFFSET(t0) // dtable -> t0
ld t1, 0(\sel) // selector->index -> t1
ld t2, SHIFT_OFFSET(t0) // dtable->shift -> t2
li t3, 8
beq t2, t3, 1f
beqz t2, 2f
srli t2, t1, 16-3 // Extract byte 3 of sel index and multiply by 2^3
and t2, t2, 0x7F8 // Mask target byte
// Example: ((0xCAFEBA >> 13) & 0x7f8) == (0xCA << 3)
add t2, t0, t2 // t2 = dtable address + offset
ld t0, DATA_OFFSET(t2) // Load, adding in the data offset
1:
srli t2, t1, 8-3 // Extract byte 2 of sel index and multiply by 2^3
and t2, t2, 0x7F8 // Mask target byte
add t2, t0, t2 // t2 = dtable address + offset
ld t0, DATA_OFFSET(t2) // Load, adding in the data offset
2:
slli t2, t1, 3 // Multiply by 2^3
and t2, t2, 0x7F8 // Mask target byte
add t2, t0, t2 // t2 = dtable address + offset
ld t0, DATA_OFFSET(t2) // Load, adding in the data offset
// Slot pointer is now in t0
beqz t0, 4f // If the slot is nil, go to the C path
ld t0, SLOT_OFFSET(t0) // Load the method from the slot
jalr zero, t0, 0 // Tail-call the method
3:
// Nil receiver: zero integer and FP return registers and return.
li \receiver, 0
li \sel, 0
fmv.d.x fa0, zero
fmv.d.x fa1, zero
jalr zero, ra, 0
4:
// Slow path: spill every argument register plus fp/ra, then call the C
// lookup with a0 = &self (so the callee may rewrite self in place).
add sp, sp, -(ARGUMENT_SPILL_SIZE)
// Spill function arguments
sd a0, 0(sp)
sd a1, 8(sp)
sd a2, 16(sp)
sd a3, 24(sp)
sd a4, 32(sp)
sd a5, 40(sp)
sd a6, 48(sp)
sd a7, 56(sp)
// Spill FP arguments
fsd fa0, 64(sp)
fsd fa1, 72(sp)
fsd fa2, 80(sp)
fsd fa3, 88(sp)
fsd fa4, 96(sp)
fsd fa5, 104(sp)
fsd fa6, 112(sp)
fsd fa7, 120(sp)
sd fp, 128(sp)
sd ra, 136(sp)
add fp, sp, 128
add sp, sp, -16
sd \receiver, 0(sp) // it is convenient if \receiver is spilled at sp
.cfi_def_cfa fp, 16
.cfi_offset fp, -16
.cfi_offset ra, -8
add a0, sp, zero // &self in first argument
call CDECL(slowMsgLookup)
add t0, a0, zero // IMP -> t0
// Reload offsets are spill offsets + 16 because of the extra push above.
ld a0, 16(sp)
ld a1, 24(sp)
ld a2, 32(sp)
ld a3, 40(sp)
ld a4, 48(sp)
ld a5, 56(sp)
ld a6, 64(sp)
ld a7, 72(sp)
fld fa0, 80(sp)
fld fa1, 88(sp)
fld fa2, 96(sp)
fld fa3, 104(sp)
fld fa4, 112(sp)
fld fa5, 120(sp)
fld fa6, 128(sp)
fld fa7, 136(sp)
ld fp, 144(sp)
ld ra, 152(sp)
ld \receiver, 0(sp)
add sp, sp, ARGUMENT_SPILL_SIZE
add sp, sp, 16
jalr zero, t0, 0 // Tail-call the method
5:
// Load address of SmallObjectClasses
auipc t1, %pcrel_hi(CDECL(SmallObjectClasses))
addi t1, t1, %pcrel_lo(5b)
// Calculate array offset (INDEX * 2^3)
slli t0, t0, 3
add t0, t1, t0
ld t0, 0(t0)
j 0b
.cfi_endproc
.endm
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), %function)
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), %function)
.globl CDECL(objc_msgSend_stret)
// FIX: objc_msgSend_stret was declared .globl without the %function type
// directive that every other port emits; add it so the symbol carries the
// correct ELF type (STT_FUNC) for linkers and tooling.
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), %function)
// Integer and FP return share one body; stret shifts self/_cmd to a1/a2.
CDECL(objc_msgSend):
CDECL(objc_msgSend_fpret):
MSGSEND a0, a1
CDECL(objc_msgSend_stret):
MSGSEND a1, a2 // Struct-return buffer pointer stays in a0

@ -0,0 +1,132 @@
# MSGSEND: body of an objc_msgSend-family entry point (i386).
#   receiver - byte offset of self relative to %esp at entry (4, or 8 for stret)
#   sel      - byte offset of the selector relative to %esp (8, or 12 for stret)
#   fpret    - 1 to return nil's 0.0 on the x87 stack (fpret variant), else 0
.macro MSGSEND receiver, sel, fpret
.cfi_startproc
movl \receiver(%esp), %eax
test %eax, %eax # If the receiver is nil
jz 4f # return nil
test $SMALLOBJ_MASK, %eax # Check if the receiver is a small object
jnz 6f # Get the small object class
mov (%eax), %eax # Load the class
1: # classLoaded
movl \sel(%esp), %ecx
mov DTABLE_OFFSET(%eax), %eax # Load the dtable from the class
mov (%ecx), %ecx # Load the selector index
# Register use at this point:
# %eax: dtable
# %ecx: Selector index
# %edx: selector index fragment
mov SHIFT_OFFSET(%eax), %edx # Load the shift (dtable size)
cmpl $8, %edx # If this is a small dtable, jump to the small dtable handlers
je 2f
cmpl $0, %edx
je 3f
mov %ecx, %edx
shrl $16, %edx
movl DATA_OFFSET(%eax, %edx, 4), %eax
2: # dtable16:
movzbl %ch, %edx
movl DATA_OFFSET(%eax, %edx, 4), %eax
3: # dtable8:
movzbl %cl, %edx
movl DATA_OFFSET(%eax, %edx, 4), %eax
test %eax, %eax
jz 5f # Nil slot - invoke some kind of forwarding mechanism
mov SLOT_OFFSET(%eax), %ecx
#ifdef _MSC_VER
# Control Flow Guard: validate the indirect call target before jumping.
call *CDECL(__guard_check_icall_fptr)
#endif
jmp *%ecx
4: # returnNil:
.if \fpret
fldz
.else
xor %eax, %eax # return 0 (int)
xor %edx, %edx # Return 64-bit zero (%edx is
# caller-save, so it's safe to do this in the general case.
.endif
ret
5: # slowSend:
mov \sel(%esp), %ecx
lea \receiver(%esp), %eax
push %ecx # Unused, stack alignment
push %ecx # _cmd
push %eax # &self
.cfi_def_cfa_offset 16
call CDECL(slowMsgLookup)@PLT
add $12, %esp # restore the stack
#ifdef _MSC_VER
mov %eax, %ecx
call *CDECL(__guard_check_icall_fptr)
jmp *%ecx
#else
jmp *%eax
#endif
6: # smallObject:
push %ebx # Save old %ebx
calll 7f
7:
popl %ebx;
8:
#if __ELF__
# ELF can support GOT-relative addressing;
# PE/COFF and Mach-O need a text relocation.
addl $_GLOBAL_OFFSET_TABLE_+(8b-7b), %ebx
leal SmallObjectClasses@GOTOFF(%ebx), %eax
#else
leal CDECL(SmallObjectClasses), %eax
#endif
mov (%eax), %eax
popl %ebx
jmp 1b
.cfi_endproc
.endm
#ifdef _WIN32
# COFF symbol records; @feat.00 = 1 marks the object as /SAFESEH-compatible.
.text
.def @feat.00;
.scl 3;
.type 0;
.endef
.globl @feat.00
@feat.00 = 1
.def _objc_msgSend;
.scl 2;
.type 32;
.endef
.def _objc_msgSend_fpret;
.scl 2;
.type 32;
.endef
.def _objc_msgSend_stret;
.scl 2;
.type 32;
.endef
#endif
# Arguments to MSGSEND are %esp-relative byte offsets of self and _cmd at
# entry, plus the fpret flag; stret's offsets are shifted by 4 for the
# struct-return pointer at 4(%esp).
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), @function)
CDECL(objc_msgSend_fpret):
MSGSEND 4, 8, 1
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), @function)
CDECL(objc_msgSend):
MSGSEND 4, 8, 0
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), @function)
CDECL(objc_msgSend_stret):
MSGSEND 8, 12, 0
#ifdef _WIN32
.section .drectve,"yn"
EXPORT_SYMBOL(objc_msgSend)
EXPORT_SYMBOL(objc_msgSend_stret)
EXPORT_SYMBOL(objc_msgSend_fpret)
#endif

@ -0,0 +1,315 @@
# Platform abstraction: Win64 uses SEH directives and the Microsoft x64
# calling convention (rcx/rdx/r8); everything else uses CFI and System V
# (rdi/rsi/rdx). FIRST_ARGUMENT_STR exists for .ifc string comparison.
#ifdef _WIN64
# define START_PROC(x) .seh_proc x
# define END_PROC(x) .seh_endproc
# define FRAME_OFFSET(x) .seh_stackalloc x
# define FIRST_ARGUMENT_STR "%rcx"
# define FIRST_ARGUMENT %rcx
# define SECOND_ARGUMENT %rdx
# define THIRD_ARGUMENT %r8
#else
# define START_PROC(x) .cfi_startproc
# define END_PROC(x) .cfi_endproc
# define FRAME_OFFSET(x) .cfi_adjust_cfa_offset x
# define FIRST_ARGUMENT_STR "%rdi"
# define FIRST_ARGUMENT %rdi
# define SECOND_ARGUMENT %rsi
# define THIRD_ARGUMENT %rdx
#endif
# MSGSEND: body of an objc_msgSend-family entry point (x86-64).
#   fnname   - symbol name for the unwind region
#   receiver - register holding self
#   sel      - register holding the selector
# The fast path spills %rax/%rbx into the red zone only; the slow path
# (label 5) spills all argument registers and calls slowMsgLookup.
.macro MSGSEND fnname receiver, sel
START_PROC(\fnname) # Start emitting unwind data. We
# don't actually care about any of
# the stuff except the slow call,
# because that's the only one that
# can throw.
test \receiver, \receiver # If the receiver is nil
jz 4f # return nil
movq $SMALLOBJ_MASK, %r10 # Load the small object mask
test \receiver, %r10 # Check if the receiver is a small object
jnz 6f # Get the small object class
mov (\receiver), %r10 # Load the class
1: # classLoaded
mov DTABLE_OFFSET(%r10), %r10 # Load the dtable from the class into r10
mov %rax, -8(%rsp) # %rax contains information for variadic calls
mov %rbx, -16(%rsp) # On the fast path, spill into the red zone
mov (\sel), %eax # Load the selector index into %eax
mov SHIFT_OFFSET(%r10), %r11d # Load the shift (dtable size) into r11
cmpl $8, %r11d # If this is a small dtable, jump to the small dtable handlers
je 2f
cmpl $0, %r11d
je 3f
movl %eax, %r11d
shrl $16, %r11d
movq DATA_OFFSET(%r10, %r11, 8), %r10
2: # dtable16:
movzbl %ah, %ebx
movq DATA_OFFSET(%r10, %rbx, 8), %r10
3: # dtable8:
movzbl %al, %ebx
mov -8(%rsp), %rax
movq DATA_OFFSET(%r10, %rbx, 8), %r10
mov -16(%rsp), %rbx
test %r10, %r10
jz 5f # Nil slot - invoke some kind of forwarding mechanism
mov SLOT_OFFSET(%r10), %r10
7:
#ifdef WITH_TRACING
# Optional tracing support: look the selector up in tracing_dtable and, if
# a tracing hook is registered, call it on entry (and optionally on exit,
# via a thread-local return-address stack).
push %r12
push %r13
push %r10
mov (\sel), %r11 # Load the selector index
lea tracing_dtable(%rip), %r10
mov (%r10), %r10
mov SHIFT_OFFSET(%r10), %r13 # Load the shift (dtable size)
mov DATA_OFFSET(%r10), %r12 # load the address of the start of the array
pop %r10
cmpl $8, %r13d # If this is a small dtable, jump to the small dtable handlers
je 10f
cmpl $0, %r13d
je 11f
mov %r11, %r13
and $0xff0000, %r13
shrl $13, %r13d # Right shift 16, but then left shift by 3 *sizeof(void*)
add %r13, %r12
mov (%r12), %r12
mov DATA_OFFSET(%r12), %r12
10: # dtable16:
mov %r11, %r13
and $0xff00, %r13
shrl $5, %r13d
add %r13, %r12
mov (%r12), %r12
mov DATA_OFFSET(%r12), %r12
11: # dtable8:
mov %r11, %r13
and $0xff, %r13
shll $3, %r13d
add %r13, %r12
mov (%r12), %r11
pop %r13
pop %r12
test %r11, %r11
jz 12f
push %rax # We need to preserve all registers that may contain arguments:
push %rdi
push %rsi
push %rdx
push %rcx
push %r8
push %r9
push %r10
push %r11
mov \receiver, %rdi # Arg 0 is receiver
mov \sel, %rsi # Arg 1 is selector
mov %r10, %rdx # Arg 2 is IMP
mov $0, %rcx # Arg 3 is entry / exit (0/1)
mov $0, %r8 # Arg 4 is return value (0 on entry)
call *%r11 # Call the tracing function
cmpq $0, %rax
jz 13f # If it returns 0, don't call the end-tracing function.
cmpq $1, %rax # If it returns 1, do call the tracing function
jne 14f # Any other value is an interposition
# function to call instead of the method
call pushTraceReturnStack # rax now contains a thread-local buffer for storing returns
pop %r11 # Restore all of the argument registers
pop %r10 # except rax, which we'll need before the call
pop %r9
pop %r8
pop %rcx
pop %rdx
pop %rsi
pop %rdi
mov \receiver, (%rax) # Store the receiver in TLS
mov \sel, 8(%rax) # Store the selector in TLS
mov %r10, 16(%rax) # Store the method in TLS
mov %r11, 24(%rax) # Store the tracing function in TLS
mov 8(%rsp), %r11 # r11 now contains the return address
mov %r11, 32(%rax) # Store the method-return address in TLS
pop %rax
pop %r11 # r11 now contains the return address, but we don't care
call *%r10 # Call the IMP. The stack should now be in the same state
# that it was on entry into this function
push %rax # Now we are free to clobber argument
push %rdx # registers, but we must preserve return registers...
call popTraceReturnStack # rax now contains a thread-local buffer for storing returns
push %rax # save the return value, because we'll need it after the tracing function call
mov (%rax), %rdi # Load the receiver into arg 0
mov 8(%rax), %rsi # Load the selector into arg 1
mov 16(%rax), %rdx # Load the IMP into arg 2
mov $1, %rcx # Arg 3 is 1 (tracing on exit)
mov %rax, %r8 # Arg 4 is the return result
mov 24(%rax), %r11 # Reload the address of the tracing function
call *%r11 # Call the tracing function
pop %rax # Reload the real return address
mov 32(%rax), %r11
pop %rdx # Reload saved values
pop %rax
jmp *%r11 # Simulate a return by jumping to the cached return address
13: # Skip tracing on exit and just tail-call the method
pop %r11
pop %r10
pop %r9
pop %r8
pop %rcx
pop %rdx
pop %rsi
pop %rdi
pop %rax
jmp *%r10
14:
mov %rax, %r10
pop %r9
pop %r9
pop %r9
pop %r8
pop %rcx
pop %rdx
pop %rsi
pop %rdi
pop %rax
12:
#endif // WITH_TRACING
#ifdef _MSC_VER
# Control Flow Guard: the dispatch helper validates %rax and jumps to it.
mov %r10, %rax
jmp *__guard_dispatch_icall_fptr(%rip)
#else
jmp *%r10
#endif
4: # returnNil:
# Both of the return registers are
# callee-save on x86-64, so we can
# return 0 in both in the same code:
xor %rax, %rax # Return 0 as an integer
pxor %xmm0, %xmm0 # Return 0 as a floating point value
ret
5: # slowSend:
push %rax # We need to preserve all registers that may contain arguments:
push %rbx
push %rcx
push %r8
push %r9
sub $0x98, %rsp
movups %xmm0, 0x80(%rsp)
movups %xmm1, 0x70(%rsp)
movups %xmm2, 0x60(%rsp)
movups %xmm3, 0x50(%rsp)
movups %xmm4, 0x40(%rsp)
movups %xmm5, 0x30(%rsp)
movups %xmm6, 0x20(%rsp)
movups %xmm7, 0x10(%rsp)
#rdi rsi rdx
# We're (potentially) modifying the self argument with the lookup, so we don't want to be
.ifc "\receiver", FIRST_ARGUMENT_STR
push FIRST_ARGUMENT
mov %rsp, FIRST_ARGUMENT
push SECOND_ARGUMENT # Save _cmd (not preserved across calls)
push THIRD_ARGUMENT
.else
push FIRST_ARGUMENT # Save the sret pointer
push SECOND_ARGUMENT # Save self where it can be modified
mov %rsp, FIRST_ARGUMENT
push THIRD_ARGUMENT
mov THIRD_ARGUMENT, SECOND_ARGUMENT # move _cmd to where the callee expects it to be
.endif
FRAME_OFFSET(0xD8)
call CDECL(slowMsgLookup) # Call the slow lookup function
mov %rax, %r10 # Load the returned IMP
pop THIRD_ARGUMENT
pop SECOND_ARGUMENT
pop FIRST_ARGUMENT
movups 0x80(%rsp), %xmm0
movups 0x70(%rsp), %xmm1
movups 0x60(%rsp), %xmm2
movups 0x50(%rsp), %xmm3
movups 0x40(%rsp), %xmm4
movups 0x30(%rsp), %xmm5
movups 0x20(%rsp), %xmm6
movups 0x10(%rsp), %xmm7
add $0x98, %rsp
pop %r9
pop %r8
pop %rcx
pop %rbx
pop %rax
jmp 7b
6: # smallObject:
and \receiver, %r10 # Find the small int type
lea CDECL(SmallObjectClasses)(%rip), %r11
mov (%r11, %r10, 8), %r10
jmp 1b
END_PROC(\fnname)
.endm
#ifdef _WIN64
# COFF symbol records and DLL exports; on Win64 self/_cmd arrive in
# rcx/rdx (rdx/r8 for stret because the sret buffer takes rcx).
.text
.def objc_msgSend;
.scl 2;
.type 32;
.endef
.def objc_msgSend_fpret;
.scl 2;
.type 32;
.endef
.def objc_msgSend_stret;
.scl 2;
.type 32;
.endef
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), @function)
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), @function)
CDECL(objc_msgSend_fpret):
CDECL(objc_msgSend):
MSGSEND objc_msgSend, %rcx, %rdx
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), @function)
CDECL(objc_msgSend_stret):
MSGSEND objc_msgSend_stret, %rdx, %r8
.section .drectve,"yn"
EXPORT_SYMBOL(objc_msgSend)
EXPORT_SYMBOL(objc_msgSend_fpret)
EXPORT_SYMBOL(objc_msgSend_stret)
#else
# System V: self/_cmd in rdi/rsi (rsi/rdx for stret, sret buffer in rdi).
.globl CDECL(objc_msgSend)
TYPE_DIRECTIVE(CDECL(objc_msgSend), @function)
.globl CDECL(objc_msgSend_fpret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_fpret), @function)
CDECL(objc_msgSend_fpret):
CDECL(objc_msgSend):
MSGSEND objc_msgSend, %rdi, %rsi
.globl CDECL(objc_msgSend_stret)
TYPE_DIRECTIVE(CDECL(objc_msgSend_stret), @function)
CDECL(objc_msgSend_stret):
MSGSEND objc_msgSend_stret, %rsi, %rdx
#endif

@ -0,0 +1,311 @@
#include <atomic>
#include <stdlib.h>
#include <stdio.h>
#include "dwarf_eh.h"
#include "objcxx_eh_private.h"
#include "objcxx_eh.h"
#include "objc/objc-arc.h"
/**
* Helper function that has a custom personality function.
* This calls `cxx_throw` and has a destructor that must be run. We intercept
* the personality function calls and inspect the in-flight C++ exception.
*/
int eh_trampoline();
uint64_t cxx_exception_class;
using namespace __cxxabiv1;
namespace
{
/**
 * Helper needed by the unwind helper headers.  On ARM EHABI targets the
 * personality routine must unwind the frame itself before asking the
 * unwinder to continue; on every other target this is a no-op step.
 */
inline _Unwind_Reason_Code continueUnwinding(struct _Unwind_Exception *ex,
                                             struct _Unwind_Context *context)
{
#if defined(__arm__) && !defined(__ARM_DWARF_EH__)
	if (__gnu_unwind_frame(ex, context) != _URC_OK) { return _URC_FAILURE; }
#endif
	return _URC_CONTINUE_UNWIND;
}
/**
 * Flag indicating that we've already inspected a C++ exception and found all
 * of the offsets.
 */
std::atomic<bool> done_setup;
/**
 * The offset of the C++ type_info object in a thrown exception from the unwind
 * header in a `__cxa_exception`.
 */
std::atomic<ptrdiff_t> type_info_offset;
/**
 * The size of the `_Unwind_Exception` (including padding) in a
 * `__cxa_exception`.
 */
std::atomic<size_t> exception_struct_size;
/**
 * Exception cleanup function for C++ exceptions that wrap Objective-C
 * exceptions.  Installed as the _Unwind_Exception::exception_cleanup hook by
 * objc_init_cxx_exception.
 */
void exception_cleanup(_Unwind_Reason_Code reason,
                       struct _Unwind_Exception *ex)
{
	// __cxa_exception takes a pointer to the end of the __cxa_exception
	// structure, and so we find that by adding the size of the generic
	// exception structure + padding to the pointer to the generic exception
	// structure field of the enclosing structure.
	auto *cxxEx = pointer_add<__cxa_exception>(ex, exception_struct_size);
	__cxa_free_exception(cxxEx);
}
}
using namespace std;
/**
 * Walks the superclass chain of `thrown` and reports whether `type` appears
 * anywhere along it, i.e. whether `thrown` is `type` or a subclass of it.
 */
static BOOL isKindOfClass(Class thrown, Class type)
{
	Class current = thrown;
	for (;;)
	{
		if (current == type)
		{
			return YES;
		}
		current = class_getSuperclass(current);
		if (Nil == current)
		{
			return NO;
		}
	}
}
namespace gnustep
{
	namespace libobjc
	{
		// Base type_info for Objective-C types: reports itself as a pointer
		// type and leaves catch matching to the subclasses.
		__objc_type_info::__objc_type_info(const char *name) : type_info(name) {}
		bool __objc_type_info::__is_pointer_p() const { return true; }
		bool __objc_type_info::__is_function_p() const { return false; }
		bool __objc_type_info::__do_catch(const type_info *thrown_type,
		                                  void **thrown_object,
		                                  unsigned) const
		{
			// Never expected to be called directly; the subclasses implement
			// Objective-C-aware matching.
			assert(0);
			return false;
		};
		bool __objc_type_info::__do_upcast(
		                const __class_type_info *target,
		                void **thrown_object) const
		{
			// Objective-C types never upcast to C++ class types.
			return false;
		};
		/**
		 * The `id` type is mangled to `@id`, which is not a valid mangling
		 * of anything else.
		 */
		__objc_id_type_info::__objc_id_type_info() : __objc_type_info("@id") {};
	}
	// Extracts the Objective-C object pointer from the caught-object slot,
	// applying the extra indirection that libc++abi requires.
	static inline id dereference_thrown_object_pointer(void** obj) {
		/* libc++-abi does not have __is_pointer_p and won't do the double dereference
		 * required to get the object pointer. We need to do it ourselves if we have
		 * caught an exception with libc++'s exception class. */
#ifndef __MINGW32__
		if (cxx_exception_class == llvm_cxx_exception_class) {
			return **(id**)obj;
		}
		return *(id*)obj;
#else
#ifdef _LIBCPP_VERSION
		return **(id**)obj;
#else
		return *(id*)obj;
#endif // _LIBCPP_VERSION
#endif // __MINGW32__
	}
};
// When true (the default), class catch handlers also match objects thrown
// with a concrete class type_info, mirroring Apple's runtime behaviour.
static bool AppleCompatibleMode = true;
/**
 * Enables or disables Apple-compatible Objective-C++ exception matching.
 * Returns the previous setting so callers can restore it.
 */
extern "C" int objc_set_apple_compatible_objcxx_exceptions(int newValue)
{
	const bool previous = AppleCompatibleMode;
	AppleCompatibleMode = (newValue != 0);
	return previous;
}
// Out-of-line destructor definitions anchor the vtables of the Objective-C
// type_info subclasses in this translation unit.
gnustep::libobjc::__objc_class_type_info::~__objc_class_type_info() {}
gnustep::libobjc::__objc_id_type_info::~__objc_id_type_info() {}
/**
 * Catch matcher for a handler naming a concrete Objective-C class.  Returns
 * true (and writes the adjusted object pointer through `obj`) when the
 * thrown Objective-C object may be caught by this handler's class.
 */
bool gnustep::libobjc::__objc_class_type_info::__do_catch(const type_info *thrownType,
                                                          void **obj,
                                                          unsigned outer) const
{
	id thrown = nullptr;
	bool found = false;
	// Id throw matches any ObjC catch. This may be a silly idea!
	if (dynamic_cast<const __objc_id_type_info*>(thrownType)
		|| (AppleCompatibleMode &&
			dynamic_cast<const __objc_class_type_info*>(thrownType)))
	{
		thrown = dereference_thrown_object_pointer(obj);
		// nil only matches id catch handlers in Apple-compatible mode, or when thrown as an id
		if (0 == thrown)
		{
			return false;
		}
		// Check whether the real thrown object matches the catch type.
		found = isKindOfClass(object_getClass(thrown),
		                      (Class)objc_getClass(name()));
	}
	else if (dynamic_cast<const __objc_class_type_info*>(thrownType))
	{
		// Non-Apple mode, class-typed throw: compare the statically thrown
		// class against this handler's class by name.
		thrown = dereference_thrown_object_pointer(obj);
		found = isKindOfClass((Class)objc_getClass(thrownType->name()),
		                      (Class)objc_getClass(name()));
	}
	if (found)
	{
		// Report the extracted object pointer back to the unwinder.
		*obj = (void*)thrown;
	}
	return found;
};
bool gnustep::libobjc::__objc_id_type_info::__do_catch(const type_info *thrownType,
void **obj,
unsigned outer) const
{
// Id catch matches any ObjC throw
if (dynamic_cast<const __objc_class_type_info*>(thrownType))
{
*obj = dereference_thrown_object_pointer(obj);
DEBUG_LOG("gnustep::libobjc::__objc_id_type_info::__do_catch caught 0x%x\n", *obj);
return true;
}
if (dynamic_cast<const __objc_id_type_info*>(thrownType))
{
*obj = dereference_thrown_object_pointer(obj);
DEBUG_LOG("gnustep::libobjc::__objc_id_type_info::__do_catch caught 0x%x\n", *obj);
return true;
}
DEBUG_LOG("gnustep::libobjc::__objc_id_type_info::__do_catch returning false\n");
return false;
};
/**
 * Public interface to the Objective-C++ exception mechanism
 */
extern "C"
{
/**
 * The public symbol that the compiler uses to indicate the Objective-C id type.
 */
OBJC_PUBLIC gnustep::libobjc::__objc_id_type_info __objc_id_type_info;
/**
 * Boxes an Objective-C object as a C++ exception so that it can be thrown
 * through and caught by C++ exception handling.  Returns the unwind header.
 */
struct _Unwind_Exception *objc_init_cxx_exception(id obj)
{
	// The C++ exception payload is simply the object pointer.
	id *newEx = static_cast<id*>(__cxa_allocate_exception(sizeof(id)));
	*newEx = obj;
	// Step back from the payload to the _Unwind_Exception header using the
	// offsets probed from the C++ runtime (see test_eh_personality).
	_Unwind_Exception *ex = pointer_add<_Unwind_Exception>(newEx, -exception_struct_size);
	*pointer_add<std::type_info*>(ex, type_info_offset) = &__objc_id_type_info;
	ex->exception_class = cxx_exception_class;
	ex->exception_cleanup = exception_cleanup;
	__cxa_get_globals()->uncaughtExceptions++;
	return ex;
}
/**
 * Extracts the Objective-C object from a C++ exception, if the exception is
 * an Objective-C throw.  Sets *isValid to 1 and returns the object on
 * success; sets *isValid to 0 and returns NULL otherwise.
 */
void* objc_object_for_cxx_exception(void *thrown_exception, int *isValid)
{
	ptrdiff_t type_offset = type_info_offset;
	// A zero offset means the C++ runtime layout has not been probed yet.
	if (type_offset == 0)
	{
		*isValid = 0;
		return nullptr;
	}
	const std::type_info *thrownType =
		*pointer_add<const std::type_info*>(thrown_exception, type_offset);
	// Only id and class throws carry an Objective-C object.
	if (!dynamic_cast<const gnustep::libobjc::__objc_id_type_info*>(thrownType) &&
	    !dynamic_cast<const gnustep::libobjc::__objc_class_type_info*>(thrownType))
	{
		*isValid = 0;
		return 0;
	}
	*isValid = 1;
	return *pointer_add<id>(thrown_exception, exception_struct_size);
}
} // extern "C"
// Stamp the recognisable magic value into the holder on construction.
MagicValueHolder::MagicValueHolder() : magic_value(magic) {}
/**
 * Throws a MagicValueHolder by value so that the probing personality
 * function can inspect a C++ exception with known contents.
 */
PRIVATE void cxx_throw()
{
	throw MagicValueHolder();
}
/**
 * Personality function that wraps the C++ personality and inspects the C++
 * exception structure on the way past. This should be used only for the
 * `eh_trampoline` function.
 *
 * On the first exception that unwinds through here, records the offset of
 * the type_info pointer, the total size of the C++ exception header, and
 * the C++ runtime's exception class, for later use when boxing Objective-C
 * objects as C++ exceptions.
 *
 * Fix: the original computed `uint64_t cls = __builtin_bswap64(exceptionClass)`
 * and never used it — the dead local has been removed.
 */
extern "C"
PRIVATE
BEGIN_PERSONALITY_FUNCTION(test_eh_personality)
	// Don't bother with a mutex here. It doesn't matter if two threads set
	// these values at the same time.
	if (!done_setup)
	{
		// Scan backwards for the known type_info and forwards for the known
		// magic payload to discover the C++ runtime's exception layout.
		type_info_offset = find_backwards(exceptionObject, &typeid(MagicValueHolder));
		exception_struct_size = find_forwards(exceptionObject, MagicValueHolder::magic);
		cxx_exception_class = exceptionClass;
		done_setup = true;
	}
	return CALL_PERSONALITY_FUNCTION(__gxx_personality_v0);
}
/**
 * Probe the C++ exception handling implementation. This throws a C++
 * exception through a function that uses `test_eh_personality` as its
 * personality function, allowing us to inspect a C++ exception that is in a
 * known state.
 */
#ifndef __MINGW32__
extern "C" void test_cxx_eh_implementation()
{
	// Already probed; nothing to do.
	if (done_setup)
	{
		return;
	}
	bool caught = false;
	try
	{
		// eh_trampoline uses test_eh_personality, which records the C++
		// exception layout as the exception unwinds through it.
		eh_trampoline();
	}
	catch(MagicValueHolder)
	{
		caught = true;
	}
	// If the exception did not arrive here, the probe failed entirely.
	assert(caught);
}
#endif

@ -0,0 +1,134 @@
#include <atomic>
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
#include "dwarf_eh.h"
#include "objcxx_eh_private.h"
#include "objcxx_eh.h"
#include "objc/runtime.h"
#include "objc/objc-arc.h"
#include "objc/objc-exception.h"
#include "objc/hooks.h"
namespace __cxxabiv1
{
	// Minimal mirror of the C++ runtime's __cxa_refcounted_exception: only
	// the leading reference count is needed here.
	// NOTE(review): assumes the real layout places referenceCount first —
	// confirm against the C++ runtime this is built with.
	struct __cxa_refcounted_exception
	{
		int referenceCount;
	};
}
using namespace __cxxabiv1;
// Provided by the C++ runtime: initialises a primary exception header for an
// already-allocated exception object without throwing it.
extern "C" __cxa_refcounted_exception* __cxa_init_primary_exception(void *obj, std::type_info *tinfo, void (*dest) (void *));
/**
 * Exception destructor: balances the retain taken in objc_exception_throw
 * when the C++ runtime destroys the exception.
 */
static void eh_cleanup(void *exception)
{
	DEBUG_LOG("eh_cleanup: Releasing 0x%x\n", *(id*)exception);
	objc_release(*(id*)exception);
}
/**
 * Flag indicating that we've already inspected a C++ exception and found all
 * of the offsets.
 */
std::atomic<bool> done_setup;
/**
 * The size of the `_Unwind_Exception` (including padding) in a
 * `__cxa_exception`.
 */
std::atomic<size_t> exception_struct_size;
/**
 * Throws an Objective-C object as a C++ exception (MinGW / SEH build).
 *
 * The object is retained for the lifetime of the exception; eh_cleanup
 * releases it when the C++ runtime destroys the exception.  Does not return:
 * aborts if the exception cannot be raised.
 */
extern "C"
OBJC_PUBLIC
void objc_exception_throw(id object)
{
	// Don't bother with a mutex here. It doesn't matter if two threads set
	// these values at the same time.
	if (!done_setup)
	{
		DEBUG_LOG("objc_exception_throw: Doing initial setup\n");
		// Probe: build a dummy exception carrying a known magic value and
		// scan forwards from the header to find the payload offset.
		MagicValueHolder *magicExc = (MagicValueHolder *)__cxa_allocate_exception(sizeof(MagicValueHolder));
		MagicValueHolder x;
		*magicExc = x;
		__cxa_refcounted_exception *header =
			__cxa_init_primary_exception(magicExc, & __objc_id_type_info, NULL);
		exception_struct_size = find_forwards(header, MagicValueHolder::magic);
		__cxa_free_exception(magicExc);
		DEBUG_LOG("objc_exception_throw: exception_struct_size: 0x%x\n", unsigned(exception_struct_size));
		done_setup = true;
	}
	// The real exception: the payload is just the object pointer.
	id *exc = (id *)__cxa_allocate_exception(sizeof(id));
	*exc = object;
	objc_retain(object);
	DEBUG_LOG("objc_exception_throw: Throwing 0x%x\n", *exc);
	__cxa_eh_globals *globals = __cxa_get_globals ();
	globals->uncaughtExceptions += 1;
	__cxa_refcounted_exception *header =
		__cxa_init_primary_exception(exc, & __objc_id_type_info, eh_cleanup);
	header->referenceCount = 1;
	// The unwind header sits at the end of the C++ exception header block.
	_Unwind_Exception *unwindHeader = pointer_add<_Unwind_Exception>(header, exception_struct_size - sizeof(_Unwind_Exception));
	_Unwind_Reason_Code err = _Unwind_RaiseException (unwindHeader);
	// _Unwind_RaiseException only returns on failure.  If nothing caught the
	// exception, give the unhandled-exception hook a chance to run first.
	if (_URC_END_OF_STACK == err && 0 != _objc_unexpected_exception)
	{
		DEBUG_LOG("Invoking _objc_unexpected_exception\n");
		_objc_unexpected_exception(object);
	}
	DEBUG_LOG("Throw returned %d\n",(int) err);
	abort();
}
/**
 * Installs a handler to run when an exception reaches the top of the stack
 * without being caught; returns the previously-installed handler.
 */
OBJC_PUBLIC extern objc_uncaught_exception_handler objc_setUncaughtExceptionHandler(objc_uncaught_exception_handler handler)
{
	// Atomic exchange makes concurrent installs safe.
	return __atomic_exchange_n(&_objc_unexpected_exception, handler, __ATOMIC_SEQ_CST);
}
extern "C" void* __cxa_begin_catch(void *object);
/**
 * Objective-C catch entry point.  On this target Objective-C exceptions are
 * plain C++ exceptions, so forward directly to the C++ runtime.
 */
extern "C"
OBJC_PUBLIC
void* objc_begin_catch(void* object)
{
	return __cxa_begin_catch(object);
}
extern "C" void __cxa_end_catch();
/**
 * Objective-C catch exit point; forwards to the C++ runtime.
 */
extern "C"
OBJC_PUBLIC
void objc_end_catch()
{
	__cxa_end_catch();
}
extern "C" void __cxa_rethrow();
/**
 * Rethrows the exception currently being handled; forwards to the C++
 * runtime.
 */
extern "C"
OBJC_PUBLIC
void objc_exception_rethrow()
{
	__cxa_rethrow();
}
extern "C" EXCEPTION_DISPOSITION __gxx_personality_seh0(PEXCEPTION_RECORD ms_exc,
                                                        void *this_frame,
                                                        PCONTEXT ms_orig_context,
                                                        PDISPATCHER_CONTEXT ms_disp);
/**
 * SEH personality routine for Objective-C frames.  Objective-C exceptions
 * are implemented as C++ exceptions on this target, so simply delegate to
 * the C++ SEH personality.
 */
extern "C"
OBJC_PUBLIC
EXCEPTION_DISPOSITION __gnu_objc_personality_seh0(PEXCEPTION_RECORD ms_exc,
                                                  void *this_frame,
                                                  PCONTEXT ms_orig_context,
                                                  PDISPATCHER_CONTEXT ms_disp)
{
	return __gxx_personality_seh0(ms_exc, this_frame, ms_orig_context, ms_disp);
}

@ -0,0 +1,613 @@
#include "objc/runtime.h"
#include "objc/objc-arc.h"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "class.h"
#include "properties.h"
#include "spinlock.h"
#include "visibility.h"
#include "nsobject.h"
#include "gc_ops.h"
#include "lock.h"
PRIVATE int spinlocks[spinlock_count];
/**
 * Public function for getting a property.
 *
 * Reads the object ivar located `offset` bytes into `obj`.  For atomic
 * properties the read and retain happen under the pointer's spinlock so a
 * concurrent atomic setter cannot release the value mid-read; the result is
 * autoreleased outside the lock.  Returns nil for a nil receiver.
 */
OBJC_PUBLIC
id objc_getProperty(id obj, SEL _cmd, ptrdiff_t offset, BOOL isAtomic)
{
	if (nil == obj) { return nil; }
	char *addr = (char*)obj;
	addr += offset;
	id ret;
	if (isAtomic)
	{
		volatile int *lock = lock_for_pointer(addr);
		lock_spinlock(lock);
		ret = *(id*)addr;
		// Retain while the lock is held so a concurrent atomic setter
		// cannot free the value before we own a reference.
		ret = objc_retain(ret);
		unlock_spinlock(lock);
		ret = objc_autoreleaseReturnValue(ret);
	}
	else
	{
		ret = *(id*)addr;
		ret = objc_retainAutoreleaseReturnValue(ret);
	}
	return ret;
}
/**
 * Public function for setting a property.
 *
 * Stores `arg` (retained, or copied when `isCopy`) into the ivar located
 * `offset` bytes into `obj`, releasing the previous value.  Atomic stores
 * swap the pointer under the pointer's spinlock; the release of the old
 * value always happens outside the lock.  Does nothing for a nil receiver.
 */
OBJC_PUBLIC
void objc_setProperty(id obj, SEL _cmd, ptrdiff_t offset, id arg, BOOL isAtomic, BOOL isCopy)
{
	if (nil == obj) { return; }
	char *addr = (char*)obj;
	addr += offset;
	if (isCopy)
	{
		arg = [arg copy];
	}
	else
	{
		arg = objc_retain(arg);
	}
	id old;
	if (isAtomic)
	{
		volatile int *lock = lock_for_pointer(addr);
		lock_spinlock(lock);
		old = *(id*)addr;
		*(id*)addr = arg;
		unlock_spinlock(lock);
	}
	else
	{
		old = *(id*)addr;
		*(id*)addr = arg;
	}
	// Release outside any lock: -dealloc can run arbitrary code.
	objc_release(old);
}
/**
 * Specialised setter: atomic, retain.  Equivalent to calling
 * objc_setProperty(obj, _cmd, offset, arg, YES, NO) without the branches.
 */
OBJC_PUBLIC
void objc_setProperty_atomic(id obj, SEL _cmd, id arg, ptrdiff_t offset)
{
	char *addr = (char*)obj;
	addr += offset;
	arg = objc_retain(arg);
	volatile int *lock = lock_for_pointer(addr);
	lock_spinlock(lock);
	id old = *(id*)addr;
	*(id*)addr = arg;
	unlock_spinlock(lock);
	// Release outside the lock: -dealloc can run arbitrary code.
	objc_release(old);
}
/**
 * Specialised setter: atomic, copy.
 */
OBJC_PUBLIC
void objc_setProperty_atomic_copy(id obj, SEL _cmd, id arg, ptrdiff_t offset)
{
	char *addr = (char*)obj;
	addr += offset;
	arg = [arg copy];
	volatile int *lock = lock_for_pointer(addr);
	lock_spinlock(lock);
	id old = *(id*)addr;
	*(id*)addr = arg;
	unlock_spinlock(lock);
	objc_release(old);
}
/**
 * Specialised setter: nonatomic, retain.
 */
OBJC_PUBLIC
void objc_setProperty_nonatomic(id obj, SEL _cmd, id arg, ptrdiff_t offset)
{
	char *addr = (char*)obj;
	addr += offset;
	arg = objc_retain(arg);
	id old = *(id*)addr;
	*(id*)addr = arg;
	objc_release(old);
}
/**
 * Specialised setter: nonatomic, copy.
 */
OBJC_PUBLIC
void objc_setProperty_nonatomic_copy(id obj, SEL _cmd, id arg, ptrdiff_t offset)
{
	char *addr = (char*)obj;
	addr += offset;
	id old = *(id*)addr;
	*(id*)addr = [arg copy];
	objc_release(old);
}
/**
 * Atomically copies a C++ object between two locations, taking both
 * pointers' spinlocks.  The locks are acquired in address order so that two
 * concurrent copies in opposite directions cannot deadlock.
 */
OBJC_PUBLIC
void objc_copyCppObjectAtomic(void *dest, const void *src,
                              void (*copyHelper) (void *dest, const void *source))
{
	volatile int *lock = lock_for_pointer(src < dest ? src : dest);
	volatile int *lock2 = lock_for_pointer(src < dest ? dest : src);
	lock_spinlock(lock);
	lock_spinlock(lock2);
	copyHelper(dest, src);
	unlock_spinlock(lock);
	unlock_spinlock(lock2);
}
/**
 * Atomic read of a C++ property: locks the source only.
 */
OBJC_PUBLIC
void objc_getCppObjectAtomic(void *dest, const void *src,
                             void (*copyHelper) (void *dest, const void *source))
{
	volatile int *lock = lock_for_pointer(src);
	lock_spinlock(lock);
	copyHelper(dest, src);
	unlock_spinlock(lock);
}
/**
 * Atomic write of a C++ property: locks the destination only.
 */
OBJC_PUBLIC
void objc_setCppObjectAtomic(void *dest, const void *src,
                             void (*copyHelper) (void *dest, const void *source))
{
	volatile int *lock = lock_for_pointer(dest);
	lock_spinlock(lock);
	copyHelper(dest, src);
	unlock_spinlock(lock);
}
/**
 * Structure copy function. This is provided for compatibility with the Apple
 * APIs (it's an ABI function, so it's semi-public), but it's a bad design so
 * it's not used. The problem is that it does not identify which of the
 * pointers corresponds to the object, which causes some excessive locking to
 * be needed.
 */
OBJC_PUBLIC
void objc_copyPropertyStruct(void *dest,
                             void *src,
                             ptrdiff_t size,
                             BOOL atomic,
                             BOOL strong)
{
	if (atomic)
	{
		// Lock both locations, acquiring in address order to avoid
		// deadlocking against a concurrent copy in the other direction.
		volatile int *lock = lock_for_pointer(src < dest ? src : dest);
		volatile int *lock2 = lock_for_pointer(src < dest ? dest : src);
		lock_spinlock(lock);
		lock_spinlock(lock2);
		memcpy(dest, src, size);
		unlock_spinlock(lock);
		unlock_spinlock(lock2);
	}
	else
	{
		memcpy(dest, src, size);
	}
}
/**
 * Get property structure function. Copies a structure from an ivar to another
 * variable. Locks on the address of src.
 */
OBJC_PUBLIC
void objc_getPropertyStruct(void *dest,
                            void *src,
                            ptrdiff_t size,
                            BOOL atomic,
                            BOOL strong)
{
	if (atomic)
	{
		volatile int *lock = lock_for_pointer(src);
		lock_spinlock(lock);
		memcpy(dest, src, size);
		unlock_spinlock(lock);
	}
	else
	{
		memcpy(dest, src, size);
	}
}
/**
 * Set property structure function. Copies a structure to an ivar. Locks on
 * dest.
 */
OBJC_PUBLIC
void objc_setPropertyStruct(void *dest,
                            void *src,
                            ptrdiff_t size,
                            BOOL atomic,
                            BOOL strong)
{
	if (atomic)
	{
		volatile int *lock = lock_for_pointer(dest);
		lock_spinlock(lock);
		memcpy(dest, src, size);
		unlock_spinlock(lock);
	}
	else
	{
		memcpy(dest, src, size);
	}
}
/**
 * Returns the property with the given name declared directly on `cls`
 * (superclasses are not searched), or NULL if there is no such property.
 */
OBJC_PUBLIC
objc_property_t class_getProperty(Class cls, const char *name)
{
	if (Nil == cls)
	{
		return NULL;
	}
	// Properties are stored as a linked list of fixed-size chunks.
	struct objc_property_list *properties = cls->properties;
	while (NULL != properties)
	{
		for (int i=0 ; i<properties->count ; i++)
		{
			objc_property_t p = property_at_index(properties, i);
			if (strcmp(property_getName(p), name) == 0)
			{
				return p;
			}
		}
		properties = properties->next;
	}
	return NULL;
}
/**
 * Returns a heap-allocated (caller frees) array of every property declared
 * directly on `cls` (superclasses are not included).  On return, *outCount
 * (when non-NULL) holds the number of entries.  Returns NULL when there are
 * none.
 *
 * Fix: calloc was called as calloc(size, nmemb); the arguments are now in
 * the standard (nmemb, size) order so the overflow check applies to the
 * element count.
 */
OBJC_PUBLIC
objc_property_t* class_copyPropertyList(Class cls, unsigned int *outCount)
{
	if (Nil == cls)
	{
		if (NULL != outCount) { *outCount = 0; }
		return NULL;
	}
	struct objc_property_list *properties = cls->properties;
	if (!properties)
	{
		if (NULL != outCount) { *outCount = 0; }
		return NULL;
	}
	// Properties live in a linked list of chunks; total them first.
	unsigned int count = 0;
	for (struct objc_property_list *l=properties ; NULL!=l ; l=l->next)
	{
		count += l->count;
	}
	if (NULL != outCount)
	{
		*outCount = count;
	}
	if (0 == count)
	{
		return NULL;
	}
	objc_property_t *list = calloc(count, sizeof(objc_property_t));
	unsigned int out = 0;
	for (struct objc_property_list *l=properties ; NULL!=l ; l=l->next)
	{
		for (int i=0 ; i<l->count ; i++)
		{
			list[out++] = property_at_index(l, i);
		}
	}
	return list;
}
/**
 * Returns the backing-ivar name from the property's attribute string (the
 * text following the first 'V'), or 0 when there is no 'V' attribute or no
 * attribute string at all.
 */
static const char* property_getIVar(objc_property_t property)
{
	const char *attrs = property_getAttributes(property);
	if (attrs == 0)
	{
		return 0;
	}
	const char *v = strchr(attrs, 'V');
	return (v != 0) ? v + 1 : 0;
}
/**
 * Returns the name of a property, or NULL when `property` (or its name) is
 * NULL.
 */
OBJC_PUBLIC
const char *property_getName(objc_property_t property)
{
	if (NULL == property) { return NULL; }
	const char *name = property->name;
	if (NULL == name) { return NULL; }
	// A leading 0 byte flags an indirected name: the second byte holds the
	// offset from the start of the buffer to the real name.
	if (name[0] == 0)
	{
		name += name[1];
	}
	return name;
}
/*
 * The compiler stores the type encoding of the getter. We replace this with
 * the type encoding of the property itself. We use a 0 byte at the start to
 * indicate that the swap has taken place.
 */
static const char *property_getTypeEncoding(objc_property_t property)
{
	if (NULL == property) { return NULL; }
	return property->type;
}
/**
 * Returns the raw attribute-encoding string of a property, or NULL when
 * `property` is NULL.
 */
OBJC_PUBLIC
const char *property_getAttributes(objc_property_t property)
{
	if (NULL == property) { return NULL; }
	return property->attributes;
}
/**
 * Returns a heap-allocated (caller frees) array describing the attributes
 * of `property`.  On return, *outCount (when non-NULL) holds the number of
 * entries; returns NULL for a NULL property.
 *
 * Fixes: the original called strlen(types) unconditionally, which is
 * undefined behaviour when the property has no type encoding, and indexed
 * a possibly-NULL attribute string; both are now guarded.  calloc arguments
 * are in the standard (nmemb, size) order.
 */
OBJC_PUBLIC
objc_property_attribute_t *property_copyAttributeList(objc_property_t property,
                                                      unsigned int *outCount)
{
	if (NULL == property)
	{
		if (NULL != outCount)
		{
			*outCount = 0;
		}
		return NULL;
	}
	// At most 12 distinct attributes can appear.
	objc_property_attribute_t attrs[12];
	int count = 0;
	const char *types = property_getTypeEncoding(property);
	if (NULL != types)
	{
		attrs[count].name = "T";
		attrs[count].value = types;
		count++;
	}
	// If the compiler provides a type encoding string, then it's more
	// informative than the bitfields and should be treated as canonical. If
	// the compiler didn't provide a type encoding string, then this will
	// create a best-effort one.
	const char *attributes = property_getAttributes(property);
	if (NULL != attributes)
	{
		// The attribute string opens with the type encoding; resume after it
		// (and its separator).  Without a type encoding, scan from the start.
		size_t start = (NULL == types) ? 0 : strlen(types) + 1;
		for (size_t i=start ; attributes[i] != 0 ; i++)
		{
			assert(count<12);
			if (attributes[i] == ',')
			{
				// Comma is never the last character in the string, so this
				// should never push us past the end.
				i++;
			}
			attrs[count].value = "";
			switch (attributes[i])
			{
				case 'R':
					attrs[count].name = "R";
					break;
				case 'C':
					attrs[count].name = "C";
					break;
				case '&':
					attrs[count].name = "&";
					break;
				case 'D':
					attrs[count].name = "D";
					break;
				case 'W':
					attrs[count].name = "W";
					break;
				case 'N':
					attrs[count].name = "N";
					break;
				case 'G':
					// Custom getter: the selector name follows 'G'.
					attrs[count].name = "G";
					attrs[count].value = sel_getName(property->getter);
					i += strlen(attrs[count].value);
					break;
				case 'S':
					// Custom setter: the selector name follows 'S'.
					attrs[count].name = "S";
					attrs[count].value = sel_getName(property->setter);
					i += strlen(attrs[count].value);
					break;
				case 'V':
					// Backing ivar name: runs to the end of the string.
					attrs[count].name = "V";
					attrs[count].value = attributes+i+1;
					i += strlen(attributes+i)-1;
					break;
				default:
					continue;
			}
			count++;
		}
	}
	objc_property_attribute_t *propAttrs = calloc(count, sizeof(objc_property_attribute_t));
	memcpy(propAttrs, attrs, count * sizeof(objc_property_attribute_t));
	if (NULL != outCount)
	{
		*outCount = count;
	}
	return propAttrs;
}
/**
 * Returns the first attribute whose name begins with `attr`, or NULL when
 * absent.
 */
static const objc_property_attribute_t *findAttribute(char attr,
                                                      const objc_property_attribute_t *attributes,
                                                      unsigned int attributeCount)
{
	// This linear scan is N^2 in the worst case, but that's still probably
	// cheaper than sorting the array because N<12
	for (int i=0 ; i<attributeCount ; i++)
	{
		if (attributes[i].name[0] == attr)
		{
			return &attributes[i];
		}
	}
	return NULL;
}
/**
 * If attribute `a` exists in the array, appends "<name><value>," at
 * `buffer` and returns the advanced cursor; otherwise returns `buffer`
 * unchanged.  The caller guarantees sufficient space.
 */
static char *addAttrIfExists(char a,
                             char *buffer,
                             const objc_property_attribute_t *attributes,
                             unsigned int attributeCount)
{
	const objc_property_attribute_t *attr = findAttribute(a, attributes, attributeCount);
	if (attr)
	{
		*(buffer++) = attr->name[0];
		if (attr->value)
		{
			size_t len = strlen(attr->value);
			memcpy(buffer, attr->value, len);
			buffer += len;
		}
		*(buffer++) = ',';
	}
	return buffer;
}
/**
 * Builds a canonical attribute-encoding string from an attribute array.
 * Attributes are emitted in a fixed canonical order (T first, V last).
 * Returns a heap-allocated string, or NULL when the array is empty.
 */
static const char *encodingFromAttrs(const objc_property_attribute_t *attributes,
                                     unsigned int attributeCount)
{
	// Length of the attributes string (initially the number of keys and commas and trailing null)
	size_t attributesSize = 2 * attributeCount;
	for (int i=0 ; i<attributeCount ; i++)
	{
		if (attributes[i].value)
		{
			attributesSize += strlen(attributes[i].value);
		}
	}
	if (attributesSize == 0)
	{
		return NULL;
	}
	char *buffer = malloc(attributesSize);
	char *out = buffer;
	// Emit in canonical order; attributes with unknown names are dropped.
	out = addAttrIfExists('T', out, attributes, attributeCount);
	out = addAttrIfExists('R', out, attributes, attributeCount);
	out = addAttrIfExists('&', out, attributes, attributeCount);
	out = addAttrIfExists('C', out, attributes, attributeCount);
	out = addAttrIfExists('W', out, attributes, attributeCount);
	out = addAttrIfExists('D', out, attributes, attributeCount);
	out = addAttrIfExists('N', out, attributes, attributeCount);
	out = addAttrIfExists('G', out, attributes, attributeCount);
	out = addAttrIfExists('S', out, attributes, attributeCount);
	out = addAttrIfExists('V', out, attributes, attributeCount);
	assert(out != buffer);
	// Replace the final trailing comma with the terminating NUL.
	out--;
	*out = '\0';
	return buffer;
}
/**
 * Builds an objc_property structure (by value) from an attribute array.
 * The name and type encoding are copied; getter/setter selectors are
 * registered untyped.
 */
PRIVATE struct objc_property propertyFromAttrs(const objc_property_attribute_t *attributes,
                                               unsigned int attributeCount,
                                               const char *name)
{
	struct objc_property p;
	p.name = strdup(name);
	p.attributes = encodingFromAttrs(attributes, attributeCount);
	p.type = NULL;
	const objc_property_attribute_t *attr = findAttribute('T', attributes, attributeCount);
	if (attr)
	{
		p.type = strdup(attr->value);
	}
	p.getter = NULL;
	attr = findAttribute('G', attributes, attributeCount);
	if (attr)
	{
		// TODO: We should be able to construct the full type encoding if we
		// also have a type, but for now use an untyped selector.
		p.getter = sel_registerName(attr->value);
	}
	p.setter = NULL;
	attr = findAttribute('S', attributes, attributeCount);
	if (attr)
	{
		// TODO: We should be able to construct the full type encoding if we
		// also have a type, but for now use an untyped selector.
		p.setter = sel_registerName(attr->value);
	}
	return p;
}
/**
 * Attaches a new property to `cls`.  Returns NO if the class is Nil, the
 * name is NULL, or a property with that name already exists.
 */
OBJC_PUBLIC
BOOL class_addProperty(Class cls,
                       const char *name,
                       const objc_property_attribute_t *attributes,
                       unsigned int attributeCount)
{
	if ((Nil == cls) || (NULL == name) || (class_getProperty(cls, name) != 0)) { return NO; }
	struct objc_property p = propertyFromAttrs(attributes, attributeCount, name);
	// Allocate a single-entry property list with the property stored inline.
	struct objc_property_list *l = calloc(1, sizeof(struct objc_property_list)
	                                         + sizeof(struct objc_property));
	l->count = 1;
	l->size = sizeof(struct objc_property);
	memcpy(&l->properties, &p, sizeof(struct objc_property));
	LOCK_RUNTIME_FOR_SCOPE();
	// Prepend the new chunk to the class's property list.
	l->next = cls->properties;
	cls->properties = l;
	return YES;
}
/**
 * Replaces the definition of the named property, adding it when absent.
 * NOTE(review): overwriting the old entry with memcpy drops the old
 * strdup'd name/type/attribute strings — presumably acceptable for
 * long-lived runtime metadata; confirm before relying on repeated calls.
 */
OBJC_PUBLIC
void class_replaceProperty(Class cls,
                           const char *name,
                           const objc_property_attribute_t *attributes,
                           unsigned int attributeCount)
{
	if ((Nil == cls) || (NULL == name)) { return; }
	objc_property_t old = class_getProperty(cls, name);
	if (NULL == old)
	{
		class_addProperty(cls, name, attributes, attributeCount);
		return;
	}
	struct objc_property p = propertyFromAttrs(attributes, attributeCount, name);
	LOCK_RUNTIME_FOR_SCOPE();
	memcpy(old, &p, sizeof(struct objc_property));
}
/**
 * Returns a heap-allocated (caller frees) copy of the value of the named
 * attribute of `property`: the type encoding for 'T', the ivar name for
 * 'V', selector names for 'S'/'G', an empty string for present flag
 * attributes (D,R,W,C,&,N), and NULL when the attribute is absent.
 *
 * Fixes: the original passed possibly-NULL pointers to strdup ('V' with no
 * backing ivar) and strchr (NULL attribute string), and called sel_getName
 * on possibly-NULL selectors — all undefined behaviour; now guarded.
 */
OBJC_PUBLIC
char *property_copyAttributeValue(objc_property_t property,
                                  const char *attributeName)
{
	if ((NULL == property) || (NULL == attributeName)) { return NULL; }
	const char *attributes = property_getAttributes(property);
	switch (attributeName[0])
	{
		case 'T':
		{
			const char *types = property_getTypeEncoding(property);
			return (NULL == types) ? NULL : strdup(types);
		}
		// Flag attributes: present (empty string) or absent (NULL).
		case 'D':
		case 'R':
		case 'W':
		case 'C':
		case '&':
		case 'N':
		{
			if (NULL == attributes) { return 0; }
			return strchr(attributes, attributeName[0]) ? strdup("") : 0;
		}
		case 'V':
		{
			// property_getIVar returns NULL when there is no backing ivar.
			const char *iVar = property_getIVar(property);
			return (NULL == iVar) ? NULL : strdup(iVar);
		}
		case 'S':
		{
			if (NULL == property->setter) { return NULL; }
			return strdup(sel_getName(property->setter));
		}
		case 'G':
		{
			if (NULL == property->getter) { return NULL; }
			return strdup(sel_getName(property->getter));
		}
	}
	return 0;
}

@ -0,0 +1,712 @@
#include "objc/runtime.h"
#include "protocol.h"
#include "properties.h"
#include "class.h"
#include "lock.h"
#include "legacy.h"
#include <stdlib.h>
#include <assert.h>
#define BUFFER_TYPE struct objc_protocol_list *
#include "buffer.h"
// Get the functions for string hashing
#include "string_hash.h"
/**
 * Comparison function for the protocol hash table: protocols are keyed by
 * name.
 */
static int protocol_compare(const char *name,
                            const struct objc_protocol *protocol)
{
	return string_compare(name, protocol->name);
}
/**
 * Hash function for the protocol table: hash of the protocol's name.
 */
static int protocol_hash(const struct objc_protocol *protocol)
{
	return string_hash(protocol->name);
}
#define MAP_TABLE_NAME protocol
#define MAP_TABLE_COMPARE_FUNCTION protocol_compare
#define MAP_TABLE_HASH_KEY string_hash
#define MAP_TABLE_HASH_VALUE protocol_hash
#include "hash_table.h"
// Table of every protocol known to the runtime, keyed by name.
static protocol_table *known_protocol_table;
// Guards known_protocol_table.
mutex_t protocol_table_lock;
/**
 * Creates the protocol table and its lock.
 */
PRIVATE void init_protocol_table(void)
{
	protocol_initialize(&known_protocol_table, 128);
	INIT_LOCK(protocol_table_lock);
}
/**
 * Adds a protocol to the table.  Callers in this file hold
 * protocol_table_lock around this.
 */
static void protocol_table_insert(const struct objc_protocol *protocol)
{
	protocol_insert(known_protocol_table, (void*)protocol);
}
/**
 * Looks up a protocol by name.  Callers in this file hold
 * protocol_table_lock around this.
 */
struct objc_protocol *protocol_for_name(const char *name)
{
	return protocol_table_get(known_protocol_table, name);
}
/**
 * Returns (and caches) the class used for protocols allocated via
 * objc_allocateProtocol that have not yet been registered.
 */
static id incompleteProtocolClass(void)
{
	static id IncompleteProtocolClass = 0;
	if (IncompleteProtocolClass == nil)
	{
		IncompleteProtocolClass = objc_getClass("__IncompleteProtocol");
	}
	return IncompleteProtocolClass;
}
/**
 * Class used for legacy GCC protocols (`ProtocolGCC`).
 */
static id protocol_class_gcc;
/**
 * Class used for legacy GNUstep V1 ABI protocols (`ProtocolGSv1`).
 */
static id protocol_class_gsv1;
/**
 * Class used for protocols (`Protocol`).
 */
static id protocol_class_gsv2;
/**
 * Looks up (and caches) the three protocol classes.  Returns NO until all
 * three are available.
 */
static BOOL init_protocol_classes(void)
{
	if (nil == protocol_class_gcc)
	{
		protocol_class_gcc = objc_getClass("ProtocolGCC");
	}
	if (nil == protocol_class_gsv1)
	{
		protocol_class_gsv1 = objc_getClass("ProtocolGSv1");
	}
	if (nil == protocol_class_gsv2)
	{
		protocol_class_gsv2 = objc_getClass("Protocol");
	}
	if ((nil == protocol_class_gcc) ||
	    (nil == protocol_class_gsv1) ||
	    (nil == protocol_class_gsv2))
	{
		return NO;
	}
	return YES;
}
/**
 * Returns YES when the protocol's layout includes the class-property lists
 * (only current `Protocol`-class protocols do).
 */
static BOOL protocol_hasClassProperties(struct objc_protocol *p)
{
	if (!init_protocol_classes())
	{
		return NO;
	}
	return p->isa == protocol_class_gsv2;
}
/**
 * Returns YES when the protocol's layout includes the optional-method and
 * property lists (anything newer than the GCC ABI).
 */
static BOOL protocol_hasOptionalMethodsAndProperties(struct objc_protocol *p)
{
	if (!init_protocol_classes())
	{
		return NO;
	}
	if (p->isa == protocol_class_gcc)
	{
		return NO;
	}
	return YES;
}
/**
 * Returns non-zero when the protocol declares no methods or properties and
 * adopts no other protocols — i.e. it is a placeholder that a later,
 * complete definition should supersede.
 */
static int isEmptyProtocol(struct objc_protocol *aProto)
{
	int isEmpty =
		((aProto->instance_methods == NULL) ||
		 (aProto->instance_methods->count == 0)) &&
		((aProto->class_methods == NULL) ||
		 (aProto->class_methods->count == 0)) &&
		((aProto->protocol_list == NULL) ||
		 (aProto->protocol_list->count == 0));
	// Only newer-ABI protocols carry the optional/property fields at all.
	if (protocol_hasOptionalMethodsAndProperties(aProto))
	{
		isEmpty &= (aProto->optional_instance_methods == NULL) ||
			(aProto->optional_instance_methods->count == 0);
		isEmpty &= (aProto->optional_class_methods == NULL) ||
			(aProto->optional_class_methods->count == 0);
		isEmpty &= (aProto->properties == 0) || (aProto->properties->count == 0);
		isEmpty &= (aProto->optional_properties == 0) || (aProto->optional_properties->count == 0);
	}
	return isEmpty;
}
// FIXME: Make p1 adopt all of the stuff in p2
/**
 * Shallow-copies p2's method/property/protocol list pointers into p1; used
 * to fill in an empty placeholder protocol from a complete definition.
 */
static void makeProtocolEqualToProtocol(struct objc_protocol *p1,
                                        struct objc_protocol *p2)
{
#define COPY(x) p1->x = p2->x
	COPY(instance_methods);
	COPY(class_methods);
	COPY(protocol_list);
	if (protocol_hasOptionalMethodsAndProperties(p1) &&
	    protocol_hasOptionalMethodsAndProperties(p2))
	{
		COPY(optional_instance_methods);
		COPY(optional_class_methods);
		COPY(properties);
		COPY(optional_properties);
	}
#undef COPY
}
/**
 * Ensures a single canonical protocol per name: registers aProto when the
 * name is new, otherwise reconciles the old and new definitions (copying
 * the non-empty one into the empty one) and returns the canonical pointer.
 */
static struct objc_protocol *unique_protocol(struct objc_protocol *aProto)
{
	struct objc_protocol *oldProtocol =
		protocol_for_name(aProto->name);
	if (NULL == oldProtocol)
	{
		// This is the first time we've seen this protocol, so add it to the
		// hash table and ignore it.
		protocol_table_insert(aProto);
		return aProto;
	}
	if (isEmptyProtocol(oldProtocol))
	{
		if (isEmptyProtocol(aProto))
		{
			return aProto;
			// Add protocol to a list somehow.
		}
		else
		{
			// This protocol is not empty, so we use its definitions
			makeProtocolEqualToProtocol(oldProtocol, aProto);
			return aProto;
		}
	}
	else
	{
		if (isEmptyProtocol(aProto))
		{
			makeProtocolEqualToProtocol(aProto, oldProtocol);
			return oldProtocol;
		}
		else
		{
			return oldProtocol;
			//FIXME: We should really perform a check here to make sure the
			//protocols are actually the same.
		}
	}
}
/**
 * Upgrades (to the v2 layout) and uniques every protocol in the list.
 * Returns NO, without modifying anything, when the protocol classes are not
 * yet available.
 */
static BOOL init_protocols(struct objc_protocol_list *protocols)
{
	if (!init_protocol_classes())
	{
		return NO;
	}
	for (unsigned i=0 ; i<protocols->count ; i++)
	{
		struct objc_protocol *aProto = protocols->list[i];
		// Don't initialise a protocol twice
		if ((aProto->isa == protocol_class_gcc) ||
		    (aProto->isa == protocol_class_gsv1) ||
		    (aProto->isa == protocol_class_gsv2))
		{
			continue;
		}
		// Protocols in the protocol list have their class pointers set to the
		// version of the protocol class that they expect.
		enum protocol_version version =
			(enum protocol_version)(uintptr_t)aProto->isa;
		switch (version)
		{
			default:
				fprintf(stderr, "Unknown protocol version");
				abort();
#ifdef OLDABI_COMPAT
			case protocol_version_gcc:
				// Upgrade the legacy structure to the v2 layout in place.
				protocols->list[i] = objc_upgrade_protocol_gcc((struct objc_protocol_gcc *)aProto);
				assert(aProto->isa == protocol_class_gcc);
				assert(protocols->list[i]->isa == protocol_class_gsv2);
				aProto = protocols->list[i];
				break;
			case protocol_version_gsv1:
				protocols->list[i] = objc_upgrade_protocol_gsv1((struct objc_protocol_gsv1 *)aProto);
				assert(aProto->isa == protocol_class_gsv1);
				assert(protocols->list[i]->isa == protocol_class_gsv2);
				aProto = protocols->list[i];
				break;
#endif
			case protocol_version_gsv2:
				aProto->isa = protocol_class_gsv2;
				break;
		}
		// Initialize all of the protocols that this protocol refers to
		if (NULL != aProto->protocol_list)
		{
			init_protocols(aProto->protocol_list);
		}
		// Replace this protocol with a unique version of it.
		protocols->list[i] = unique_protocol(aProto);
	}
	return YES;
}
/**
 * Registers the protocols referenced by a newly-loaded compilation unit.
 * If the protocol classes are not yet loaded the list is buffered and
 * replayed on a later, successful call.
 */
PRIVATE void objc_init_protocols(struct objc_protocol_list *protocols)
{
	LOCK_FOR_SCOPE(&protocol_table_lock);
	if (!init_protocols(protocols))
	{
		// Too early: queue this list until the protocol classes exist.
		set_buffered_object_at_index(protocols, buffered_objects++);
		return;
	}
	if (buffered_objects == 0) { return; }
	// If we can load one protocol, then we can load all of them.
	for (unsigned i=0 ; i<buffered_objects ; i++)
	{
		struct objc_protocol_list *c = buffered_object_at_index(i);
		if (NULL != c)
		{
			init_protocols(c);
			set_buffered_object_at_index(NULL, i);
		}
	}
	compact_buffer();
}
// Public functions:
/**
 * Returns the registered protocol with the given name, or NULL.
 */
Protocol *objc_getProtocol(const char *name)
{
	if (NULL == name) { return NULL; }
	LOCK_FOR_SCOPE(&protocol_table_lock);
	return (Protocol*)protocol_for_name(name);
}
/**
 * Returns YES when p1 conforms to p2, i.e. when p2 appears (matched by
 * name) anywhere in p1's transitive adopted-protocol graph.
 * NOTE(review): the recursion has no cycle detection — presumably protocol
 * graphs are acyclic by construction; confirm before processing untrusted
 * metadata.
 */
BOOL protocol_conformsToProtocol(Protocol *p1, Protocol *p2)
{
	if (NULL == p1 || NULL == p2) { return NO; }
	// A protocol trivially conforms to itself
	if (strcmp(p1->name, p2->name) == 0) { return YES; }
	for (struct objc_protocol_list *list = p1->protocol_list ;
	     list != NULL ; list = list->next)
	{
		for (int i=0 ; i<list->count ; i++)
		{
			if (strcmp(list->list[i]->name, p2->name) == 0)
			{
				return YES;
			}
			if (protocol_conformsToProtocol((Protocol*)list->list[i], p2))
			{
				return YES;
			}
		}
	}
	return NO;
}
/**
 * Returns YES when `cls` or any of its superclasses adopts a protocol that
 * conforms to `protocol`.
 */
BOOL class_conformsToProtocol(Class cls, Protocol *protocol)
{
	if (Nil == cls || NULL == protocol) { return NO; }
	for ( ; Nil != cls ; cls = class_getSuperclass(cls))
	{
		for (struct objc_protocol_list *protocols = cls->protocols;
		     protocols != NULL ; protocols = protocols->next)
		{
			for (int i=0 ; i<protocols->count ; i++)
			{
				Protocol *p1 = (Protocol*)protocols->list[i];
				if (protocol_conformsToProtocol(p1, protocol))
				{
					return YES;
				}
			}
		}
	}
	return NO;
}
/**
 * Selects one of the protocol's four method-description lists.  Returns
 * NULL for the optional lists when the protocol's layout predates them.
 */
static struct objc_protocol_method_description_list *
get_method_list(Protocol *p,
                BOOL isRequiredMethod,
                BOOL isInstanceMethod)
{
	struct objc_protocol_method_description_list *list;
	if (isRequiredMethod)
	{
		if (isInstanceMethod)
		{
			list = p->instance_methods;
		}
		else
		{
			list = p->class_methods;
		}
	}
	else
	{
		// Old-ABI protocols have no optional lists at all.
		if (!protocol_hasOptionalMethodsAndProperties(p)) { return NULL; }
		if (isInstanceMethod)
		{
			list = p->optional_instance_methods;
		}
		else
		{
			list = p->optional_class_methods;
		}
	}
	return list;
}
/**
 * Returns a heap-allocated (caller frees) array of method descriptions for
 * the selected list of protocol `p`.  *count receives the entry count; NULL
 * is returned (with *count == 0) when the list is empty or absent.
 */
struct objc_method_description *protocol_copyMethodDescriptionList(Protocol *p,
	BOOL isRequiredMethod, BOOL isInstanceMethod, unsigned int *count)
{
	if ((NULL == p) || (NULL == count)){ return NULL; }
	struct objc_protocol_method_description_list *list =
		get_method_list(p, isRequiredMethod, isInstanceMethod);
	*count = 0;
	if (NULL == list || list->count == 0) { return NULL; }
	*count = list->count;
	struct objc_method_description *out =
		calloc(sizeof(struct objc_method_description), list->count);
	for (int i=0 ; i < (list->count) ; i++)
	{
		out[i].name = protocol_method_at_index(list, i)->selector;
		out[i].types = sel_getType_np(protocol_method_at_index(list, i)->selector);
	}
	return out;
}
/**
 * Returns a heap-allocated (caller frees) array of the protocols that `p`
 * directly adopts.  On return, *count (when non-NULL) holds the number of
 * entries.  Returns NULL when there are none.
 *
 * Fixes: the original dereferenced `count` without a NULL check (every
 * sibling copy API in this file guards its out-parameter), and passed
 * calloc's arguments in (size, nmemb) order.
 */
Protocol*__unsafe_unretained* protocol_copyProtocolList(Protocol *p, unsigned int *count)
{
	if (NULL == p) { return NULL; }
	if (NULL != count) { *count = 0; }
	if (p->protocol_list == NULL || p->protocol_list->count ==0)
	{
		return NULL;
	}
	if (NULL != count) { *count = p->protocol_list->count; }
	Protocol **out = calloc(p->protocol_list->count, sizeof(Protocol*));
	for (int i=0 ; i<p->protocol_list->count ; i++)
	{
		out[i] = (Protocol*)p->protocol_list->list[i];
	}
	return out;
}
/**
 * Returns a heap-allocated (caller frees) array of properties of protocol
 * `p`, selected by required/optional and instance/class.  On return,
 * *outCount (when non-NULL) holds the entry count; NULL is returned when
 * the selected list is empty or unavailable.
 *
 * Fixes: the original read p->properties (and the other list fields, which
 * old-ABI protocol structures may not even have) before the `NULL == p`
 * and layout checks, and left *outCount unset on all early returns.
 */
objc_property_t *protocol_copyPropertyList2(Protocol *p, unsigned int *outCount,
                                            BOOL isRequiredProperty, BOOL isInstanceProperty)
{
	if (NULL == p) { return NULL; }
	if (NULL != outCount) { *outCount = 0; }
	// If it's an old protocol, it won't have any of the other options.
	if (!isRequiredProperty && !isInstanceProperty &&
	    !protocol_hasOptionalMethodsAndProperties(p))
	{
		return NULL;
	}
	// Select the list only after the protocol has been validated.
	struct objc_property_list *properties =
		isInstanceProperty ?
			(isRequiredProperty ? p->properties : p->optional_properties) :
			(isRequiredProperty ? p->class_properties : p->optional_class_properties);
	if (properties == NULL)
	{
		return NULL;
	}
	unsigned int count = 0;
	for (struct objc_property_list *l=properties ; l!=NULL ; l=l->next)
	{
		count += l->count;
	}
	if (0 == count)
	{
		return NULL;
	}
	objc_property_t *list = calloc(count, sizeof(objc_property_t));
	unsigned int out = 0;
	for (struct objc_property_list *l=properties ; l!=NULL ; l=l->next)
	{
		for (int i=0 ; i<l->count ; i++)
		{
			list[out++] = property_at_index(l, i);
		}
	}
	if (NULL != outCount) { *outCount = count; }
	return list;
}
/**
 * Compatibility wrapper: copies only the required instance properties.
 */
objc_property_t *protocol_copyPropertyList(Protocol *p,
                                           unsigned int *outCount)
{
	return protocol_copyPropertyList2(p, outCount, YES, YES);
}
/**
 * Returns the named property from the selected (required/optional ×
 * instance/class) property list of protocol `p`, or NULL.
 */
objc_property_t protocol_getProperty(Protocol *p,
                                     const char *name,
                                     BOOL isRequiredProperty,
                                     BOOL isInstanceProperty)
{
	if (NULL == p) { return NULL; }
	// Old-ABI protocols carry no property metadata at all.
	if (!protocol_hasOptionalMethodsAndProperties(p))
	{
		return NULL;
	}
	if (!isInstanceProperty && !protocol_hasClassProperties(p))
	{
		return NULL;
	}
	struct objc_property_list *properties =
		isInstanceProperty ?
			(isRequiredProperty ? p->properties : p->optional_properties) :
			(isRequiredProperty ? p->class_properties : p->optional_class_properties);
	while (NULL != properties)
	{
		for (int i=0 ; i<properties->count ; i++)
		{
			objc_property_t prop = property_at_index(properties, i);
			if (strcmp(property_getName(prop), name) == 0)
			{
				return prop;
			}
		}
		properties = properties->next;
	}
	return NULL;
}
/**
 * Linear search of the selected method list for `aSel`; returns the
 * matching description or NULL.
 */
static struct objc_protocol_method_description *
get_method_description(Protocol *p,
                       SEL aSel,
                       BOOL isRequiredMethod,
                       BOOL isInstanceMethod)
{
	struct objc_protocol_method_description_list *list =
		get_method_list(p, isRequiredMethod, isInstanceMethod);
	if (NULL == list)
	{
		return NULL;
	}
	for (int i=0 ; i<list->count ; i++)
	{
		SEL s = protocol_method_at_index(list, i)->selector;
		if (sel_isEqual(s, aSel))
		{
			return protocol_method_at_index(list, i);
		}
	}
	return NULL;
}
/**
 * Returns the method description (selector and type encoding) for `aSel`
 * in the selected list of protocol `p`, or {0,0} when absent.
 */
struct objc_method_description
protocol_getMethodDescription(Protocol *p,
                              SEL aSel,
                              BOOL isRequiredMethod,
                              BOOL isInstanceMethod)
{
	struct objc_method_description d = {0,0};
	struct objc_protocol_method_description *m =
		get_method_description(p, aSel, isRequiredMethod, isInstanceMethod);
	if (m != NULL)
	{
		SEL s = m->selector;
		d.name = s;
		d.types = sel_getType_np(s);
	}
	return d;
}
/**
 * Returns the extended type encoding recorded in the protocol for `aSel`,
 * or NULL when the method is absent.
 */
const char *_protocol_getMethodTypeEncoding(Protocol *p,
                                            SEL aSel,
                                            BOOL isRequiredMethod,
                                            BOOL isInstanceMethod)
{
	struct objc_protocol_method_description *m =
		get_method_description(p, aSel, isRequiredMethod, isInstanceMethod);
	if (m != NULL)
	{
		return m->types;
	}
	return NULL;
}
/**
 * Returns a protocol's name, or NULL for a NULL protocol.
 */
const char *protocol_getName(Protocol *p)
{
	return (NULL == p) ? NULL : p->name;
}
/**
 * Two protocols are considered equal when either is the same object or they
 * share a name (protocols are uniqued by name in this runtime).
 */
BOOL protocol_isEqual(Protocol *p, Protocol *other)
{
	if ((NULL == p) || (NULL == other))
	{
		return NO;
	}
	// Cheap identity and interned-name checks before the string compare.
	if (p == other) { return YES; }
	if (p->name == other->name) { return YES; }
	return (0 == strcmp(p->name, other->name)) ? YES : NO;
}
/**
 * Copies every registered protocol into a freshly allocated array.
 * The caller owns the returned buffer; *outCount receives the number of
 * entries actually stored.
 */
Protocol*__unsafe_unretained* objc_copyProtocolList(unsigned int *outCount)
{
	LOCK_FOR_SCOPE(&protocol_table_lock);
	unsigned int total = known_protocol_table->table_used;
	// Fix: conventional calloc(count, size) order and a NULL check.
	Protocol **p = calloc(total, sizeof(Protocol*));
	if (NULL == p)
	{
		if (NULL != outCount) { *outCount = 0; }
		return NULL;
	}
	struct protocol_table_enumerator *e = NULL;
	Protocol *next;
	unsigned int count = 0;
	while ((count < total) && (next = protocol_next(known_protocol_table, &e)))
	{
		p[count++] = next;
	}
	// Fix: report the number of slots actually filled.  The enumeration can
	// end before `total` entries are produced; reporting `total` would let
	// callers read uninitialised (NULL) tail slots as valid protocols.
	if (NULL != outCount)
	{
		*outCount = count;
	}
	return p;
}
Protocol *objc_allocateProtocol(const char *name)
{
if (objc_getProtocol(name) != NULL) { return NULL; }
// Create this as an object and add extra space at the end for the properties.
Protocol *p = (Protocol*)class_createInstance((Class)incompleteProtocolClass(),
sizeof(struct objc_protocol) - sizeof(id));
p->name = strdup(name);
return p;
}
/**
 * Registers a protocol created with objc_allocateProtocol() with the runtime,
 * making it visible to protocol lookups.  Silently does nothing for NULL,
 * for a name that is already registered, or for a protocol that is not in
 * the under-construction (incomplete) state.
 */
void objc_registerProtocol(Protocol *proto)
{
	if (NULL == proto) { return; }
	// Hold the table lock across the lookup and the insert so the
	// check-then-insert pair is atomic with respect to other registrations.
	LOCK_FOR_SCOPE(&protocol_table_lock);
	if (objc_getProtocol(proto->name) != NULL) { return; }
	// Only protocols still marked incomplete may be registered.
	if (incompleteProtocolClass() != proto->isa) { return; }
	init_protocol_classes();
	// Flip the isa to the canonical protocol class to mark it complete.
	proto->isa = protocol_class_gsv2;
	protocol_table_insert(proto);
}
/**
 * Runtime-internal registration path (e.g. for protocols emitted by the
 * compiler).  Unlike the public objc_registerProtocol(), this does not
 * require the incomplete state: it unconditionally stamps the protocol
 * class and inserts unless a protocol of that name is already present.
 */
PRIVATE void registerProtocol(Protocol *proto)
{
	init_protocol_classes();
	// Lock covers the lookup + insert pair.
	LOCK_FOR_SCOPE(&protocol_table_lock);
	proto->isa = protocol_class_gsv2;
	if (protocol_for_name(proto->name) == NULL)
	{
		protocol_table_insert(proto);
	}
}
/**
 * Adds a method description to a protocol that is still under construction
 * (created with objc_allocateProtocol(), not yet registered).
 *
 * The `types` pointer is stored directly, not copied — the caller must keep
 * it alive for the lifetime of the protocol.
 */
void protocol_addMethodDescription(Protocol *aProtocol,
                                   SEL name,
                                   const char *types,
                                   BOOL isRequiredMethod,
                                   BOOL isInstanceMethod)
{
	if ((NULL == aProtocol) || (NULL == name) || (NULL == types)) { return; }
	// Registered protocols are immutable.
	if (incompleteProtocolClass() != aProtocol->isa) { return; }
	// Pick the list for the required/optional x instance/class quadrant.
	struct objc_protocol_method_description_list **listPtr;
	if (isInstanceMethod)
	{
		listPtr = isRequiredMethod ? &aProtocol->instance_methods
		                           : &aProtocol->optional_instance_methods;
	}
	else
	{
		listPtr = isRequiredMethod ? &aProtocol->class_methods
		                           : &aProtocol->optional_class_methods;
	}
	// FIXME: Factor this out, we do the same thing in multiple places.
	if (NULL == *listPtr)
	{
		struct objc_protocol_method_description_list *newList =
			calloc(1, sizeof(struct objc_protocol_method_description_list) +
			          sizeof(struct objc_protocol_method_description));
		// Fix: check the allocation instead of dereferencing NULL on OOM.
		if (NULL == newList) { return; }
		newList->count = 1;
		newList->size = sizeof(struct objc_protocol_method_description);
		*listPtr = newList;
	}
	else
	{
		int newCount = (*listPtr)->count + 1;
		// Fix: grow via a temporary so the old list (and its count) stay
		// intact if realloc fails.
		struct objc_protocol_method_description_list *newList =
			realloc(*listPtr, sizeof(struct objc_protocol_method_description_list) +
			        sizeof(struct objc_protocol_method_description) * newCount);
		if (NULL == newList) { return; }
		newList->count = newCount;
		*listPtr = newList;
	}
	struct objc_protocol_method_description_list *list = *listPtr;
	int index = list->count-1;
	// Register a typed selector so the encoding can be recovered later.
	protocol_method_at_index(list, index)->selector =
		sel_registerTypedName_np(sel_getName(name), types);
	protocol_method_at_index(list, index)->types = types;
}
/**
 * Records that a protocol under construction conforms to another protocol.
 * No-op once the protocol has been registered.
 */
void protocol_addProtocol(Protocol *aProtocol, Protocol *addition)
{
	if ((NULL == aProtocol) || (NULL == addition)) { return; }
	// Registered protocols are immutable.
	if (incompleteProtocolClass() != aProtocol->isa) { return; }
	if (NULL == aProtocol->protocol_list)
	{
		// Fix: this list holds Protocol pointers, so size it with
		// struct objc_protocol_list (the original sized it with
		// struct objc_property_list — the wrong structure; compare
		// class_addProtocol, which uses the protocol list struct).
		struct objc_protocol_list *newList =
			calloc(1, sizeof(struct objc_protocol_list) + sizeof(Protocol*));
		if (NULL == newList) { return; }
		newList->count = 1;
		aProtocol->protocol_list = newList;
	}
	else
	{
		// Fix: grow via a temporary so the old list survives realloc failure,
		// and bump the count only after the grow succeeds.
		struct objc_protocol_list *newList =
			realloc(aProtocol->protocol_list, sizeof(struct objc_protocol_list) +
			        (aProtocol->protocol_list->count + 1) * sizeof(Protocol*));
		if (NULL == newList) { return; }
		newList->count++;
		aProtocol->protocol_list = newList;
	}
	aProtocol->protocol_list->list[aProtocol->protocol_list->count-1] = addition;
}
/**
 * Adds a property declaration to a protocol under construction.
 * Class properties are not supported by this entry point (the early return
 * below makes the class-property branches unreachable, so they are removed).
 */
void protocol_addProperty(Protocol *aProtocol,
                          const char *name,
                          const objc_property_attribute_t *attributes,
                          unsigned int attributeCount,
                          BOOL isRequiredProperty,
                          BOOL isInstanceProperty)
{
	if ((NULL == aProtocol) || (NULL == name)) { return; }
	// Registered protocols are immutable.
	if (incompleteProtocolClass() != aProtocol->isa) { return; }
	// Class properties cannot be added through this API.
	if (!isInstanceProperty) { return; }
	struct objc_property_list **listPtr =
		isRequiredProperty ? &aProtocol->properties
		                   : &aProtocol->optional_properties;
	if (NULL == *listPtr)
	{
		struct objc_property_list *newList =
			calloc(1, sizeof(struct objc_property_list) + sizeof(struct objc_property));
		// Fix: check the allocation instead of dereferencing NULL on OOM.
		if (NULL == newList) { return; }
		newList->size = sizeof(struct objc_property);
		newList->count = 1;
		*listPtr = newList;
	}
	else
	{
		// Fix: grow via a temporary so the old list survives realloc failure.
		struct objc_property_list *newList =
			realloc(*listPtr, sizeof(struct objc_property_list) +
			        sizeof(struct objc_property) * ((*listPtr)->count + 1));
		if (NULL == newList) { return; }
		newList->count++;
		*listPtr = newList;
	}
	struct objc_property_list *list = *listPtr;
	int index = list->count-1;
	struct objc_property p = propertyFromAttrs(attributes, attributeCount, name);
	// Entries are stored inline, so the declared element size must match.
	assert(list->size == sizeof(p));
	memcpy(&(list->properties[index]), &p, sizeof(p));
}

@ -0,0 +1,849 @@
#include "objc/runtime.h"
#include "selector.h"
#include "class.h"
#include "protocol.h"
#include "ivar.h"
#include "method.h"
#include "lock.h"
#include "dtable.h"
#include "gc_ops.h"
/* Make glibc export strdup() */
#if defined __GLIBC__
#define __USE_BSD 1
#endif
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// Guard macro: bail out of the current function with 0/NULL/NO when a
// required argument is NULL.  Only usable in functions with a non-void
// return type.
#define CHECK_ARG(arg) if (0 == arg) { return 0; }
// Unlinks a class from its superclass's subclass list (defined below).
static inline void safe_remove_from_subclass_list(Class cls);
PRIVATE void objc_resolve_class(Class);
void objc_send_initialize(id object);
/**
 * Calls C++ destructors in the correct order.
 *
 * Walks the class hierarchy from the object's own (possibly hidden) class up
 * to the root, invoking each class's `.cxx_destruct` method if present, so
 * subclass state is destroyed before superclass state.
 */
PRIVATE void call_cxx_destruct(id obj)
{
	// The selector is registered once and cached across calls.
	static SEL cxx_destruct;
	if (NULL == cxx_destruct)
	{
		cxx_destruct = sel_registerName(".cxx_destruct");
	}
	// Don't call object_getClass(), because we want to get hidden classes too
	Class cls = classForObject(obj);
	while (cls)
	{
		// If we're deallocating a class with a hidden class, then the
		// `.cxx_destruct` method may deallocate the class.
		// Advance to the superclass *before* invoking the destructor, so we
		// never touch `currentClass` again after it may have been freed.
		Class currentClass = cls;
		cls = cls->super_class;
		if (currentClass->cxx_destruct)
		{
			currentClass->cxx_destruct(obj, cxx_destruct);
		}
	}
}
/**
 * Recursively invokes `.cxx_construct` for `cls` and its ancestors,
 * superclass first, mirroring C++ construction order (base before derived).
 */
static void call_cxx_construct_for_class(Class cls, id obj)
{
	// Register the selector once and cache it.
	static SEL cxx_construct;
	if (NULL == cxx_construct)
	{
		cxx_construct = sel_registerName(".cxx_construct");
	}
	// Construct inherited state before this class's own state.
	if (cls->super_class != Nil)
	{
		call_cxx_construct_for_class(cls->super_class, obj);
	}
	if (cls->cxx_construct != NULL)
	{
		cls->cxx_construct(obj, cxx_construct);
	}
}
/**
 * Runs the full `.cxx_construct` chain for a newly allocated object,
 * starting from its real (possibly hidden) class.
 */
PRIVATE void call_cxx_construct(id obj)
{
	Class realClass = classForObject(obj);
	call_cxx_construct_for_class(realClass, obj);
}
/**
 * Looks up the instance method in a specific class, without recursing into
 * superclasses.  Returns NULL when no method in any of the class's method
 * lists matches the selector.
 */
static Method class_getInstanceMethodNonrecursive(Class aClass, SEL aSelector)
{
	struct objc_method_list *methods = aClass->methods;
	while (NULL != methods)
	{
		for (int i=0 ; i<methods->count ; i++)
		{
			Method candidate = method_at_index(methods, i);
			if (sel_isEqual(candidate->selector, aSelector))
			{
				return candidate;
			}
		}
		methods = methods->next;
	}
	return NULL;
}
/**
 * Adds an instance variable to a class that has not yet been initialized.
 *
 * @param alignment  log2 of the required byte alignment (it is used as a
 *                   shift count below).
 * Returns NO if the class is already initialized, an ivar with that name
 * exists, or allocation fails.
 */
BOOL class_addIvar(Class cls, const char *name, size_t size, uint8_t alignment,
                   const char *types)
{
	CHECK_ARG(cls);
	CHECK_ARG(name);
	CHECK_ARG(types);
	// You can't add ivars to initialized classes.  Note: We can't use the
	// resolved flag here because class_getInstanceVariable() sets it.
	if (objc_test_class_flag(cls, objc_class_flag_initialized))
	{
		return NO;
	}
	if (class_getInstanceVariable(cls, name) != NULL)
	{
		return NO;
	}
	struct objc_ivar_list *ivarlist = cls->ivars;
	if (NULL == ivarlist)
	{
		ivarlist = malloc(sizeof(struct objc_ivar_list) + sizeof(struct objc_ivar));
		// Fix: check allocations before dereferencing them.
		if (NULL == ivarlist) { return NO; }
		ivarlist->size = sizeof(struct objc_ivar);
		ivarlist->count = 1;
		cls->ivars = ivarlist;
	}
	else
	{
		// objc_ivar_list contains one ivar.  Others follow it.
		// Fix: grow via a temporary and only commit count/pointer on success.
		struct objc_ivar_list *newList = realloc(ivarlist,
				sizeof(struct objc_ivar_list) +
				(ivarlist->count + 1) * sizeof(struct objc_ivar));
		if (NULL == newList) { return NO; }
		newList->count++;
		cls->ivars = newList;
	}
	Ivar ivar = ivar_at_index(cls->ivars, cls->ivars->count - 1);
	ivar->name = strdup(name);
	ivar->type = strdup(types);
	ivarSetAlign(ivar, alignment);
	// Round up the offset of the ivar so it is correctly aligned
	// (alignment is a log2 shift, so this rounds to a multiple of
	// 1<<alignment).
	long offset = cls->instance_size;
	if (alignment != 0)
	{
		offset >>= alignment;
		if (offset << alignment != cls->instance_size)
		{
			offset++;
		}
		offset <<= alignment;
	}
	ivar->offset = (int*)(uintptr_t)offset;
	// Increase the instance size to make space for this.
	cls->instance_size = offset + size;
	return YES;
}
/**
 * Adds a method to a class.  Fails (returns NO) if the class already has a
 * method with the same name — use class_replaceMethod() to override — or if
 * allocation fails.  The comparison is by selector name only, ignoring types.
 */
BOOL class_addMethod(Class cls, SEL name, IMP imp, const char *types)
{
	CHECK_ARG(cls);
	CHECK_ARG(name);
	CHECK_ARG(imp);
	CHECK_ARG(types);
	const char *methodName = sel_getName(name);
	struct objc_method_list *methods;
	for (methods=cls->methods; methods!=NULL ; methods=methods->next)
	{
		for (int i=0 ; i<methods->count ; i++)
		{
			Method method = method_at_index(methods, i);
			if (strcmp(sel_getName(method->selector), methodName) == 0)
			{
				return NO;
			}
		}
	}
	// Each added method gets its own single-entry list, prepended to the
	// class's chain of method lists.
	methods = malloc(sizeof(struct objc_method_list) + sizeof(struct objc_method));
	// Fix: check the allocation before writing through it.
	if (NULL == methods) { return NO; }
	methods->next = cls->methods;
	methods->size = sizeof(struct objc_method);
	methods->count = 1;
	struct objc_method *m0 = method_at_index(methods, 0);
	// Register a typed selector so dispatch can see the encoding.
	m0->selector = sel_registerTypedName_np(methodName, types);
	m0->types = strdup(types);
	m0->imp = imp;
	cls->methods = methods;
	// Only classes with a dispatch table need it updating.
	if (classHasDtable(cls))
	{
		add_method_list_to_class(cls, methods);
	}
	return YES;
}
/**
 * Records that a class conforms to a protocol.  Returns NO if it already
 * conforms (directly or via inheritance) or allocation fails.
 */
BOOL class_addProtocol(Class cls, Protocol *protocol)
{
	CHECK_ARG(cls);
	CHECK_ARG(protocol);
	if (class_conformsToProtocol(cls, protocol)) { return NO; }
	// Prepend a new single-entry node to the class's protocol-list chain.
	struct objc_protocol_list *node =
		malloc(sizeof(struct objc_protocol_list) + sizeof(Protocol*));
	if (NULL == node) { return NO; }
	node->count = 1;
	node->list[0] = protocol;
	node->next = cls->protocols;
	cls->protocols = node;
	return YES;
}
/**
 * Copies a class's (own, non-inherited) ivars into a NULL-terminated array
 * owned by the caller.  *outCount receives the number of entries (excluding
 * the terminator); it is 0 whenever NULL is returned.
 */
Ivar * class_copyIvarList(Class cls, unsigned int *outCount)
{
	if (outCount != NULL)
	{
		*outCount = 0;
	}
	CHECK_ARG(cls);
	// Note: the original re-checked `Nil != cls` here; CHECK_ARG has already
	// returned for that case, so the check was redundant and is removed.
	struct objc_ivar_list *ivarlist = cls->ivars;
	unsigned int count = (ivarlist != NULL) ? ivarlist->count : 0;
	if (count == 0)
	{
		return NULL;
	}
	Ivar *list = malloc((count + 1) * sizeof(struct objc_ivar *));
	// Fix: check the allocation; leave *outCount at 0 on failure.
	if (NULL == list)
	{
		return NULL;
	}
	list[count] = NULL;
	for (unsigned int index = 0; index < count; index++)
	{
		list[index] = ivar_at_index(ivarlist, index);
	}
	if (outCount != NULL)
	{
		*outCount = count;
	}
	return list;
}
/**
 * Copies all methods from all of a class's method lists into a single
 * NULL-terminated array owned by the caller.  *outCount receives the number
 * of entries (excluding the terminator); it is 0 whenever NULL is returned.
 */
Method * class_copyMethodList(Class cls, unsigned int *outCount)
{
	if (outCount != NULL)
	{
		*outCount = 0;
	}
	CHECK_ARG(cls);
	unsigned int count = 0;
	struct objc_method_list *methods;
	for (methods = cls->methods; methods != NULL; methods = methods->next)
	{
		count += methods->count;
	}
	if (count == 0)
	{
		return NULL;
	}
	Method *list = malloc((count + 1) * sizeof(struct objc_method *));
	// Fix: check the allocation; the original also stored `count` into
	// *outCount before malloc, so a failure would have reported a non-zero
	// count alongside a NULL array.
	if (NULL == list)
	{
		return NULL;
	}
	list[count] = NULL;
	unsigned int filled = 0;
	for (methods = cls->methods; methods != NULL; methods = methods->next)
	{
		for (unsigned int index = 0; index < methods->count; index++)
		{
			list[filled++] = method_at_index(methods, index);
		}
	}
	if (outCount != NULL)
	{
		*outCount = count;
	}
	return list;
}
/**
 * Copies the protocols a class directly conforms to into a NULL-terminated
 * array owned by the caller.  *outCount receives the number of entries
 * (excluding the terminator); it is 0 whenever NULL is returned.
 */
Protocol*__unsafe_unretained* class_copyProtocolList(Class cls, unsigned int *outCount)
{
	if (outCount != NULL)
	{
		*outCount = 0;
	}
	CHECK_ARG(cls);
	struct objc_protocol_list *protocolList = cls->protocols;
	struct objc_protocol_list *list;
	unsigned int count = 0;
	for (list = protocolList; list != NULL; list = list->next)
	{
		count += list->count;
	}
	if (count == 0)
	{
		return NULL;
	}
	Protocol **protocols = malloc((count + 1) * sizeof(Protocol *));
	// Fix: check the allocation; leave *outCount at 0 on failure.
	if (NULL == protocols)
	{
		return NULL;
	}
	protocols[count] = NULL;
	unsigned int filled = 0;
	for (list = protocolList; list != NULL; list = list->next)
	{
		// Each node stores its entries contiguously, so bulk-copy them.
		memcpy(&protocols[filled], list->list, list->count * sizeof(Protocol *));
		filled += list->count;
	}
	if (outCount != NULL)
	{
		*outCount = count;
	}
	return protocols;
}
/**
 * Allocates and constructs an instance of `cls` with `extraBytes` of
 * additional storage after the declared ivars.
 *
 * For the small-object (tagged-pointer) classes no heap allocation happens:
 * a tagged pointer value is returned directly.  Returns nil for Nil classes
 * and for classes too small to hold an isa pointer.
 */
id class_createInstance(Class cls, size_t extraBytes)
{
	CHECK_ARG(cls);
	// Tagged pointers: on 32-bit (sizeof(id)==4) there is a single small
	// object class with tag bit 1; on 64-bit there are four, with the class
	// index encoded in the low bits as (i<<1)+1.
	if (sizeof(id) == 4)
	{
		if (cls == SmallObjectClasses[0])
		{
			return (id)1;
		}
	}
	else
	{
		for (int i=0 ; i<4 ; i++)
		{
			if (cls == SmallObjectClasses[i])
			{
				return (id)(uintptr_t)((i<<1)+1);
			}
		}
	}
	if (Nil == cls) { return nil; }
	// Don't try to allocate an object of size 0, because there's no space for
	// its isa pointer!
	if (cls->instance_size < sizeof(Class)) { return nil; }
	// Allocation goes through the installed GC/allocation ops.
	id obj = gc->allocate_class(cls, extraBytes);
	obj->isa = cls;
	checkARCAccessorsSlow(cls);
	// Run any .cxx_construct chain before handing the object out.
	call_cxx_construct(obj);
	return obj;
}
/**
 * Allocates a new instance of obj's class and copies `size - sizeof(id)`
 * bytes of ivar storage into it (the isa pointer is set by the allocator,
 * not copied).  Returns nil for a nil object or on allocation failure.
 */
id object_copy(id obj, size_t size)
{
	// Fix: guard against nil input instead of crashing in the copy below.
	if (nil == obj) { return nil; }
	Class cls = object_getClass(obj);
	id cpy = class_createInstance(cls, size - class_getInstanceSize(cls));
	// Fix: class_createInstance can return nil; don't memcpy through it.
	if (nil == cpy) { return nil; }
	memcpy(((char*)cpy + sizeof(id)), ((char*)obj + sizeof(id)), size - sizeof(id));
	return cpy;
}
/**
 * Destroys an object: runs its .cxx_destruct chain, then frees the storage
 * via the installed GC ops.  Always returns nil; a nil argument is a no-op.
 */
id object_dispose(id obj)
{
	// Fix: tolerate nil instead of walking the class chain of a nil object.
	if (nil == obj) { return nil; }
	call_cxx_destruct(obj);
	gc->free_object(obj);
	return nil;
}
/**
 * Returns the Method for a selector, searching the class and its
 * superclasses.  Uses the dispatch table when one is installed; otherwise
 * walks the method lists recursively.
 */
Method class_getInstanceMethod(Class aClass, SEL aSelector)
{
	CHECK_ARG(aClass);
	CHECK_ARG(aSelector);
	// If the class has a dtable installed, then we can use the fast path
	if (classHasInstalledDtable(aClass))
	{
		// Do a dtable lookup to find out which class the method comes from.
		struct objc_slot2 *slot = objc_get_slot2(aClass, aSelector, NULL);
		if (NULL == slot)
		{
			// Retry with the untyped registration of the same name, in case
			// the caller passed a typed selector that was never registered.
			slot = objc_get_slot2(aClass, sel_registerName(sel_getName(aSelector)), NULL);
			if (NULL == slot)
			{
				return NULL;
			}
		}
		// Slots are the same as methods.
		return (struct objc_method*)slot;
	}
	// Slow path: no dtable yet, so scan this class's own lists…
	Method m = class_getInstanceMethodNonrecursive(aClass, aSelector);
	if (NULL != m)
	{
		return m;
	}
	// …then recurse into the superclass (CHECK_ARG terminates at Nil).
	return class_getInstanceMethod(class_getSuperclass(aClass), aSelector);
}
/**
 * Class methods are instance methods of the metaclass, so delegate the
 * lookup there.
 */
Method class_getClassMethod(Class aClass, SEL aSelector)
{
	Class meta = object_getClass((id)aClass);
	return class_getInstanceMethod(meta, aSelector);
}
// Note: We don't have compiler support for cvars in ObjC, so class
// variables are modelled as ivars of the metaclass.
Ivar class_getClassVariable(Class cls, const char* name)
{
	Class meta = object_getClass((id)cls);
	return class_getInstanceVariable(meta, name);
}
/**
 * Returns the declared instance size of a class, or 0 for Nil.
 */
size_t class_getInstanceSize(Class cls)
{
	return (Nil == cls) ? 0 : cls->instance_size;
}
/**
 * Finds a named instance variable, searching the class and then each
 * superclass in turn.  Returns NULL if the name is NULL or not found.
 */
Ivar class_getInstanceVariable(Class cls, const char *name)
{
	if (NULL == name)
	{
		return NULL;
	}
	for ( ; cls != Nil ; cls = class_getSuperclass(cls))
	{
		struct objc_ivar_list *ivarlist = cls->ivars;
		if (NULL == ivarlist)
		{
			continue;
		}
		for (int i = 0; i < ivarlist->count; i++)
		{
			Ivar candidate = ivar_at_index(ivarlist, i);
			if (strcmp(candidate->name, name) == 0)
			{
				return candidate;
			}
		}
	}
	return NULL;
}
// The format of the char* is undocumented. This function is only ever used in
// conjunction with class_setIvarLayout().
// NOTE(review): the returned blob is simply the class's raw ivar list,
// exposed as an opaque pointer; class_setIvarLayout() deep-copies it back.
const char *class_getIvarLayout(Class cls)
{
	CHECK_ARG(cls);
	return (char*)cls->ivars;
}
/**
 * Returns a class's name; the literal string "nil" is returned for Nil so
 * callers can print the result unconditionally.
 */
const char * class_getName(Class cls)
{
	return (Nil == cls) ? "nil" : cls->name;
}
/**
 * Returns the user-visible version number of a class (0 for Nil via
 * CHECK_ARG).
 */
int class_getVersion(Class theClass)
{
	CHECK_ARG(theClass);
	int version = theClass->version;
	return version;
}
/**
 * Weak ivar layouts are not implemented in this runtime; calling this is a
 * programming error and aborts in debug builds.
 */
const char *class_getWeakIvarLayout(Class cls)
{
	assert(0 && "Weak ivars not supported");
	return NULL;
}
/**
 * Returns whether a class object is a metaclass (NO for Nil via CHECK_ARG).
 */
BOOL class_isMetaClass(Class cls)
{
	CHECK_ARG(cls);
	BOOL isMeta = objc_test_class_flag(cls, objc_class_flag_meta);
	return isMeta;
}
/**
 * Replaces the implementation of a method declared directly on `cls`,
 * returning the previous IMP.  If the class does not declare the method
 * itself, the method is added instead and NULL is returned.
 */
IMP class_replaceMethod(Class cls, SEL name, IMP imp, const char *types)
{
	if (Nil == cls) { return (IMP)0; }
	// Use a typed selector so the lookup matches typed registrations.
	SEL typedSel = sel_registerTypedName_np(sel_getName(name), types);
	Method method = class_getInstanceMethodNonrecursive(cls, typedSel);
	if (NULL != method)
	{
		IMP previous = (IMP)method->imp;
		method->imp = imp;
		return previous;
	}
	// Not declared on this class (inherited or absent): add it here.
	class_addMethod(cls, typedSel, imp, types);
	return NULL;
}
/**
 * Installs an ivar layout previously obtained from class_getIvarLayout().
 * The opaque blob is an objc_ivar_list snapshot; it is deep-copied here.
 * NOTE(review): any previous cls->ivars list is overwritten without being
 * freed — presumably callers only use this on freshly created classes;
 * confirm before reusing elsewhere.
 */
void class_setIvarLayout(Class cls, const char *layout)
{
	if ((Nil == cls) || (NULL == layout)) { return; }
	struct objc_ivar_list *list = (struct objc_ivar_list*)layout;
	// Size of the header plus `count` trailing ivar entries.
	size_t listsize = sizeof(struct objc_ivar_list) +
		sizeof(struct objc_ivar) * (list->count);
	cls->ivars = malloc(listsize);
	memcpy(cls->ivars, list, listsize);
}
/**
 * Reparents a class under a new superclass, fixing up the subclass lists,
 * the metaclass hierarchy, and (for initialized classes) the dispatch
 * tables.  Deprecated: this is inherently fragile.  Returns the previous
 * superclass.
 */
__attribute__((deprecated))
Class class_setSuperclass(Class cls, Class newSuper)
{
	CHECK_ARG(cls);
	CHECK_ARG(newSuper);
	Class oldSuper;
	if (Nil == cls) { return Nil; }
	// Scope the runtime lock: the dtable updates at the end must run after
	// it is released.
	{
		LOCK_RUNTIME_FOR_SCOPE();
		oldSuper = cls->super_class;
		if (oldSuper == newSuper) { return newSuper; }
		// Detach from the old superclass's subclass list before relinking.
		safe_remove_from_subclass_list(cls);
		objc_resolve_class(newSuper);
		cls->super_class = newSuper;
		// The super class's subclass list is used in certain method resolution scenarios.
		cls->sibling_class = cls->super_class->subclass_list;
		cls->super_class->subclass_list = cls;
		if (UNLIKELY(class_isMetaClass(cls)))
		{
			// newSuper is presumably a metaclass. Its isa will therefore be the appropriate root metaclass.
			cls->isa = newSuper->isa;
		}
		else
		{
			Class meta = cls->isa, newSuperMeta = newSuper->isa;
			// Update the metaclass's superclass, mirroring the work done
			// above for the class itself.
			safe_remove_from_subclass_list(meta);
			objc_resolve_class(newSuperMeta);
			meta->super_class = newSuperMeta;
			meta->isa = newSuperMeta->isa;
			// The super class's subclass list is used in certain method resolution scenarios.
			meta->sibling_class = newSuperMeta->subclass_list;
			newSuperMeta->subclass_list = meta;
		}
		LOCK_FOR_SCOPE(&initialize_lock);
		if (!objc_test_class_flag(cls, objc_class_flag_initialized))
		{
			// Uninitialized classes don't have dtables to update
			// and don't need their superclasses initialized.
			return oldSuper;
		}
	}
	objc_send_initialize((id)newSuper); // also initializes the metaclass
	// Rebuild both dtables against the new superclass chain.
	objc_update_dtable_for_new_superclass(cls->isa, newSuper->isa);
	objc_update_dtable_for_new_superclass(cls, newSuper);
	return oldSuper;
}
/**
 * Sets the user-visible version number of a class; no-op for Nil.
 */
void class_setVersion(Class theClass, int version)
{
	if (Nil != theClass)
	{
		theClass->version = version;
	}
}
/**
 * Weak ivar layouts are not implemented in this runtime; calling this is a
 * programming error and aborts in debug builds.
 */
void class_setWeakIvarLayout(Class cls, const char *layout)
{
	assert(0 && "Not implemented");
}
/**
 * Returns an ivar's name (NULL for a NULL ivar via CHECK_ARG).
 */
const char * ivar_getName(Ivar ivar)
{
	CHECK_ARG(ivar);
	const char *name = ivar->name;
	return name;
}
/**
 * Returns an ivar's byte offset within an instance (0 for NULL via
 * CHECK_ARG).  The offset is stored indirectly via a pointer so the runtime
 * can slide it.
 */
ptrdiff_t ivar_getOffset(Ivar ivar)
{
	CHECK_ARG(ivar);
	ptrdiff_t offset = *ivar->offset;
	return offset;
}
/**
 * Returns an ivar's type encoding string (NULL for a NULL ivar via
 * CHECK_ARG).
 */
const char * ivar_getTypeEncoding(Ivar ivar)
{
	CHECK_ARG(ivar);
	const char *encoding = ivar->type;
	return encoding;
}
/**
 * Swizzles two methods by swapping their IMPs in place.  No-op if either
 * method is NULL.
 */
void method_exchangeImplementations(Method m1, Method m2)
{
	if ((NULL == m1) || (NULL == m2))
	{
		return;
	}
	IMP first = (IMP)m1->imp;
	m1->imp = m2->imp;
	m2->imp = first;
}
/**
 * Returns a method's implementation pointer, or NULL for a NULL method.
 */
IMP method_getImplementation(Method method)
{
	return (NULL == method) ? (IMP)NULL : (IMP)method->imp;
}
/**
 * Returns a method's selector, or NULL for a NULL method.
 */
SEL method_getName(Method method)
{
	return (NULL == method) ? (SEL)NULL : (SEL)method->selector;
}
/**
 * Installs a new implementation on a method and returns the previous one.
 * Returns NULL without side effects for a NULL method.
 */
IMP method_setImplementation(Method method, IMP imp)
{
	if (NULL == method)
	{
		return (IMP)NULL;
	}
	IMP previous = (IMP)method->imp;
	method->imp = imp;
	return previous;
}
/**
 * Looks up a class by name and aborts the process if it does not exist.
 * Use when a missing class is unrecoverable (matches the documented
 * contract of the Apple runtime's objc_getRequiredClass).
 */
id objc_getRequiredClass(const char *name)
{
	CHECK_ARG(name);
	id cls = objc_getClass(name);
	if (nil == cls)
	{
		// Fix: emit a diagnostic before dying — a bare abort() gives the
		// user no hint about which class was missing.
		fprintf(stderr, "objc_getRequiredClass: class '%s' not found\n", name);
		abort();
	}
	return cls;
}
/**
 * Frees every method list attached to a class, including the strdup'd type
 * strings owned by each entry.
 */
PRIVATE void freeMethodLists(Class aClass)
{
	struct objc_method_list *node = aClass->methods;
	while (NULL != node)
	{
		// Grab the successor before freeing the node itself.
		struct objc_method_list *next = node->next;
		for (int i=0 ; i<node->count ; i++)
		{
			free((void*)method_at_index(node, i)->types);
		}
		free(node);
		node = next;
	}
}
/**
 * Frees a class's ivar list and the per-ivar strings and offset storage.
 */
PRIVATE void freeIvarLists(Class aClass)
{
	struct objc_ivar_list *ivarlist = aClass->ivars;
	if (NULL == ivarlist) { return; }
	if (ivarlist->count > 0)
	{
		// For dynamically created classes, ivar offset variables are allocated
		// as a contiguous range starting with the first one.
		// (See objc_registerClassPair: one calloc'd int array serves every
		// ivar, so freeing the first ivar's offset pointer frees them all.)
		free(ivar_at_index(ivarlist, 0)->offset);
	}
	for (int i=0 ; i<ivarlist->count ; i++)
	{
		// name and type were strdup'd in class_addIvar.
		Ivar ivar = ivar_at_index(ivarlist, i);
		free((void*)ivar->type);
		free((void*)ivar->name);
	}
	free(ivarlist);
}
/*
 * Removes a class from the subclass list found on its super class.
 * Must be called with the objc runtime mutex locked.
 */
static inline void safe_remove_from_subclass_list(Class cls)
{
	// If this class hasn't been added to the class hierarchy, then this is easy
	if (!objc_test_class_flag(cls, objc_class_flag_resolved)) { return; }
	Class head = cls->super_class->subclass_list;
	// Head of the singly linked list: just advance the head pointer.
	if (head == cls)
	{
		cls->super_class->subclass_list = cls->sibling_class;
		return;
	}
	// Otherwise walk siblings until we find the predecessor and unlink.
	for (Class prev = head ; prev != NULL ; prev = prev->sibling_class)
	{
		if (prev->sibling_class == cls)
		{
			prev->sibling_class = cls->sibling_class;
			return;
		}
	}
}
/**
 * Destroys a class pair created with objc_allocateClassPair, tearing down
 * the class and its metaclass.  Order matters throughout: the class must be
 * unhooked from the runtime before anything is freed.
 */
void objc_disposeClassPair(Class cls)
{
	if (0 == cls) { return; }
	Class meta = ((id)cls)->isa;
	// Remove from the runtime system so nothing tries updating the dtable
	// while we are freeing the class.
	{
		LOCK_RUNTIME_FOR_SCOPE();
		safe_remove_from_subclass_list(meta);
		safe_remove_from_subclass_list(cls);
		class_table_remove(cls);
	}
	// Free the method and ivar lists.
	freeMethodLists(cls);
	freeMethodLists(meta);
	// Only the class carries ivars; the metaclass has none to free.
	freeIvarLists(cls);
	// The shared uninstalled_dtable sentinel must never be freed.
	if (cls->dtable != uninstalled_dtable)
	{
		free_dtable(cls->dtable);
	}
	if (meta->dtable != uninstalled_dtable)
	{
		free_dtable(meta->dtable);
	}
	// Free the class and metaclass
	gc->free(meta);
	gc->free(cls);
}
/**
 * Allocates (but does not register) a new class/metaclass pair.
 *
 * @param superclass  The superclass, or Nil for a new root class.
 * @param extraBytes  Extra storage appended to the class object
 *                    (see object_getIndexedIvars).
 * Returns Nil if the name is taken or allocation fails.  The result must be
 * passed to objc_registerClassPair() before use.
 */
Class objc_allocateClassPair(Class superclass, const char *name, size_t extraBytes)
{
	// Check the class doesn't already exist.
	if (nil != objc_lookUpClass(name)) { return Nil; }
	Class newClass = gc->malloc(sizeof(struct objc_class) + extraBytes);
	if (Nil == newClass) { return Nil; }
	// Create the metaclass
	Class metaClass = gc->malloc(sizeof(struct objc_class));
	// Fix: the metaclass allocation was dereferenced without a check, unlike
	// newClass just above; also avoid leaking newClass on this path.
	if (Nil == metaClass)
	{
		gc->free(newClass);
		return Nil;
	}
	if (Nil == superclass)
	{
		/*
		 * Metaclasses of root classes are precious little flowers and work a
		 * little differently:
		 */
		metaClass->isa = metaClass;
		metaClass->super_class = newClass;
	}
	else
	{
		// Initialize the metaclass
		// Set the meta-metaclass pointer to the name.  The runtime will fix this
		// in objc_resolve_class().
		// If the superclass is not yet resolved, then we need to look it up
		// via the class table.
		metaClass->isa = superclass->isa;
		metaClass->super_class = superclass->isa;
	}
	metaClass->name = strdup(name);
	metaClass->info = objc_class_flag_meta | objc_class_flag_user_created;
	metaClass->dtable = uninstalled_dtable;
	metaClass->instance_size = sizeof(struct objc_class);
	// Set up the new class
	newClass->isa = metaClass;
	newClass->super_class = superclass;
	newClass->name = strdup(name);
	newClass->info = objc_class_flag_user_created;
	newClass->dtable = uninstalled_dtable;
	newClass->abi_version = 2;
	metaClass->abi_version = 2;
	if (Nil == superclass)
	{
		// A root class starts with just enough space for its isa pointer.
		newClass->instance_size = sizeof(struct objc_class*);
	}
	else
	{
		newClass->instance_size = superclass->instance_size;
	}
	return newClass;
}
/**
 * Returns a pointer to the extra storage allocated past an object's declared
 * ivars (the `extraBytes` from class_createInstance / objc_allocateClassPair).
 */
void *object_getIndexedIvars(id obj)
{
	CHECK_ARG(obj);
	Class cls = classForObject(obj);
	size_t size = cls->instance_size;
	// Metaclasses can report 0; their real footprint is a full class struct.
	if ((0 == size) && class_isMetaClass(cls))
	{
		size = sizeof(struct objc_class);
	}
	return ((char*)obj) + size;
}
/**
 * Returns an object's visible class, skipping any runtime-internal hidden
 * classes interposed above it (e.g. for associated objects).
 */
Class object_getClass(id obj)
{
	CHECK_ARG(obj);
	Class cls = classForObject(obj);
	// Walk upwards until the class is not flagged as hidden.
	while ((Nil != cls) && objc_test_class_flag(cls, objc_class_flag_hidden_class))
	{
		cls = cls->super_class;
	}
	return cls;
}
/**
 * Replaces an object's isa pointer, returning the previous class.  Tagged
 * (small) objects have their class encoded in the pointer itself, so the
 * request is ignored and their current class returned.
 */
Class object_setClass(id obj, Class cls)
{
	CHECK_ARG(obj);
	// If this is a small object, then don't set its class.
	if (isSmallObject(obj))
	{
		return classForObject(obj);
	}
	Class previous = obj->isa;
	obj->isa = cls;
	return previous;
}
/**
 * Returns the name of an object's class (NULL for a nil object via
 * CHECK_ARG).
 */
const char *object_getClassName(id obj)
{
	CHECK_ARG(obj);
	Class cls = object_getClass(obj);
	return class_getName(cls);
}
/**
 * Registers a class pair created with objc_allocateClassPair with the
 * runtime, making it visible to lookups and resolving it into the class
 * hierarchy.
 */
void objc_registerClassPair(Class cls)
{
	// Fix: guard against Nil instead of dereferencing it below.
	if (Nil == cls) { return; }
	if (cls->ivars != NULL)
	{
		// Re-home the ivar offsets (stored as immediate values by
		// class_addIvar) into one contiguous heap array of ints; this block
		// is intentionally not freed here — freeIvarLists() releases it via
		// the first ivar's offset pointer.
		int *ptrs = calloc(cls->ivars->count, sizeof(int));
		// Fix: check the allocation; leave the class unregistered on OOM
		// rather than crashing.
		if (NULL == ptrs) { return; }
		for (int i=0 ; i<cls->ivars->count ; i++)
		{
			ptrs[i] = (int)(intptr_t)ivar_at_index(cls->ivars, i)->offset;
			ivar_at_index(cls->ivars, i)->offset = &ptrs[i];
		}
	}
	LOCK_RUNTIME_FOR_SCOPE();
	class_table_insert(cls);
	objc_resolve_class(cls);
}

@ -0,0 +1,254 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include "sarray2.h"
#include "visibility.h"
// Canonical shared empty subtrees, one per tree level.  Every empty slot in
// a parent node points at the appropriate one of these, so empty regions of
// the tree cost no memory.  They are const and must never be freed or
// written through (SparseArrayDestroy and the insert/copy paths explicitly
// test for and skip them).  The `[0 ... 255]` designated-range initialiser
// is a GNU extension.
const static SparseArray EmptyArray = { 0, 0, .data[0 ... 255] = 0 };
const static SparseArray EmptyArray8 = { 8, 0, .data[0 ... 255] = (void*)&EmptyArray};
const static SparseArray EmptyArray16 = { 16, 0, .data[0 ... 255] = (void*)&EmptyArray8};
const static SparseArray EmptyArray24 = { 24, 0, .data[0 ... 255] = (void*)&EmptyArray16};
// Highest valid child index in a node.  Nodes always have 256 slots, so the
// argument is currently unused.
#define MAX_INDEX(sarray) (0xff)
// Tweak this value to trade speed for memory usage. Bigger values use more
// memory, but give faster lookups.
#define base_shift 8
#define base_mask ((1<<base_shift) - 1)
/**
 * Maps a parent node's shift to the canonical shared empty subtree used for
 * its children (one level further down).  Any other shift indicates a
 * corrupted tree.
 */
static void *EmptyChildForShift(uint32_t shift)
{
	if (8 == shift)
	{
		return (void*)&EmptyArray;
	}
	if (16 == shift)
	{
		return (void*)&EmptyArray8;
	}
	if (24 == shift)
	{
		return (void*)&EmptyArray16;
	}
	UNREACHABLE("Broken sparse array");
}
/**
 * Points every child slot of an interior node at the shared empty subtree
 * for its level.  Leaf nodes (shift == 0) hold values, not children, so they
 * are left zeroed.
 */
static void init_pointers(SparseArray * sarray)
{
	if (0 == sarray->shift)
	{
		return;
	}
	void *empty = EmptyChildForShift(sarray->shift);
	for (unsigned i=0 ; i<=MAX_INDEX(sarray) ; i++)
	{
		sarray->data[i] = empty;
	}
}
/**
 * Creates a sparse array whose root consumes the top `depth` bits of a key;
 * the bottom base_shift bits index the leaf level.
 */
PRIVATE SparseArray * SparseArrayNewWithDepth(uint32_t depth)
{
	SparseArray *root = calloc(1, sizeof(SparseArray));
	root->refCount = 1;
	root->shift = depth - base_shift;
	// Interior nodes start with all children pointing at the shared empty
	// subtrees.
	init_pointers(root);
	return root;
}
/**
 * Creates a sparse array covering the full 32-bit key space.
 */
PRIVATE SparseArray *SparseArrayNew()
{
	const uint32_t full_key_bits = 32;
	return SparseArrayNewWithDepth(full_key_bits);
}
/**
 * Grows a sparse array so it can address keys needing `new_depth` bits, by
 * wrapping the existing tree in a new root whose slot 0 is the old root.
 * Returns the (possibly new) root.
 * NOTE(review): the early-out compares new_depth (a depth) against
 * sarray->shift (depth - base_shift elsewhere) — confirm callers pass
 * values for which this mismatch is benign; the assert below only enforces
 * new_depth > shift.
 */
PRIVATE SparseArray *SparseArrayExpandingArray(SparseArray *sarray, uint32_t new_depth)
{
	if (new_depth == sarray->shift)
	{
		return sarray;
	}
	assert(new_depth > sarray->shift);
	// Expanding a child sarray has undefined results.
	assert(sarray->refCount == 1);
	SparseArray *new = calloc(1, sizeof(SparseArray));
	new->refCount = 1;
	new->shift = sarray->shift + 8;
	// Old tree becomes child 0; all other children are the shared empty
	// subtree for the new level.
	new->data[0] = sarray;
	void *data = EmptyChildForShift(new->shift);
	for(unsigned i=1 ; i<=MAX_INDEX(sarray) ; i++)
	{
		new->data[i] = data;
	}
	// Now, any lookup in sarray for any value less than its capacity will have
	// all non-zero values shifted away, resulting in 0. All lookups will
	// therefore go to the new sarray.
	return new;
}
/**
 * Finds the next non-empty entry at or after *index, updating *index to the
 * key of the returned value (or past the searched range).  Returns
 * SARRAY_EMPTY when no entry remains.  The recursion threads *index through
 * child nodes so the caller sees a single monotonically advancing cursor.
 */
static void *SparseArrayFind(SparseArray * sarray, uint32_t * index)
{
	// Child slot within this node for the current cursor position.
	uint32_t j = MASK_INDEX((*index));
	uint32_t max = MAX_INDEX(sarray);
	if (sarray->shift == 0)
	{
		// Leaf node: scan values directly, advancing the cursor as we go.
		while (j<=max)
		{
			if (sarray->data[j] != SARRAY_EMPTY)
			{
				return sarray->data[j];
			}
			(*index)++;
			j++;
		}
	}
	else while (j<max)
	{
		// If the shift is not 0, then we need to recursively look at child
		// nodes.
		// Mask that clears all index bits below this node's child field.
		uint32_t zeromask = ~((0xff << sarray->shift) >> base_shift);
		while (j<max)
		{
			//Look in child nodes
			SparseArray *child = sarray->data[j];
			// Skip over known-empty children
			if ((&EmptyArray == child) ||
				(&EmptyArray8 == child) ||
				(&EmptyArray16 == child) ||
				(&EmptyArray24 == child))
			{
				//Add 2^n to index so j is still correct
				(*index) += 1<<sarray->shift;
				//Zero off the next component of the index so we don't miss any.
				*index &= zeromask;
			}
			else
			{
				// The recursive call will set index to the correct value for
				// the next index, but won't update j
				void * ret = SparseArrayFind(child, index);
				if (ret != SARRAY_EMPTY)
				{
					return ret;
				}
			}
			//Go to the next child
			j++;
		}
	}
	return SARRAY_EMPTY;
}
/**
 * Iterator step: advances *idx past the current position and returns the
 * next non-empty entry (or SARRAY_EMPTY when exhausted).
 */
PRIVATE void *SparseArrayNext(SparseArray * sarray, uint32_t * idx)
{
	*idx += 1;
	return SparseArrayFind(sarray, idx);
}
/**
 * Stores `value` at `index`, materialising interior/leaf nodes along the
 * path as needed and copying any copy-on-write (refCount > 1) shared
 * subtrees before modifying them.
 */
PRIVATE void SparseArrayInsert(SparseArray * sarray, uint32_t index, void *value)
{
	if (sarray->shift > 0)
	{
		// Child slot for this level of the key.
		uint32_t i = MASK_INDEX(index);
		SparseArray *child = sarray->data[i];
		if ((&EmptyArray == child) ||
			(&EmptyArray8 == child) ||
			(&EmptyArray16 == child) ||
			(&EmptyArray24 == child))
		{
			// Insert missing nodes
			SparseArray * newsarray = calloc(1, sizeof(SparseArray));
			newsarray->refCount = 1;
			// The child is one level down; clamp the shift at 0 for leaves.
			if (base_shift >= sarray->shift)
			{
				newsarray->shift = 0;
			}
			else
			{
				newsarray->shift = sarray->shift - base_shift;
			}
			init_pointers(newsarray);
			sarray->data[i] = newsarray;
			child = newsarray;
		}
		else if (child->refCount > 1)
		{
			// Copy the copy-on-write part of the tree
			sarray->data[i] = SparseArrayCopy(child);
			// Drop our reference to the shared original.
			SparseArrayDestroy(child);
			child = sarray->data[i];
		}
		SparseArrayInsert(child, index, value);
	}
	else
	{
		// Leaf node: store the value directly.
		sarray->data[MASK_INDEX(index)] = value;
	}
}
/**
 * Makes a lazy (copy-on-write) copy of a node: the node itself is cloned,
 * while children are shared by bumping their reference counts.  Children are
 * only deep-copied later, when SparseArrayInsert needs to mutate one.
 */
PRIVATE SparseArray *SparseArrayCopy(SparseArray * sarray)
{
	SparseArray *copy = calloc(sizeof(SparseArray), 1);
	memcpy(copy, sarray, sizeof(SparseArray));
	// The clone is a fresh object with a single owner.
	copy->refCount = 1;
	// If the sarray has children, increase their refcounts and link them
	if (sarray->shift > 0)
	{
		for (unsigned int i = 0 ; i<=MAX_INDEX(sarray); i++)
		{
			SparseArray *child = copy->data[i];
			// The shared empty sentinels are not refcounted.
			if (!(child == &EmptyArray ||
			      child == &EmptyArray8 ||
			      child == &EmptyArray16 ||
			      child == &EmptyArray24))
			{
				__sync_fetch_and_add(&child->refCount, 1);
			}
			// Non-lazy copy.  Uncomment if debugging
			// copy->data[i] = SparseArrayCopy(copy->data[i]);
		}
	}
	return copy;
}
/**
 * Releases one reference to a node, freeing the node (and recursively its
 * children) when the last reference is dropped.  The shared immutable empty
 * sentinels are never freed or refcounted.
 */
PRIVATE void SparseArrayDestroy(SparseArray * sarray)
{
	// The const sentinels must not be touched at all.
	if ((sarray == &EmptyArray) ||
	    (sarray == &EmptyArray8) ||
	    (sarray == &EmptyArray16) ||
	    (sarray == &EmptyArray24))
	{
		return;
	}
	// Drop a reference; only the last owner tears the subtree down.
	if (__sync_sub_and_fetch(&sarray->refCount, 1) > 0)
	{
		return;
	}
	if (sarray->shift > 0)
	{
		for (uint32_t i=0 ; i<data_size ; i++)
		{
			SparseArrayDestroy((SparseArray*)sarray->data[i]);
		}
	}
	free(sarray);
}
#if 0
// Unused function, but helpful when debugging.
// Returns the total heap footprint of a sparse array tree in bytes.
PRIVATE int SparseArraySize(SparseArray *sarray)
{
	int size = 0;
	if (sarray->shift == 0)
	{
		return 256*sizeof(void*) + sizeof(SparseArray);
	}
	size += 256*sizeof(void*) + sizeof(SparseArray);
	for(unsigned i=0 ; i<=MAX_INDEX(sarray) ; i++)
	{
		SparseArray *child = sarray->data[i];
		// Fix: also skip &EmptyArray24, which was missing from this list —
		// every other traversal in this file skips all four sentinels, and
		// counting a shared sentinel would inflate the reported size.
		if (child == &EmptyArray ||
			child == &EmptyArray8 ||
			child == &EmptyArray16 ||
			child == &EmptyArray24)
		{
			continue;
		}
		size += SparseArraySize(child);
	}
	return size;
}
#endif

@ -0,0 +1,741 @@
/**
* Handle selector uniquing.
*
* When building, you may define TYPE_DEPENDENT_DISPATCH to enable message
* sends to depend on their types.
*/
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <ctype.h>
#include <vector>
#include <mutex>
#include <forward_list>
#include <tsl/robin_set.h>
#include "class.h"
#include "lock.h"
#include "method.h"
#include "objc/runtime.h"
#include "pool.hh"
#include "selector.h"
#include "string_hash.h"
#include "visibility.h"
#ifdef TYPE_DEPENDENT_DISPATCH
# define TDD(x) x
#else
# define TDD(x)
#endif
namespace {
/**
 * Type representing a selector that has not been registered with the runtime.
 *
 * This is used only for looking up entries in the selector table, it is never
 * stored.
 */
struct UnregisteredSelector
{
	/// The selector name.
	const char *name;
	/// The type encoding of the selector (may be null for untyped selectors).
	const char *types;
};
/**
 * Class for holding the name and list of types for a selector.  With
 * type-dependent dispatch, we store all of the types that we've seen for each
 * selector name alongside the untyped variant of the selector.  When a
 * selector is registered with the runtime, its name is replaced with the UID
 * (dtable index) used for dispatch and we use the first element of the types
 * list to store the name.
 *
 * In the common case, this will have 1-2 entries.
 */
struct TypeList : public std::forward_list<const char*>
{
	/// The superclass type.
	using Super = std::forward_list<const char*>;
	/// Inherit constructors
	using Super::forward_list;
	/// Get the name of the selector represented by this list.
	/// The name is always the first element (see class comment).
	const char *name()
	{
		return front();
	}
	/**
	 * Begin iterator.  This skips the name and returns an iterator to the
	 * first type.  Note that this intentionally hides the base-class
	 * begin(); use Super::begin() for the raw list including the name.
	 */
	auto begin()
	{
		return ++(std::forward_list<const char*>::begin());
	}
	/**
	 * Add a type.  The order of types is not defined and so, for simplicity,
	 * we store new ones immediately after the name element.
	 */
	void add_types(const char *types)
	{
		// Types cannot be added to an empty type list, a name is the first element.
		assert(!empty());
		insert_after(Super::begin(), types);
	}
};
/**
* Mapping from selector numbers to selector names, followed by types.
*
 * Note: This must be a pointer so that we do not hit issues with the order
 * in which C++ global constructors and destructors run.
 */
std::vector<TypeList> *selector_list;
/**
* Lock protecting the selector table.
*/
RecursiveMutex selector_table_lock;
/// Type to use as a lock guard
using LockGuard = std::lock_guard<decltype(selector_table_lock)>;
/// Looks up the TypeList for a selector index.  The caller must already
/// hold selector_table_lock.  Returns nullptr for out-of-range indexes.
inline TypeList *selLookup_locked(uint32_t idx)
{
	auto &table = *selector_list;
	return (idx < table.size()) ? &table[idx] : nullptr;
}
/// Locking wrapper around selLookup_locked().
inline TypeList *selLookup(uint32_t idx)
{
	LockGuard guard{selector_table_lock};
	TypeList *entry = selLookup_locked(idx);
	return entry;
}
/// A selector is registered iff its index is a valid slot in the table.
BOOL isSelRegistered(SEL sel)
{
	return (sel->index < selector_list->size()) ? YES : NO;
}
/// Gets the name of a registered selector.  The caller guarantees that
/// sel->index is a valid table slot, so the lookup cannot fail here.
const char *sel_getNameRegistered(SEL sel)
{
	// Fix: removed the dead local `const char *name = sel->name;` — it was
	// never read and its value was not the registered name anyway.
	return selLookup_locked(sel->index)->name();
}
/**
 * Gets the name of a selector that might not have been registered.  This
 * should be used only on legacy-ABI compatibility code paths.  Never returns
 * nullptr: an unresolvable name comes back as the empty string.
 */
const char *sel_getNameNonUnique(SEL sel)
{
	const char *name = sel->name;
	if (isSelRegistered(sel))
	{
		// Registered selectors store their name in the type table; the raw
		// `name` field holds the dispatch index instead.
		auto *entry = selLookup_locked(sel->index);
		name = (entry != nullptr) ? entry->name() : nullptr;
	}
	return (nullptr == name) ? "" : name;
}
/**
* Skip anything in a type encoding that is irrelevant to the comparison
* between selectors, including type qualifiers and argframe info.
*/
static const char *skip_irrelevant_type_info(const char *t)
{
switch (*t)
{
default: return t;
case 'r': case 'n': case 'N': case 'o': case 'O': case 'R':
case 'V': case 'A': case '!': case '0'...'9':
return skip_irrelevant_type_info(t+1);
}
}
/**
 * Compares two selector type encodings for dispatch equality.  Qualifiers
 * and argframe digits are ignored, and '*' is treated as equal to '^c'/'^C'.
 * A nullptr encoding only matches another nullptr encoding.
 */
static BOOL selector_types_equal(const char *t1, const char *t2)
{
if (t1 == nullptr || t2 == nullptr) { return t1 == t2; }
while (('\0' != *t1) && ('\0' != *t2))
{
// Strip qualifiers and frame-offset digits before each comparison step.
t1 = skip_irrelevant_type_info(t1);
t2 = skip_irrelevant_type_info(t2);
// This is a really ugly hack. For some stupid reason, the people
// designing Objective-C type encodings decided to allow * as a
// shorthand for char*, because strings are 'special'. Unfortunately,
// FSF GCC generates "*" for @encode(BOOL*), while Clang and Apple GCC
// generate "^c" or "^C" (depending on whether BOOL is declared
// unsigned).
//
// The correct fix is to remove * completely from type encodings, but
// unfortunately my time machine is broken so I can't travel to 1986
// and apply a cluebat to those responsible.
if ((*t1 == '*') && (*t2 != '*'))
{
// Accept '^c'/'^C' on the other side: advance past the '^' so both
// now point at a char encoding.
if (*t2 == '^' && (((*(t2+1) == 'C') || (*(t2+1) == 'c'))))
{
t2++;
}
else
{
return NO;
}
}
else if ((*t2 == '*') && (*t1 != '*'))
{
if (*t1 == '^' && (((*(t1+1) == 'C') || (*(t1+1) == 'c'))))
{
t1++;
}
else
{
return NO;
}
}
else if (*t1 != *t2)
{
return NO;
}
// Advance both cursors, but never past the terminating NUL.
if ('\0' != *t1) { t1++; }
if ('\0' != *t2) { t2++; }
}
return YES;
}
#ifdef TYPE_DEPENDENT_DISPATCH
/**
 * As selector_types_equal(), except that an untyped (nullptr) encoding is
 * considered equivalent to any typed encoding for dispatch purposes.
 */
static BOOL selector_types_equivalent(const char *t1, const char *t2)
{
// We always treat untyped selectors as having the same type as typed
// selectors, for dispatch purposes.
if (t1 == nullptr || t2 == nullptr) { return YES; }
return selector_types_equal(t1, t2);
}
#endif
/**
 * Compare selectors based on whether they are treated as equivalent for the
 * purpose of dispatch.  With TYPE_DEPENDENT_DISPATCH both the name and the
 * type encoding must match; otherwise only the name is compared.
 */
struct SelectorEqual
{
/// Opt into heterogeneous lookup
using is_transparent = void;
/// Compare two registered selectors
bool operator()(const SEL a, const SEL b) const
{
#ifdef TYPE_DEPENDENT_DISPATCH
return string_compare(sel_getNameRegistered(a), sel_getNameRegistered(b)) &&
selector_types_equal(sel_getType_np(a), sel_getType_np(b));
#else
return string_compare(sel_getNameRegistered(a), sel_getNameRegistered(b));
#endif
}
/// Compare an unregistered and registered selector
bool operator()(const UnregisteredSelector &a, const SEL b) const
{
#ifdef TYPE_DEPENDENT_DISPATCH
return string_compare(a.name, sel_getNameRegistered(b)) &&
selector_types_equal(a.types, sel_getType_np(b));
#else
return string_compare(a.name, sel_getNameRegistered(b));
#endif
}
/// Compare a registered and unregistered selector
bool operator()(const SEL b, const UnregisteredSelector &a) const
{
// Symmetric case: delegate to the (unregistered, registered) overload.
return (*this)(a, b);
}
};
/**
* Compare whether two selectors are identical.
*/
static int selector_identical(const UnregisteredSelector &key,
const SEL value)
{
return SelectorEqual{}(key, value);
}
/**
 * Hash a selector.  Uses a djb2-style hash (h = h*33 + c) over the name and,
 * with TYPE_DEPENDENT_DISPATCH, over a stable subset of the type encoding.
 */
struct SelectorHash
{
size_t hash(const char *name, const char *types) const
{
size_t hash = 5381;
const char *str = name;
size_t c;
while((c = (size_t)*str++))
{
hash = hash * 33 + c;
}
#ifdef TYPE_DEPENDENT_DISPATCH
// We can't use all of the values in the type encoding for the hash,
// because our equality test is a bit more complex than simple string
// encoding (for example, * and ^C have to be considered equivalent, since
// they are both used as encodings for C strings in different situations)
if ((str = types))
{
while((c = (size_t)*str++))
{
switch (c)
{
// Only these encoding characters contribute to the hash, so
// encodings that compare equal also hash equal.
case '@': case 'i': case 'I': case 'l': case 'L':
case 'q': case 'Q': case 's': case 'S':
hash = hash * 33 + c;
}
}
}
#endif
return hash;
}
/// Hash a registered selector via its canonical name and type encoding.
size_t operator()(objc_selector *sel) const
{
return hash(sel_getNameNonUnique(sel), sel_getType_np(sel));
}
/// Hash an unregistered selector directly from its fields.
size_t operator()(const UnregisteredSelector &sel) const
{
return hash(sel.name, sel.types);
}
};
using SelectorAllocator = PoolAllocate<objc_selector>;
using SelectorTable = tsl::robin_set<objc_selector*, SelectorHash, SelectorEqual>;
/**
* Table of registered selector. Maps from selector to selector.
*/
static SelectorTable *selector_table;
static int selector_name_copies;
}
/**
 * Logs selector-table memory statistics to stderr.  The body is compiled out
 * (#if 0): it references the old C hash-table layout (sel_table, table_size,
 * selector_count), which does not exist in this C++ implementation.
 * NOTE(review): re-enable only after porting to the robin_set-based table.
 */
extern "C" PRIVATE void log_selector_memory_usage(void)
{
#if 0
fprintf(stderr, "%lu bytes in selector name list.\n", (unsigned long)(table_size * sizeof(void*)));
fprintf(stderr, "%d bytes in selector names.\n", selector_name_copies);
fprintf(stderr, "%d bytes (%d entries) in selector hash table.\n", (int)(sel_table->table_size *
sizeof(struct selector_table_cell_struct)), sel_table->table_size);
fprintf(stderr, "%d selectors registered.\n", selector_count);
fprintf(stderr, "%d hash table cells per selector (%.2f%% full)\n", sel_table->table_size / selector_count, ((float)selector_count) / sel_table->table_size * 100);
#endif
}
/**
 * Resizes the dtables to ensure that they can store as many selectors as
 * exist.
 */
extern "C" void objc_resize_dtables(uint32_t);
/**
 * Create data structures to store selectors: the index-to-typelist vector
 * (pre-sized to 2^16 entries), the selector hash table (1024 buckets), and
 * the lock that guards them.
 */
extern "C" PRIVATE void init_selector_tables()
{
	selector_table = new SelectorTable(1024);
	selector_list = new std::vector<TypeList>(1<<16);
	selector_table_lock.init();
}
/// Looks up a registered selector by name and type encoding; returns nullptr
/// when no matching selector has been registered.
static SEL selector_lookup(const char *name, const char *types)
{
	UnregisteredSelector query = {name, types};
	LockGuard guard{selector_table_lock};
	auto it = selector_table->find(query);
	if (it == selector_table->end())
	{
		return nullptr;
	}
	return *it;
}
/**
 * Appends a selector to the type list vector and inserts it into the hash
 * table, assigning it the next free index.  Must be called with the selector
 * table lock held.
 */
static inline void add_selector_to_table(SEL aSel)
{
// NOTE(review): std::vector already grows geometrically on push_back, so
// this explicit doubling looks redundant — confirm it is not load-bearing.
if (selector_list->capacity() == selector_list->size())
{
selector_list->reserve(selector_list->capacity() * 2);
}
// Store the name at the head of the list.
selector_list->push_back({aSel->name});
// Set the selector's name to the uid.
aSel->index = selector_list->size() - 1;
// Store the selector in the set.
selector_table->insert(aSel);
}
/**
 * Really registers a selector. Must be called with the selector table locked.
 *
 * Ensures an untyped variant exists for every typed selector, shares a single
 * copy of the name between variants, records the type encoding in the type
 * list, and grows the dispatch tables to cover the new index.
 */
static inline void register_selector_locked(SEL aSel)
{
// A selector without a name cannot be registered.
if (aSel->name == nullptr)
{
return;
}
if (nullptr == aSel->types)
{
// Untyped selector: add it and grow the dtables; nothing else to do.
add_selector_to_table(aSel);
objc_resize_dtables(selector_list->size());
return;
}
SEL untyped = selector_lookup(aSel->name, 0);
// If this has a type encoding, store the untyped version too.
if (untyped == nullptr)
{
untyped = SelectorAllocator::allocate();
untyped->name = aSel->name;
untyped->types = 0;
add_selector_to_table(untyped);
}
else
{
// Make sure we only store one copy of the name
aSel->name = sel_getNameNonUnique(untyped);
}
add_selector_to_table(aSel);
// Add this set of types to the list.
if (aSel->types)
{
(*selector_list)[aSel->index].add_types(aSel->types);
// With type-dependent dispatch the untyped variant also records every
// known encoding (TDD() expands to nothing otherwise).
TDD((*selector_list)[untyped->index].add_types(aSel->types));
}
objc_resize_dtables(selector_list->size());
}
/**
 * Registers a selector. This assumes that the argument is never deallocated.
 * Returns the canonical registered selector, which may be an existing,
 * dispatch-equivalent registration rather than the argument.
 */
extern "C" PRIVATE SEL objc_register_selector(SEL aSel)
{
// Already registered: the index is valid, nothing to do.
if (isSelRegistered(aSel))
{
return aSel;
}
UnregisteredSelector unregistered{aSel->name, aSel->types};
// Check that this isn't already registered, before we try
// NOTE(review): this lookup runs before the lock below, and the locked
// path does not re-check — confirm a concurrent registration of the same
// selector between here and the lock cannot produce a duplicate entry.
SEL registered = selector_lookup(aSel->name, aSel->types);
SelectorEqual eq;
if (nullptr != registered && eq(unregistered, registered))
{
// Share the canonical copy of the name.
aSel->name = registered->name;
return registered;
}
// Extended type encodings (@"ClassName") must never reach the table.
assert(!(aSel->types && (strstr(aSel->types, "@\"") != nullptr)));
LockGuard g{selector_table_lock};
register_selector_locked(aSel);
return aSel;
}
/**
 * Registers a selector by copying the argument.
 *
 * If a dispatch-identical selector is already registered it is returned
 * directly.  Otherwise a new selector structure is allocated; when copyArgs
 * is YES the name and type strings are duplicated (aborting on allocation
 * failure), otherwise the caller guarantees their lifetime.
 */
SEL objc_register_selector_copy(UnregisteredSelector &aSel, BOOL copyArgs)
{
	// Fast path: an identical selector is already registered.
	SEL copy = selector_lookup(aSel.name, aSel.types);
	if ((nullptr != copy) && selector_identical(aSel, copy))
	{
		return copy;
	}
	LockGuard g{selector_table_lock};
	// Repeat the lookup under the lock, in case another thread registered
	// this selector between the unlocked check and acquiring the lock.
	copy = selector_lookup(aSel.name, aSel.types);
	if (nullptr != copy && selector_identical(aSel, copy))
	{
		return copy;
	}
	// Extended type encodings (@"ClassName") must never reach the table.
	assert(!(aSel.types && (strstr(aSel.types, "@\"") != nullptr)));
	// Create a copy of this selector.
	copy = SelectorAllocator::allocate();
	copy->name = aSel.name;
	copy->types = aSel.types;
	if (copyArgs)
	{
		SEL untyped = selector_lookup(aSel.name, 0);
		if (untyped != nullptr)
		{
			// Share the name already stored for the untyped variant.
			copy->name = sel_getName(untyped);
		}
		else
		{
			copy->name = strdup(aSel.name);
			if (copy->name == nullptr)
			{
				fprintf(stderr, "Failed to allocate memory for selector %s\n", aSel.name);
				abort();
			}
			selector_name_copies += strlen(copy->name);
		}
		if (copy->types != nullptr)
		{
			copy->types = strdup(copy->types);
			// FIX: this previously tested copy->name (already validated
			// above) instead of copy->types, so a failed strdup of the type
			// encoding went undetected.
			if (copy->types == nullptr)
			{
				fprintf(stderr, "Failed to allocate memory for selector %s\n", aSel.name);
				abort();
			}
			selector_name_copies += strlen(copy->types);
		}
	}
	// Try to register the copy as the authoritative version
	register_selector_locked(copy);
	return copy;
}
/**
* Public API functions.
*/
extern "C"
{
/// Returns the selector's name; a placeholder for nil and the empty string
/// for a selector with no type-list entry.
const char *sel_getName(SEL sel)
{
	if (nullptr == sel) { return "<null selector>"; }
	auto entry = selLookup(sel->index);
	if (entry == nullptr)
	{
		return "";
	}
	return entry->front();
}
/// Equivalent to sel_registerName(); kept for API compatibility.
SEL sel_getUid(const char *selName)
{
	return sel_registerName(selName);
}
/// Returns YES when two selectors are equal for dispatch purposes.
BOOL sel_isEqual(SEL sel1, SEL sel2)
{
if ((0 == sel1) || (0 == sel2))
{
return sel1 == sel2;
}
// Registered selectors share one canonical name, so pointer equality of
// the name is a sufficient fast path.
if (sel1->name == sel2->name)
{
return YES;
}
// Otherwise, do a slow compare
// (The TDD() clause exists only with type-dependent dispatch: names must
// match and, when both selectors are typed, the encodings must be
// equivalent.)
return string_compare(sel_getNameNonUnique(sel1), sel_getNameNonUnique(sel2)) TDD(&&
(sel1->types == nullptr || sel2->types == nullptr ||
selector_types_equivalent(sel_getType_np(sel1), sel_getType_np(sel2))));
}
/// Registers (or finds) the untyped selector with this name.
SEL sel_registerName(const char *selName)
{
	if (nullptr == selName) { return nullptr; }
	UnregisteredSelector unregistered = {selName, nullptr};
	return objc_register_selector_copy(unregistered, YES);
}
/// Registers (or finds) the selector with this name and type encoding.
SEL sel_registerTypedName_np(const char *selName, const char *types)
{
	if (nullptr == selName) { return nullptr; }
	UnregisteredSelector unregistered = {selName, types};
	return objc_register_selector_copy(unregistered, YES);
}
/// Returns the selector's type encoding, or nullptr for nil / untyped.
const char *sel_getType_np(SEL aSel)
{
	return (nullptr == aSel) ? nullptr : aSel->types;
}
/// Copies up to `count` type encodings registered for this selector name
/// into `types`.  With count == 0, returns the number of encodings without
/// copying; otherwise returns the total number found (which may exceed the
/// number copied).
unsigned sel_copyTypes_np(const char *selName, const char **types, unsigned count)
{
	if (nullptr == selName) { return 0; }
	SEL untyped = selector_lookup(selName, 0);
	if (untyped == nullptr) { return 0; }
	auto *typeList = selLookup(untyped->index);
	if (typeList == nullptr)
	{
		return 0;
	}
	if (count == 0)
	{
		// Count-only query: report how many encodings exist.
		unsigned total = 0;
		for (auto it = typeList->begin(); it != typeList->end(); ++it)
		{
			total++;
		}
		return total;
	}
	unsigned seen = 0;
	for (auto encoding : *typeList)
	{
		if (seen < count)
		{
			types[seen] = encoding;
		}
		seen++;
	}
	return seen;
}
/// Copies up to `count` typed selectors sharing this name into `sels`.
/// With count == 0, returns the number of typed variants without copying;
/// otherwise returns the number written.
unsigned sel_copyTypedSelectors_np(const char *selName, SEL *const sels, unsigned count)
{
	if (nullptr == selName) { return 0; }
	SEL untyped = selector_lookup(selName, 0);
	if (untyped == nullptr) { return 0; }
	auto *l = selLookup(untyped->index);
	if (l == nullptr)
	{
		return 0;
	}
	if (count == 0)
	{
		// Count-only query.
		for (auto type : *l)
		{
			(void)type;
			count++;
		}
		return count;
	}
	unsigned found = 0;
	for (auto type : *l)
	{
		// FIX: this was `found > count`, which allowed one write past the
		// end of the caller-provided buffer (sels[count]) before breaking.
		if (found >= count)
		{
			break;
		}
		sels[found++] = selector_lookup(selName, type);
	}
	return found;
}
/**
 * Replaces each (unregistered) selector in a method list with its registered,
 * canonical selector.
 */
extern "C" PRIVATE void objc_register_selectors_from_list(struct objc_method_list *l)
{
for (int i=0 ; i<l->count ; i++)
{
Method m = method_at_index(l, i);
// Before registration, the slot where the selector pointer lives holds
// the selector's name string.
UnregisteredSelector sel{(const char*)m->selector, m->types};
m->selector = objc_register_selector_copy(sel, NO);
}
}
/**
 * Register all of the (unregistered) selectors that are used in a class,
 * walking every method list attached to it.
 */
extern "C" PRIVATE void objc_register_selectors_from_class(Class aClass)
{
	struct objc_method_list *list = aClass->methods;
	while (nullptr != list)
	{
		objc_register_selectors_from_list(list);
		list = list->next;
	}
}
/**
 * Registers a compiler-emitted array of selectors, terminated by an entry
 * with a null name.
 */
extern "C" PRIVATE void objc_register_selector_array(SEL selectors, unsigned long count)
{
// GCC is broken and always sets the count to 0, so we ignore count until
// we can throw stupid and buggy compilers in the bin.
for (unsigned long i=0 ; (nullptr != selectors[i].name) ; i++)
{
objc_register_selector(&selectors[i]);
}
}
/**
 * Legacy GNU runtime compatibility.
 *
 * All of the functions in this section are deprecated and should not be used
 * in new code.
 */
#ifndef NO_LEGACY
/// Returns a selector with this name and type encoding, registering one if
/// needed, preferring the first typed variant recorded for the name.
SEL sel_get_typed_uid (const char *name, const char *types)
{
if (nullptr == name) { return nullptr; }
SEL sel = selector_lookup(name, types);
if (nullptr == sel) { return sel_registerTypedName_np(name, types); }
// NOTE(review): selLookup() returns the C++ TypeList, but this code still
// uses the legacy C `struct sel_type_list` (`->next`, `->value`).  This
// looks like unported legacy code — confirm it compiles and port it to the
// TypeList iterator API.
struct sel_type_list *l = selLookup(sel->index);
// Skip the head, which just contains the name, not the types.
l = l->next;
if (nullptr != l)
{
sel = selector_lookup(name, l->value);
}
return sel;
}
/// Returns any typed selector with this name (the first recorded variant),
/// falling back to the untyped registration.
SEL sel_get_any_typed_uid (const char *name)
{
if (nullptr == name) { return nullptr; }
SEL sel = selector_lookup(name, 0);
if (nullptr == sel) { return sel_registerName(name); }
// NOTE(review): as in sel_get_typed_uid(), this still uses the legacy C
// `struct sel_type_list` rather than the C++ TypeList — confirm and port.
struct sel_type_list *l = selLookup(sel->index);
// Skip the head, which just contains the name, not the types.
l = l->next;
if (nullptr != l)
{
sel = selector_lookup(name, l->value);
}
return sel;
}
/// Legacy alias: returns the untyped registration for this name, if any.
SEL sel_get_any_uid (const char *name)
{
return selector_lookup(name, 0);
}
/// Legacy alias: identical to sel_get_any_uid().
SEL sel_get_uid(const char *name)
{
return selector_lookup(name, 0);
}
/// Legacy alias for sel_getNameNonUnique().
const char *sel_get_name(SEL selector)
{
return sel_getNameNonUnique(selector);
}
/// Legacy alias: YES when the selector has been registered.
BOOL sel_is_mapped(SEL selector)
{
return isSelRegistered(selector);
}
/// Legacy alias for sel_getType_np().
const char *sel_get_type(SEL selector)
{
return sel_getType_np(selector);
}
/// Legacy alias for sel_registerName().
SEL sel_register_name(const char *name)
{
return sel_registerName(name);
}
/// Legacy alias for sel_registerTypedName_np().
SEL sel_register_typed_name(const char *name, const char *type)
{
return sel_registerTypedName_np(name, type);
}
/// Legacy alias for sel_isEqual().
BOOL sel_eq(SEL s1, SEL s2)
{
return sel_isEqual(s1, s2);
}
#endif // NO_LEGACY
}

@ -0,0 +1,498 @@
#include "objc/runtime.h"
#include "lock.h"
#include "dtable.h"
#include "selector.h"
#include "loader.h"
#include "objc/hooks.h"
#include <stdint.h>
#include <stdio.h>
#ifndef __clang__
#define NO_SAFE_CACHING
#endif
/// Sends +initialize to the object's class if it has not yet been sent.
void objc_send_initialize(id object);
/// nil-receiver IMPs: return a zero of the right register class for the
/// integer, long double, double, and float return paths respectively.
static long long nil_method(id self, SEL _cmd) { return 0; }
static long double nil_method_D(id self, SEL _cmd) { return 0; }
static double nil_method_d(id self, SEL _cmd) { return 0; }
static float nil_method_f(id self, SEL _cmd) { return 0; }
/// Version-1 (objc_slot) nil slots wrapping the nil methods above.
static struct objc_slot nil_slot_v1 = { Nil, Nil, 0, 1, (IMP)nil_method };
static struct objc_slot nil_slot_D_v1 = { Nil, Nil, 0, 1, (IMP)nil_method_D };
static struct objc_slot nil_slot_d_v1 = { Nil, Nil, 0, 1, (IMP)nil_method_d };
static struct objc_slot nil_slot_f_v1 = { Nil, Nil, 0, 1, (IMP)nil_method_f };
/// Version-2 (objc_method) nil slots wrapping the nil methods above.
static struct objc_method nil_slot = { (IMP)nil_method, NULL, NULL };
static struct objc_method nil_slot_D = { (IMP)nil_method_D, NULL, NULL };
static struct objc_method nil_slot_d = { (IMP)nil_method_d, NULL, NULL };
static struct objc_method nil_slot_f = { (IMP)nil_method_f, NULL, NULL };
static struct objc_slot2* objc_slot_lookup(id *receiver, SEL selector);
// Default implementations of the two new hooks. Return NULL.
static id objc_proxy_lookup_null(id receiver, SEL op) { return nil; }
static struct objc_slot *objc_msg_forward3_null(id receiver, SEL op) { return &nil_slot_v1; }
/// Hook: fast-proxy lookup; may return a replacement receiver or nil.
id (*objc_proxy_lookup)(id receiver, SEL op) = objc_proxy_lookup_null;
/// Hook: returns a v1 slot used to forward an otherwise-unhandled message.
struct objc_slot *(*__objc_msg_forward3)(id receiver, SEL op) = objc_msg_forward3_null;
/// Adapts the v3 forwarding hook to the v2 (IMP-returning) interface.
static IMP forward2(id self, SEL _cmd)
{
return __objc_msg_forward3(self, _cmd)->method;
}
IMP (*__objc_msg_forward2)(id, SEL) = forward2;
/// Per-thread scratch slots handed out for results that must not be cached.
__thread struct objc_method uncacheable_slot = { (IMP)nil_method, NULL, NULL };
__thread struct objc_slot uncacheable_slot_v1 = { Nil, Nil, 0, 0, (IMP)nil_method };
#ifndef NO_SELECTOR_MISMATCH_WARNINGS
/// Default mismatch handler: warn on stderr that a message was sent with a
/// signature differing from the method's, then dispatch it anyway.
static IMP objc_selector_type_mismatch(Class cls, SEL
selector, struct objc_slot2 *result)
{
fprintf(stderr, "Calling [%s %c%s] with incorrect signature. "
"Method has %s (%s), selector has %s\n",
cls->name,
class_isMetaClass(cls) ? '+' : '-',
sel_getName(selector),
sel_getType_np(((struct objc_method*)result)->selector),
((struct objc_method*)result)->types,
sel_getType_np(selector));
return result->method;
}
#else
/// Silent variant used when mismatch warnings are compiled out.
static IMP objc_selector_type_mismatch(Class cls, SEL
selector, struct objc_slot2 *result)
{
return result->method;
}
#endif
/// Modern (v2) mismatch hook; replaceable by the embedding application.
IMP (*_objc_selector_type_mismatch2)(Class cls, SEL
selector, struct objc_slot2 *result) = objc_selector_type_mismatch;
/// Legacy (v1) mismatch hook; NULL unless the application installs one.
struct objc_slot *(*_objc_selector_type_mismatch)(Class cls, SEL
selector, struct objc_slot *result);
/**
 * Invokes the installed selector-type-mismatch hook for (cls, sel) and
 * returns the IMP to dispatch.  If only the legacy (v1) hook is installed,
 * the modern slot is translated into a temporary v1 slot first.
 */
static IMP call_mismatch_hook(Class cls, SEL sel, struct objc_slot2 *slot)
{
	if (_objc_selector_type_mismatch &&
		(!_objc_selector_type_mismatch2 ||
		 (_objc_selector_type_mismatch2 == objc_selector_type_mismatch)))
	{
		struct objc_slot fwdslot;
		fwdslot.types = ((struct objc_method*)slot)->types;
		fwdslot.selector = sel;
		fwdslot.method = slot->method;
		// FIX: pass the v1 slot we just built.  Previously this passed
		// &uncacheable_slot_v1, which was never filled in here, leaving
		// fwdslot a dead store and handing the hook stale slot data.
		struct objc_slot *slot_v1 = _objc_selector_type_mismatch(cls, sel, &fwdslot);
		return slot_v1->method;
	}
	return _objc_selector_type_mismatch2(cls, sel, slot);
}
/**
 * Core message lookup used by every dispatch path.  Resolves the slot for
 * (receiver, selector), sending +initialize on first use, consulting the
 * proxy-lookup hook, and falling back to the forwarding hook.  If `version`
 * is non-NULL it receives the method-cache version, or 0 when the returned
 * slot must not be cached.
 */
static
// Uncomment for debugging
//__attribute__((noinline))
__attribute__((always_inline))
struct objc_slot2 *objc_msg_lookup_internal(id *receiver, SEL selector, uint64_t *version)
{
if (version)
{
#ifdef NO_SAFE_CACHING
// Always write 0 to version, marking the slot as uncacheable.
*version = 0;
#else
*version = objc_method_cache_version;
#endif
}
Class class = classForObject((*receiver));
// NOTE(review): no goto targets this label in the visible function —
// confirm it is a leftover from a removed retry loop.
retry:;
struct objc_slot2 * result = objc_dtable_lookup(class->dtable, selector->index);
if (UNLIKELY(0 == result))
{
dtable_t dtable = dtable_for_class(class);
/* Install the dtable if it hasn't already been initialized. */
if (dtable == uninstalled_dtable)
{
objc_send_initialize(*receiver);
dtable = dtable_for_class(class);
result = objc_dtable_lookup(dtable, selector->index);
}
else
{
// Check again incase another thread updated the dtable while we
// weren't looking
result = objc_dtable_lookup(dtable, selector->index);
}
if (0 == result)
{
// A hit on the untyped index means the selector's types mismatch
// the method's: let the mismatch hook decide and mark uncacheable.
if ((result = objc_dtable_lookup(dtable, get_untyped_idx(selector))))
{
#ifndef NO_SAFE_CACHING
if (version)
{
*version = 0;
}
#endif
uncacheable_slot.imp = call_mismatch_hook(class, selector, result);
result = (struct objc_slot2*)&uncacheable_slot;
}
id newReceiver = objc_proxy_lookup(*receiver, selector);
// If some other library wants us to play forwarding games, try
// again with the new object.
if (nil != newReceiver)
{
*receiver = newReceiver;
return objc_slot_lookup(receiver, selector);
}
if (0 == result)
{
#ifndef NO_SAFE_CACHING
if (version)
{
*version = 0;
}
#endif
// Still nothing: return the forwarding IMP, uncacheable.
uncacheable_slot.imp = __objc_msg_forward2(*receiver, selector);
result = (struct objc_slot2*)&uncacheable_slot;
}
}
}
return result;
}
/// Slow path called from the assembly fast path: full lookup returning the
/// IMP only.
PRIVATE IMP slowMsgLookup(id *receiver, SEL cmd)
{
// By the time we've got here, the assembly version of this function has
// already done the nil checks.
return objc_msg_lookup_internal(receiver, cmd, NULL)->method;
}
/// Debugging helper (callable from assembly): prints a pointer value.
PRIVATE void logInt(void *a)
{
fprintf(stderr, "Value: %p\n", a);
}
/**
 * New Objective-C lookup function. This permits the lookup to modify the
 * receiver and also supports multi-dimensional dispatch based on the sender.
 * Returns a version-1 slot; the non-nil result lives in thread-local storage
 * and must not be cached.
 */
struct objc_slot *objc_msg_lookup_sender(id *receiver, SEL selector, id sender)
{
	// Returning a nil slot allows the caller to cache the lookup for nil too,
	// although this is not particularly useful because the nil method can be
	// inlined trivially.
	if (UNLIKELY(*receiver == nil))
	{
		// Return the correct kind of zero, depending on the type encoding.
		if (selector->types)
		{
			const char *t = selector->types;
			// Skip type qualifiers
			while ('r' == *t || 'n' == *t || 'N' == *t || 'o' == *t ||
			       'O' == *t || 'R' == *t || 'V' == *t || 'A' == *t)
			{
				t++;
			}
			// FIX: switch on the qualifier-stripped return type (*t).  This
			// previously read selector->types[0], which made the
			// qualifier-skipping loop above dead code.
			switch (*t)
			{
				case 'D': return &nil_slot_D_v1;
				case 'd': return &nil_slot_d_v1;
				case 'f': return &nil_slot_f_v1;
			}
		}
		return &nil_slot_v1;
	}
	struct objc_slot2 *slot = objc_msg_lookup_internal(receiver, selector, NULL);
	// Translate the v2 slot into the per-thread v1 slot (never cacheable).
	uncacheable_slot_v1.owner = Nil;
	uncacheable_slot_v1.types = sel_getType_np(((struct objc_method*)slot)->selector);
	uncacheable_slot_v1.selector = selector;
	uncacheable_slot_v1.method = slot->method;
	return &uncacheable_slot_v1;
}
/**
 * Slot lookup returning a version-2 slot; returns the matching nil slot for
 * nil receivers so callers may cache it.
 */
static struct objc_slot2* objc_slot_lookup(id *receiver, SEL selector)
{
	// Returning a nil slot allows the caller to cache the lookup for nil too,
	// although this is not particularly useful because the nil method can be
	// inlined trivially.
	if (UNLIKELY(*receiver == nil))
	{
		// Return the correct kind of zero, depending on the type encoding.
		if (selector->types)
		{
			const char *t = selector->types;
			// Skip type qualifiers
			while ('r' == *t || 'n' == *t || 'N' == *t || 'o' == *t ||
			       'O' == *t || 'R' == *t || 'V' == *t || 'A' == *t)
			{
				t++;
			}
			// FIX: switch on the qualifier-stripped return type (*t), not
			// selector->types[0], which ignored the loop above.
			switch (*t)
			{
				case 'D': return (struct objc_slot2*)&nil_slot_D;
				case 'd': return (struct objc_slot2*)&nil_slot_d;
				case 'f': return (struct objc_slot2*)&nil_slot_f;
			}
		}
		return (struct objc_slot2*)&nil_slot;
	}
	return objc_msg_lookup_internal(receiver, selector, NULL);
}
/**
 * Slot lookup with safe-caching support: `version` (if non-NULL) receives the
 * method-cache version, or 0 when the returned slot must not be cached.
 */
struct objc_slot2 *objc_slot_lookup_version(id *receiver, SEL selector, uint64_t *version)
{
	// Returning a nil slot allows the caller to cache the lookup for nil too,
	// although this is not particularly useful because the nil method can be
	// inlined trivially.
	if (UNLIKELY(*receiver == nil))
	{
#ifndef NO_SAFE_CACHING
		if (version)
		{
			*version = 0;
		}
#endif
		// Return the correct kind of zero, depending on the type encoding.
		if (selector->types)
		{
			const char *t = selector->types;
			// Skip type qualifiers
			while ('r' == *t || 'n' == *t || 'N' == *t || 'o' == *t ||
			       'O' == *t || 'R' == *t || 'V' == *t || 'A' == *t)
			{
				t++;
			}
			// FIX: switch on the qualifier-stripped return type (*t), not
			// selector->types[0], which ignored the loop above.
			switch (*t)
			{
				case 'D': return (struct objc_slot2*)&nil_slot_D;
				case 'd': return (struct objc_slot2*)&nil_slot_d;
				case 'f': return (struct objc_slot2*)&nil_slot_f;
			}
		}
		return (struct objc_slot2*)&nil_slot;
	}
	return objc_msg_lookup_internal(receiver, selector, version);
}
/// Modern lookup entry point: resolve the slot and unwrap its IMP.
IMP objc_msg_lookup2(id *receiver, SEL selector)
{
	struct objc_slot2 *slot = objc_slot_lookup(receiver, selector);
	return slot->method;
}
/**
 * Looks up the slot for a [super ...] send: searches the superclass's dtable,
 * sending +initialize and retrying if the dtable is not yet installed, and
 * falling back to forwarding (uncacheable) when no method is found.
 */
struct objc_slot2 *objc_slot_lookup_super2(struct objc_super *super, SEL selector)
{
	id receiver = super->receiver;
	if (receiver)
	{
		Class class = super->class;
		struct objc_slot2 * result = objc_dtable_lookup(dtable_for_class(class),
				selector->index);
		if (0 == result)
		{
			Class class = classForObject(receiver);
			// Dtable should always be installed in the superclass in
			// Objective-C, but may not be for other languages (Python).
			if (dtable_for_class(class) == uninstalled_dtable)
			{
				if (class_isMetaClass(class))
				{
					objc_send_initialize(receiver);
				}
				else
				{
					objc_send_initialize((id)class);
				}
				// FIX: removed a duplicated, unconditional
				// objc_send_initialize((id)class) call that followed the
				// branch above and re-initialized on every path.
				return objc_slot_lookup_super2(super, selector);
			}
			// No implementation found: forward, uncacheable.
			uncacheable_slot.imp = __objc_msg_forward2(receiver, selector);
			return (struct objc_slot2*)&uncacheable_slot;
		}
		return result;
	}
	return (struct objc_slot2*)&nil_slot;
}
OBJC_PUBLIC
/**
 * Version-1 variant of objc_slot_lookup_super2(): returns a thread-local v1
 * slot (never cacheable) describing the method, the forwarding IMP, or the
 * nil slot for a nil receiver.
 */
struct objc_slot *objc_slot_lookup_super(struct objc_super *super, SEL selector)
{
	id receiver = super->receiver;
	if (receiver)
	{
		Class class = super->class;
		struct objc_slot2 * result = objc_dtable_lookup(dtable_for_class(class),
				selector->index);
		if (0 == result)
		{
			Class class = classForObject(receiver);
			// Dtable should always be installed in the superclass in
			// Objective-C, but may not be for other languages (Python).
			if (dtable_for_class(class) == uninstalled_dtable)
			{
				if (class_isMetaClass(class))
				{
					objc_send_initialize(receiver);
				}
				else
				{
					objc_send_initialize((id)class);
				}
				// FIX: removed a duplicated, unconditional
				// objc_send_initialize((id)class) call that followed the
				// branch above.
				return objc_slot_lookup_super(super, selector);
			}
			// No implementation: fill the thread-local v1 slot with the
			// forwarding IMP.
			uncacheable_slot_v1.owner = Nil;
			uncacheable_slot_v1.types = sel_getType_np(selector);
			uncacheable_slot_v1.selector = selector;
			uncacheable_slot_v1.method = __objc_msg_forward2(receiver, selector);
			return &uncacheable_slot_v1;
		}
		// Found: translate the v2 slot into the thread-local v1 slot.
		uncacheable_slot_v1.owner = Nil;
		uncacheable_slot_v1.types = sel_getType_np(((struct objc_method*)result)->selector);
		uncacheable_slot_v1.selector = selector;
		uncacheable_slot_v1.method = result->method;
		return &uncacheable_slot_v1;
	}
	return &nil_slot_v1;
}
/**
 * looks up a slot without invoking any forwarding mechanisms
 * If `version` is non-NULL it receives the method-cache version, or 0 when
 * the returned slot must not be cached.
 */
struct objc_slot2 *objc_get_slot2(Class cls, SEL selector, uint64_t *version)
{
#ifndef NO_SAFE_CACHING
if (version)
{
*version = objc_method_cache_version;
}
#endif
struct objc_slot2 * result = objc_dtable_lookup(cls->dtable, selector->index);
if (0 == result)
{
void *dtable = dtable_for_class(cls);
/* Install the dtable if it hasn't already been initialized. */
// NOTE(review): unlike objc_msg_lookup_internal(), this path never sends
// +initialize, and the two branches below are identical — confirm the
// uninstalled-dtable branch is intentional and not missing a step.
if (dtable == uninstalled_dtable)
{
dtable = dtable_for_class(cls);
result = objc_dtable_lookup(dtable, selector->index);
}
else
{
// Check again incase another thread updated the dtable while we
// weren't looking
result = objc_dtable_lookup(dtable, selector->index);
}
if (NULL == result)
{
// A hit on the untyped index means a type mismatch: report via the
// hook and hand back an uncacheable slot.
if ((result = objc_dtable_lookup(dtable, get_untyped_idx(selector))))
{
#ifndef NO_SAFE_CACHING
if (version)
{
*version = 0;
}
#endif
uncacheable_slot.imp = call_mismatch_hook(cls, selector, result);
result = (struct objc_slot2*)&uncacheable_slot;
}
}
}
return result;
}
/// v1 wrapper around objc_get_slot2(): returns a thread-local v1 slot (never
/// cacheable) describing the method, or NULL when the class cannot respond.
struct objc_slot *objc_get_slot(Class cls, SEL selector)
{
struct objc_slot2 *result = objc_get_slot2(cls, selector, NULL);
if (result == NULL)
{
return NULL;
}
uncacheable_slot_v1.owner = Nil;
// Don't leak extended type encodings!
uncacheable_slot_v1.types = sel_getType_np(((struct objc_method*)result)->selector);
uncacheable_slot_v1.selector = selector;
uncacheable_slot_v1.method = result->method;
return &uncacheable_slot_v1;
}
////////////////////////////////////////////////////////////////////////////////
// Public API
////////////////////////////////////////////////////////////////////////////////
/// Returns YES when the class (or a superclass) implements the selector.
BOOL class_respondsToSelector(Class cls, SEL selector)
{
	if ((0 == cls) || (0 == selector))
	{
		return NO;
	}
	return (objc_get_slot2(cls, selector, NULL) != NULL);
}
/// Returns the IMP that would run for this class/selector pair, or the
/// forwarding IMP when no method is found.
IMP class_getMethodImplementation(Class cls, SEL name)
{
	if ((Nil == cls) || (NULL == name)) { return (IMP)0; }
	struct objc_slot2 *slot = objc_get_slot2(cls, name, NULL);
	if (slot == NULL)
	{
		return __objc_msg_forward2(nil, name);
	}
	return slot->method;
}
/// Struct-return variant: identical to the normal lookup in this runtime.
IMP class_getMethodImplementation_stret(Class cls, SEL name)
{
	return class_getMethodImplementation(cls, name);
}
////////////////////////////////////////////////////////////////////////////////
// Legacy compatibility
////////////////////////////////////////////////////////////////////////////////
#ifndef NO_LEGACY
/**
 * Legacy message lookup function.
 */
/// Legacy alias for class_respondsToSelector() on an instance.
BOOL __objc_responds_to(id object, SEL sel)
{
return class_respondsToSelector(classForObject(object), sel);
}
/// Legacy alias for class_getMethodImplementation().
IMP get_imp(Class cls, SEL selector)
{
return class_getMethodImplementation(cls, selector);
}
/**
 * Message send function that only ever worked on a small subset of compiler /
 * architecture combinations.  Always aborts with a diagnostic.
 */
void *objc_msg_sendv(void)
{
fprintf(stderr, "objc_msg_sendv() never worked correctly. Don't use it.\n");
abort();
}
#endif
/**
 * Legacy message lookup function. Does not support fast proxies or safe IMP
 * caching.  Returns the nil method for nil receivers.
 */
IMP objc_msg_lookup(id receiver, SEL selector)
{
if (nil == receiver) { return (IMP)nil_method; }
// Use a copy so we can detect whether the lookup substituted a proxy
// receiver.
id self = receiver;
struct objc_slot2 * slot = objc_msg_lookup_internal(&self, selector, NULL);
// If the receiver is changed by the lookup mechanism then we have to fall
// back to old-style forwarding.
if (self != receiver)
{
return __objc_msg_forward2(receiver, selector);
}
return slot->method;
}
/// Legacy [super ...] lookup: resolve the slot and unwrap its IMP.
IMP objc_msg_lookup_super(struct objc_super *super, SEL selector)
{
	struct objc_slot2 *slot = objc_slot_lookup_super2(super, selector);
	return slot->method;
}

@ -0,0 +1,72 @@
#include <string.h>
#include <stdio.h>
#include "objc/runtime.h"
#include "module.h"
#include "constant_string.h"
#include "visibility.h"
#define BUFFER_TYPE struct objc_static_instance_list *
#include "buffer.h"
/**
 * Attempts to set the isa pointer of every instance in a static-instance
 * list to the named class.  Returns NO when the class has not been loaded
 * yet, so the caller can retry later.
 */
static BOOL try_init_statics(struct objc_static_instance_list *statics)
{
const char *class_name = statics->class_name;
// This is a horrible hack.
//
// Very bad things happen when you have more than one constant string class
// used in a program. Unfortunately, GCC defaults to using
// NXConstantString, and if you forget to specify
// -fconstant-string-class=NSConstantString for some compilation units then
// you will end up with some NSConstantString instances and some
// NXConstantString instances. This is a mess. We hack around this by
// silently assuming that the user meant NSConstantString when they said
// NXConstantString if NSConstantString is set as the constant string class
// in string_class.h or by an external -D flag.
if (strcmp(class_name, "NXConstantString") == 0)
{
class_name = CONSTANT_STRING_CLASS;
}
Class class = (Class)objc_getClass(class_name);
if (Nil == class)
{
// Class not loaded yet; the caller buffers this list for later.
return NO;
}
// Point every instance in the nil-terminated array at the real class.
for (id *instance=statics->instances ; nil!=*instance ; instance++)
{
(*instance)->isa = class;
}
return YES;
}
/// Initializes a static-instance list now, or buffers it for retry once its
/// class has been loaded.
PRIVATE void objc_init_statics(struct objc_static_instance_list *statics)
{
	if (try_init_statics(statics))
	{
		return;
	}
	// Class not loaded yet: defer until objc_init_buffered_statics() runs.
	set_buffered_object_at_index(statics, buffered_objects++);
}
/**
 * Retries initialization for every static-instance list that was buffered
 * because its class had not been loaded, compacting the buffer when any
 * entry succeeds.
 */
PRIVATE void objc_init_buffered_statics(void)
{
BOOL shouldReshuffle = NO;
for (unsigned i=0 ; i<buffered_objects ; i++)
{
struct objc_static_instance_list *c = buffered_object_at_index(i);
if (NULL != c)
{
if (try_init_statics(c))
{
// Initialized now; drop it from the buffer.
set_buffered_object_at_index(NULL, i);
shouldReshuffle = YES;
}
}
}
if (shouldReshuffle)
{
compact_buffer();
}
}
Loading…
Cancel
Save