Mirror of https://github.com/ruby/ruby.git, synced 2026-01-27 04:24:23 +00:00
Previously this held a pointer to the Fiber itself, which required marking it (marking was only implemented recently; before that it was buggy). Using a monotonically increasing integer instead lets us avoid a free function and keeps everything simpler. My main motivations for this change are that the root fiber lazily allocates its self object, which makes a correct write barrier implementation challenging, and that I want to avoid pushing Mutexes into the remembered set when they are locked by a short-lived Fiber.
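For illustration only, a minimal sketch (not the actual Ruby implementation) of how such a monotonically increasing serial could be generated with the 64-bit helpers this header provides; the counter and function names are hypothetical:

#include "internal/atomic.h"

/* Hypothetical example: each Fiber is assigned a serial number once, and a
 * Mutex records that serial instead of a pointer to the Fiber itself. */
static rbimpl_atomic_uint64_t fiber_serial_counter;

static uint64_t
example_next_fiber_serial(void)
{
    /* ATOMIC_U64_FETCH_ADD returns the previous value; adding 1 keeps 0
     * free to mean "not owned by any fiber". */
    return ATOMIC_U64_FETCH_ADD(fiber_serial_counter, 1) + 1;
}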
90 lines · 3.6 KiB · C
#ifndef INTERNAL_ATOMIC_H
#define INTERNAL_ATOMIC_H

#include "ruby/atomic.h"

#define RUBY_ATOMIC_VALUE_LOAD(x) rbimpl_atomic_value_load(&(x), RBIMPL_ATOMIC_SEQ_CST)

/* shim macros only */
#define ATOMIC_ADD(var, val) RUBY_ATOMIC_ADD(var, val)
#define ATOMIC_CAS(var, oldval, newval) RUBY_ATOMIC_CAS(var, oldval, newval)
#define ATOMIC_DEC(var) RUBY_ATOMIC_DEC(var)
#define ATOMIC_EXCHANGE(var, val) RUBY_ATOMIC_EXCHANGE(var, val)
#define ATOMIC_FETCH_ADD(var, val) RUBY_ATOMIC_FETCH_ADD(var, val)
#define ATOMIC_FETCH_SUB(var, val) RUBY_ATOMIC_FETCH_SUB(var, val)
#define ATOMIC_INC(var) RUBY_ATOMIC_INC(var)
#define ATOMIC_OR(var, val) RUBY_ATOMIC_OR(var, val)
#define ATOMIC_PTR_CAS(var, oldval, newval) RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
#define ATOMIC_PTR_EXCHANGE(var, val) RUBY_ATOMIC_PTR_EXCHANGE(var, val)
#define ATOMIC_SET(var, val) RUBY_ATOMIC_SET(var, val)
#define ATOMIC_SIZE_ADD(var, val) RUBY_ATOMIC_SIZE_ADD(var, val)
#define ATOMIC_SIZE_CAS(var, oldval, newval) RUBY_ATOMIC_SIZE_CAS(var, oldval, newval)
#define ATOMIC_SIZE_DEC(var) RUBY_ATOMIC_SIZE_DEC(var)
#define ATOMIC_SIZE_EXCHANGE(var, val) RUBY_ATOMIC_SIZE_EXCHANGE(var, val)
#define ATOMIC_SIZE_INC(var) RUBY_ATOMIC_SIZE_INC(var)
#define ATOMIC_SIZE_SUB(var, val) RUBY_ATOMIC_SIZE_SUB(var, val)
#define ATOMIC_SUB(var, val) RUBY_ATOMIC_SUB(var, val)
#define ATOMIC_VALUE_CAS(var, oldval, val) RUBY_ATOMIC_VALUE_CAS(var, oldval, val)
#define ATOMIC_VALUE_EXCHANGE(var, val) RUBY_ATOMIC_VALUE_EXCHANGE(var, val)

#define ATOMIC_LOAD_RELAXED(var) rbimpl_atomic_load(&(var), RBIMPL_ATOMIC_RELAXED)

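/* 64-bit helpers. RBIMPL_ALIGNAS(8) keeps the value 8-byte aligned so the
 * per-platform 64-bit atomic operations below act on a naturally aligned
 * word. The load helper uses a relaxed atomic load where one is available. */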
typedef RBIMPL_ALIGNAS(8) uint64_t rbimpl_atomic_uint64_t;

static inline uint64_t
rbimpl_atomic_u64_load_relaxed(const volatile rbimpl_atomic_uint64_t *value)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
    return __atomic_load_n(value, __ATOMIC_RELAXED);
#elif defined(_WIN32)
    uint64_t val = *value;
    return InterlockedCompareExchange64(RBIMPL_CAST((uint64_t *)value), val, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    uint64_t val = *value;
    return atomic_cas_64(value, val, val);
#else
    return *value;
#endif
}
#define ATOMIC_U64_LOAD_RELAXED(var) rbimpl_atomic_u64_load_relaxed(&(var))

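/* Relaxed 64-bit store. Uses an atomic store or exchange where available and
 * falls back to a plain assignment on platforms without 64-bit atomics. */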
static inline void
rbimpl_atomic_u64_set_relaxed(volatile rbimpl_atomic_uint64_t *address, uint64_t value)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
    __atomic_store_n(address, value, __ATOMIC_RELAXED);
#elif defined(_WIN32)
    InterlockedExchange64(address, value);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_swap_64(address, value);
#else
    *address = value;
#endif
}
#define ATOMIC_U64_SET_RELAXED(var, val) rbimpl_atomic_u64_set_relaxed(&(var), val)

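/* 64-bit fetch-and-add returning the value prior to the addition. The final
 * fallback serializes the update with a native mutex on platforms without
 * any 64-bit atomic primitive. */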
static inline uint64_t
rbimpl_atomic_u64_fetch_add(volatile rbimpl_atomic_uint64_t *ptr, uint64_t val)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(_WIN32)
    return InterlockedExchangeAdd64((volatile LONG64 *)ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_add_64_nv(ptr, val) - val;
#elif defined(HAVE_STDATOMIC_H)
    return atomic_fetch_add_explicit((_Atomic uint64_t *)ptr, val, memory_order_seq_cst);
#else
    /* Fallback using mutex for platforms without 64-bit atomics */
    static rb_native_mutex_t lock = RB_NATIVE_MUTEX_INITIALIZER;
    rb_native_mutex_lock(&lock);
    uint64_t old = *ptr;
    *ptr = old + val;
    rb_native_mutex_unlock(&lock);
    return old;
#endif
}
#define ATOMIC_U64_FETCH_ADD(var, val) rbimpl_atomic_u64_fetch_add(&(var), val)

#endif