Revert "gc.c: Pass shape_id to newobj_init"
This reverts commit 228d13f6ed914d1e7f6bd2416e3f5be8283be865, which made default.c and mmtk.c depend on shape.h, preventing them from building independently.
commit 791acc5697
parent 65995c22f8
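In effect, the revert moves shape-id stamping back to the gc.c side of the modular-GC boundary: rb_gc_impl_new_obj() loses its shape_id parameter, and the caller applies the shape after allocation instead. A minimal sketch of the resulting flow, condensed from the gc.c hunks below (not the literal CRuby code):

    static inline VALUE
    newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, shape_id_t shape_id,
              bool wb_protected, size_t size)
    {
        /* The GC implementation allocates and fills in flags/klass only... */
        VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache,
                                       klass, flags, wb_protected, size);

        /* ...and gc.c, which is allowed to include shape.h, stamps the id. */
        RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);
        return obj;
    }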
--- a/gc.c
+++ b/gc.c
@@ -622,7 +622,7 @@ typedef struct gc_function_map {
     void (*stress_set)(void *objspace_ptr, VALUE flag);
     VALUE (*stress_get)(void *objspace_ptr);
     // Object allocation
-    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t alloc_size);
+    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
     size_t (*obj_slot_size)(VALUE obj);
     size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
     bool (*size_allocatable_p)(size_t size);
@@ -993,7 +993,8 @@ gc_validate_pc(VALUE obj)
 static inline VALUE
 newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
 {
-    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, shape_id, wb_protected, size);
+    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
+    RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);

     gc_validate_pc(obj);

--- a/gc/default/default.c
+++ b/gc/default/default.c
@@ -32,7 +32,6 @@
 #include "darray.h"
 #include "gc/gc.h"
 #include "gc/gc_impl.h"
-#include "shape.h"

 #ifndef BUILDING_MODULAR_GC
 # include "probes.h"
@@ -2148,13 +2147,15 @@ rb_gc_impl_source_location_cstr(int *ptr)
 #endif

 static inline VALUE
-newobj_init(VALUE klass, VALUE flags, shape_id_t shape_id, int wb_protected, rb_objspace_t *objspace, VALUE obj)
+newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
 {
     GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
     GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
     RBASIC(obj)->flags = flags;
     *((VALUE *)&RBASIC(obj)->klass) = klass;
-    RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);
+#if RBASIC_SHAPE_ID_FIELD
+    RBASIC(obj)->shape_id = 0;
+#endif

     int t = flags & RUBY_T_MASK;
     if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
@@ -2438,10 +2439,10 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t he
     return obj;
 }

-ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, shape_id_t shape_id, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx));
+ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx));

 static inline VALUE
-newobj_slowpath(VALUE klass, VALUE flags, shape_id_t shape_id, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)
+newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)
 {
     VALUE obj;
     unsigned int lev;
@@ -2466,32 +2467,32 @@ newobj_slowpath(VALUE klass, VALUE flags, shape_id_t shape_id, rb_objspace_t *ob
     }

         obj = newobj_alloc(objspace, cache, heap_idx, true);
-        newobj_init(klass, flags, shape_id, wb_protected, objspace, obj);
+        newobj_init(klass, flags, wb_protected, objspace, obj);
     }
     RB_GC_CR_UNLOCK(lev);

     return obj;
 }

-NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, shape_id_t shape_id,
+NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
                                                    rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
-NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, shape_id_t shape_id,
+NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
                                                      rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));

 static VALUE
-newobj_slowpath_wb_protected(VALUE klass, VALUE flags, shape_id_t shape_id, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
+newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
 {
-    return newobj_slowpath(klass, flags, shape_id, objspace, cache, TRUE, heap_idx);
+    return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);
 }

 static VALUE
-newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, shape_id_t shape_id, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
+newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
 {
-    return newobj_slowpath(klass, flags, shape_id, objspace, cache, FALSE, heap_idx);
+    return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);
 }

 VALUE
-rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t alloc_size)
+rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
 {
     VALUE obj;
     rb_objspace_t *objspace = objspace_ptr;
@@ -2512,14 +2513,14 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
     if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&
             wb_protected) {
         obj = newobj_alloc(objspace, cache, heap_idx, false);
-        newobj_init(klass, flags, shape_id, wb_protected, objspace, obj);
+        newobj_init(klass, flags, wb_protected, objspace, obj);
     }
     else {
         RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

         obj = wb_protected ?
-            newobj_slowpath_wb_protected(klass, flags, shape_id, objspace, cache, heap_idx) :
-            newobj_slowpath_wb_unprotected(klass, flags, shape_id, objspace, cache, heap_idx);
+            newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
+            newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
     }

     return obj;
--- a/gc/gc_impl.h
+++ b/gc/gc_impl.h
@@ -55,7 +55,7 @@ GC_IMPL_FN VALUE rb_gc_impl_stress_get(void *objspace_ptr);
 GC_IMPL_FN VALUE rb_gc_impl_config_get(void *objspace_ptr);
 GC_IMPL_FN void rb_gc_impl_config_set(void *objspace_ptr, VALUE hash);
 // Object allocation
-GC_IMPL_FN VALUE rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, uint32_t /* shape_id_t */ shape_id, bool wb_protected, size_t alloc_size);
+GC_IMPL_FN VALUE rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
 GC_IMPL_FN size_t rb_gc_impl_obj_slot_size(VALUE obj);
 GC_IMPL_FN size_t rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size);
 GC_IMPL_FN bool rb_gc_impl_size_allocatable_p(size_t size);
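Note that the reverted declaration already spelled the parameter as uint32_t /* shape_id_t */ precisely so that gc_impl.h itself would not need shape.h; the build break came from the implementations, which needed shape.h for shape_id_t and RSHAPE_COMBINE_IN_FLAGS(). With the parameter gone, a modular GC can again be compiled against gc/gc_impl.h alone. An illustrative stub under that assumption (my_gc_alloc_slot is a hypothetical helper, not CRuby API):

    VALUE
    rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass,
                       VALUE flags, bool wb_protected, size_t alloc_size)
    {
        /* Allocate a slot of alloc_size bytes (hypothetical helper). */
        VALUE obj = my_gc_alloc_slot(objspace_ptr, cache_ptr, alloc_size, wb_protected);

        /* Initialize the generic header; no shape bits are written here,
         * since gc.c stamps the shape id after this call returns. */
        RBASIC(obj)->flags = flags;
        *((VALUE *)&RBASIC(obj)->klass) = klass;
        return obj;
    }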
--- a/gc/mmtk/mmtk.c
+++ b/gc/mmtk/mmtk.c
@@ -7,7 +7,6 @@

 #include "gc/gc.h"
 #include "gc/gc_impl.h"
-#include "shape.h"
 #include "gc/mmtk/mmtk.h"

 #include "ccan/list/list.h"
@@ -604,7 +603,7 @@ rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
 // Object allocation

 VALUE
-rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t alloc_size)
+rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
 {
 #define MMTK_ALLOCATION_SEMANTICS_DEFAULT 0
     struct objspace *objspace = objspace_ptr;
@@ -626,7 +625,7 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
     VALUE *alloc_obj = mmtk_alloc(ractor_cache->mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
     alloc_obj++;
     alloc_obj[-1] = alloc_size;
-    alloc_obj[0] = RSHAPE_COMBINE_IN_FLAGS(flags, shape_id);
+    alloc_obj[0] = flags;
     alloc_obj[1] = klass;

     mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size + 8, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
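For orientation: the MMTk path over-allocates by one machine word and keeps a hidden size prefix in front of the object, and after the revert the first object word holds the plain flags again. The layout implied by the hunk above (assuming 64-bit VALUE slots, which the hard-coded + 8 suggests):

    VALUE *base = mmtk_alloc(mutator, alloc_size + 8, MMTk_MIN_OBJ_ALIGN, 0,
                             MMTK_ALLOCATION_SEMANTICS_DEFAULT);
    VALUE *obj  = base + 1;
    obj[-1] = alloc_size;   /* hidden prefix word: allocation size         */
    obj[0]  = flags;        /* RBASIC flags, no shape bits folded in now   */
    obj[1]  = klass;        /* RBASIC klass                                */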
--- a/shape.h
+++ b/shape.h
@@ -162,12 +162,6 @@ RBASIC_SHAPE_ID_FOR_READ(VALUE obj)
 bool rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id);
 #endif

-static inline VALUE
-RSHAPE_COMBINE_IN_FLAGS(VALUE flags, shape_id_t shape_id)
-{
-    return (flags & SHAPE_FLAG_MASK) | (((VALUE)shape_id) << SHAPE_FLAG_SHIFT);
-}
-
 static inline void
 RBASIC_SET_SHAPE_ID_NO_CHECKS(VALUE obj, shape_id_t shape_id)
 {
@@ -175,7 +169,8 @@ RBASIC_SET_SHAPE_ID_NO_CHECKS(VALUE obj, shape_id_t shape_id)
     RBASIC(obj)->shape_id = (VALUE)shape_id;
 #else
     // Object shapes are occupying top bits
-    RBASIC(obj)->flags = RSHAPE_COMBINE_IN_FLAGS(RBASIC(obj)->flags, shape_id);
+    RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
+    RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
 #endif
 }

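The two statements restored in RBASIC_SET_SHAPE_ID_NO_CHECKS() compute exactly what the removed RSHAPE_COMBINE_IN_FLAGS() helper returned in one expression: clear the shape bits at the top of the flags word, then install the new id. A standalone sketch with illustrative constants (the real SHAPE_FLAG_SHIFT and SHAPE_FLAG_MASK live in shape.h and depend on the platform's VALUE width):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;           /* stand-in for CRuby's VALUE         */
    typedef uint32_t shape_id_t;
    #define SHAPE_FLAG_SHIFT 32        /* illustrative: shapes in top bits   */
    #define SHAPE_FLAG_MASK (((VALUE)1 << SHAPE_FLAG_SHIFT) - 1)

    static VALUE
    combine_in_flags(VALUE flags, shape_id_t shape_id)
    {
        flags &= SHAPE_FLAG_MASK;                      /* clear old shape bits */
        flags |= (VALUE)shape_id << SHAPE_FLAG_SHIFT;  /* install the new id   */
        return flags;
    }

    int
    main(void)
    {
        VALUE flags = combine_in_flags(0x05, 7);
        printf("%#llx\n", (unsigned long long)flags);  /* 0x700000005 on 64-bit */
        return 0;
    }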