Use is_obj_encoding instead of is_data_encoding
The argument to `is_data_encoding` is assumed to be `T_DATA`.
parent: dbfedeb3a3
commit: b27d9353a7
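The encoding.c hunks below make the check order explicit: `is_data_encoding` reads the typed-data header (via `rbimpl_rtypeddata_p` / `RTYPEDDATA_P`), which is only meaningful once the VALUE is already known to be `T_DATA`, while `is_obj_encoding` performs the builtin-type check first. A minimal illustrative sketch in C of that ordering, assuming the `encoding_data_type` descriptor from encoding.c; the `safe_is_encoding` helper is hypothetical and not part of this commit:

/* Hypothetical sketch, not the committed code: the order of checks
 * that is_obj_encoding relies on. */
static int
safe_is_encoding(VALUE obj)
{
    /* Step 1: only T_DATA objects carry a (typed) data header at all;
     * reading it from any other object is reading the wrong memory. */
    if (!RB_TYPE_P(obj, T_DATA)) return 0;
    /* Step 2: the header may still be an untyped RData; check the flag. */
    if (!RTYPEDDATA_P(obj)) return 0;
    /* Step 3: now RTYPEDDATA_TYPE is safe to read and compare. */
    return RTYPEDDATA_TYPE(obj) == &encoding_data_type;
}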
@@ -125,8 +125,9 @@ static const rb_data_type_t encoding_data_type = {
     0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
 };
 
-#define is_data_encoding(obj) (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj) == &encoding_data_type)
-#define is_obj_encoding(obj) (RB_TYPE_P((obj), T_DATA) && is_data_encoding(obj))
+#define is_encoding_type(obj) (RTYPEDDATA_TYPE(obj) == &encoding_data_type)
+#define is_data_encoding(obj) (rbimpl_rtypeddata_p(obj) && is_encoding_type(obj))
+#define is_obj_encoding(obj) (rbimpl_obj_typeddata_p(obj) && is_encoding_type(obj))
 
 int
 rb_data_is_encoding(VALUE obj)
@@ -1345,7 +1346,7 @@ enc_inspect(VALUE self)
 {
     rb_encoding *enc;
 
-    if (!is_data_encoding(self)) {
+    if (!is_obj_encoding(self)) { /* do not resolve autoload */
         not_encoding(self);
     }
     if (!(enc = RTYPEDDATA_GET_DATA(self)) || rb_enc_from_index(rb_enc_to_index(enc)) != enc) {
@@ -696,27 +696,6 @@ rb_mmtk_alloc_fast_path(struct objspace *objspace, struct MMTk_ractor_cache *rac
     }
 }
 
-static bool
-obj_can_parallel_free_p(VALUE obj)
-{
-    switch (RB_BUILTIN_TYPE(obj)) {
-      case T_ARRAY:
-      case T_BIGNUM:
-      case T_COMPLEX:
-      case T_FLOAT:
-      case T_HASH:
-      case T_OBJECT:
-      case T_RATIONAL:
-      case T_REGEXP:
-      case T_STRING:
-      case T_STRUCT:
-      case T_SYMBOL:
-        return true;
-      default:
-        return false;
-    }
-}
-
 VALUE
 rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
 {
@@ -753,7 +732,7 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
     mmtk_post_alloc(ractor_cache->mutator, (void*)alloc_obj, alloc_size, MMTK_ALLOCATION_SEMANTICS_DEFAULT);
 
     // TODO: only add when object needs obj_free to be called
-    mmtk_add_obj_free_candidate(alloc_obj, obj_can_parallel_free_p((VALUE)alloc_obj));
+    mmtk_add_obj_free_candidate(alloc_obj);
 
     objspace->total_allocated_objects++;
 
@@ -122,7 +122,7 @@ void mmtk_post_alloc(MMTk_Mutator *mutator,
                      size_t bytes,
                      MMTk_AllocationSemantics semantics);
 
-void mmtk_add_obj_free_candidate(MMTk_ObjectReference object, bool can_parallel_free);
+void mmtk_add_obj_free_candidate(MMTk_ObjectReference object);
 
 void mmtk_declare_weak_references(MMTk_ObjectReference object);
@@ -198,10 +198,7 @@ pub unsafe extern "C" fn mmtk_init_binding(
     let mmtk_boxed = mmtk_init(&builder);
     let mmtk_static = Box::leak(Box::new(mmtk_boxed));
 
-    let mut binding = RubyBinding::new(mmtk_static, &binding_options, upcalls);
-    binding
-        .weak_proc
-        .init_parallel_obj_free_candidates(memory_manager::num_of_workers(binding.mmtk));
+    let binding = RubyBinding::new(mmtk_static, &binding_options, upcalls);
 
     crate::BINDING
         .set(binding)
@@ -299,10 +296,8 @@ pub unsafe extern "C" fn mmtk_post_alloc(
 
 // TODO: Replace with buffered mmtk_add_obj_free_candidates
 #[no_mangle]
-pub extern "C" fn mmtk_add_obj_free_candidate(object: ObjectReference, can_parallel_free: bool) {
-    binding()
-        .weak_proc
-        .add_obj_free_candidate(object, can_parallel_free)
+pub extern "C" fn mmtk_add_obj_free_candidate(object: ObjectReference) {
+    binding().weak_proc.add_obj_free_candidate(object)
 }
 
 // =============== Weak references ===============
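The `// TODO: Replace with buffered mmtk_add_obj_free_candidates` comment above points at batching candidate registration instead of crossing the FFI boundary once per allocation. A hypothetical sketch in C of what such a buffered caller could look like; the buffer, `flush_obj_free_candidates`, and a plural `mmtk_add_obj_free_candidates(ptr, len)` C entry point are all assumptions for illustration (only the Rust-side `WeakProcessor::add_obj_free_candidates` appears in this diff):

/* Hypothetical sketch: batch obj_free candidates on the C side, as the
 * TODO in api.rs suggests. Not part of this commit. */
#define CANDIDATE_BUF_LEN 256

static MMTk_ObjectReference candidate_buf[CANDIDATE_BUF_LEN];
static size_t candidate_buf_count = 0;

static void
flush_obj_free_candidates(void)
{
    if (candidate_buf_count == 0) return;
    /* Assumed plural FFI call mirroring WeakProcessor::add_obj_free_candidates. */
    mmtk_add_obj_free_candidates(candidate_buf, candidate_buf_count);
    candidate_buf_count = 0;
}

static void
buffered_add_obj_free_candidate(MMTk_ObjectReference object)
{
    /* A real version would keep one buffer per ractor cache, since
     * multiple mutators allocate (and register candidates) concurrently. */
    candidate_buf[candidate_buf_count++] = object;
    if (candidate_buf_count == CANDIDATE_BUF_LEN) flush_obj_free_candidates();
}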
@@ -1,5 +1,3 @@
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering;
 use std::sync::Mutex;
 
 use mmtk::{
@@ -11,13 +9,10 @@ use mmtk::{
 use crate::{abi::GCThreadTLS, upcalls, Ruby};
 
 pub struct WeakProcessor {
-    non_parallel_obj_free_candidates: Mutex<Vec<ObjectReference>>,
-    parallel_obj_free_candidates: Vec<Mutex<Vec<ObjectReference>>>,
-    parallel_obj_free_candidates_counter: AtomicUsize,
-
+    /// Objects that needs `obj_free` called when dying.
+    /// If it is a bottleneck, replace it with a lock-free data structure,
+    /// or add candidates in batch.
+    obj_free_candidates: Mutex<Vec<ObjectReference>>,
     weak_references: Mutex<Vec<ObjectReference>>,
 }
 
@@ -30,59 +25,32 @@ impl Default for WeakProcessor {
 impl WeakProcessor {
     pub fn new() -> Self {
         Self {
-            non_parallel_obj_free_candidates: Mutex::new(Vec::new()),
-            parallel_obj_free_candidates: vec![Mutex::new(Vec::new())],
-            parallel_obj_free_candidates_counter: AtomicUsize::new(0),
+            obj_free_candidates: Mutex::new(Vec::new()),
             weak_references: Mutex::new(Vec::new()),
         }
     }
 
-    pub fn init_parallel_obj_free_candidates(&mut self, num_workers: usize) {
-        debug_assert_eq!(self.parallel_obj_free_candidates.len(), 1);
-
-        for _ in 1..num_workers {
-            self.parallel_obj_free_candidates
-                .push(Mutex::new(Vec::new()));
-        }
-    }
-
     /// Add an object as a candidate for `obj_free`.
     ///
     /// Multiple mutators can call it concurrently, so it has `&self`.
-    pub fn add_obj_free_candidate(&self, object: ObjectReference, can_parallel_free: bool) {
-        if can_parallel_free {
-            // Newly allocated objects are placed in parallel_obj_free_candidates using
-            // round-robin. This may not be ideal for load balancing.
-            let idx = self
-                .parallel_obj_free_candidates_counter
-                .fetch_add(1, Ordering::Relaxed)
-                % self.parallel_obj_free_candidates.len();
-
-            self.parallel_obj_free_candidates[idx]
-                .lock()
-                .unwrap()
-                .push(object);
-        } else {
-            self.non_parallel_obj_free_candidates
-                .lock()
-                .unwrap()
-                .push(object);
-        }
-    }
+    pub fn add_obj_free_candidate(&self, object: ObjectReference) {
+        let mut obj_free_candidates = self.obj_free_candidates.lock().unwrap();
+        obj_free_candidates.push(object);
+    }
+
+    /// Add many objects as candidates for `obj_free`.
+    ///
+    /// Multiple mutators can call it concurrently, so it has `&self`.
+    pub fn add_obj_free_candidates(&self, objects: &[ObjectReference]) {
+        let mut obj_free_candidates = self.obj_free_candidates.lock().unwrap();
+        for object in objects.iter().copied() {
+            obj_free_candidates.push(object);
+        }
+    }
 
     pub fn get_all_obj_free_candidates(&self) -> Vec<ObjectReference> {
-        // let mut obj_free_candidates = self.obj_free_candidates.lock().unwrap();
-        let mut all_obj_free_candidates = self
-            .non_parallel_obj_free_candidates
-            .lock()
-            .unwrap()
-            .to_vec();
-
-        for candidates_mutex in &self.parallel_obj_free_candidates {
-            all_obj_free_candidates.extend(candidates_mutex.lock().unwrap().to_vec());
-        }
-
-        std::mem::take(all_obj_free_candidates.as_mut())
+        let mut obj_free_candidates = self.obj_free_candidates.lock().unwrap();
+        std::mem::take(obj_free_candidates.as_mut())
     }
 
     pub fn add_weak_reference(&self, object: ObjectReference) {
@@ -95,18 +63,7 @@ impl WeakProcessor {
         worker: &mut GCWorker<Ruby>,
         _tracer_context: impl ObjectTracerContext<Ruby>,
     ) {
-        worker.add_work(
-            WorkBucketStage::VMRefClosure,
-            ProcessNonParallelObjFreeCanadidates {},
-        );
-
-        for index in 0..self.parallel_obj_free_candidates.len() {
-            worker.add_work(
-                WorkBucketStage::VMRefClosure,
-                ProcessParallelObjFreeCandidates { index },
-            );
-        }
-
+        worker.add_work(WorkBucketStage::VMRefClosure, ProcessObjFreeCandidates);
         worker.add_work(WorkBucketStage::VMRefClosure, ProcessWeakReferences);
 
         worker.add_work(WorkBucketStage::Prepare, UpdateFinalizerObjIdTables);
@@ -123,50 +80,36 @@ impl WeakProcessor {
     }
 }
 
-fn process_obj_free_candidates(obj_free_candidates: &mut Vec<ObjectReference>) {
-    // Process obj_free
-    let mut new_candidates = Vec::new();
-
-    for object in obj_free_candidates.iter().copied() {
-        if object.is_reachable() {
-            // Forward and add back to the candidate list.
-            let new_object = object.forward();
-            trace!("Forwarding obj_free candidate: {object} -> {new_object}");
-            new_candidates.push(new_object);
-        } else {
-            (upcalls().call_obj_free)(object);
-        }
-    }
-
-    *obj_free_candidates = new_candidates;
-}
-
-struct ProcessParallelObjFreeCandidates {
-    index: usize,
-}
-
-impl GCWork<Ruby> for ProcessParallelObjFreeCandidates {
-    fn do_work(&mut self, _worker: &mut GCWorker<Ruby>, _mmtk: &'static mmtk::MMTK<Ruby>) {
-        let mut obj_free_candidates = crate::binding().weak_proc.parallel_obj_free_candidates
-            [self.index]
-            .try_lock()
-            .expect("Lock for parallel_obj_free_candidates should not be held");
-
-        process_obj_free_candidates(&mut obj_free_candidates);
-    }
-}
-
-struct ProcessNonParallelObjFreeCanadidates;
-
-impl GCWork<Ruby> for ProcessNonParallelObjFreeCanadidates {
+struct ProcessObjFreeCandidates;
+
+impl GCWork<Ruby> for ProcessObjFreeCandidates {
     fn do_work(&mut self, _worker: &mut GCWorker<Ruby>, _mmtk: &'static mmtk::MMTK<Ruby>) {
         // If it blocks, it is a bug.
         let mut obj_free_candidates = crate::binding()
             .weak_proc
-            .non_parallel_obj_free_candidates
+            .obj_free_candidates
             .try_lock()
-            .expect("Lock for non_parallel_obj_free_candidates should not be held");
+            .expect("It's GC time. No mutators should hold this lock at this time.");
 
-        process_obj_free_candidates(&mut obj_free_candidates);
+        let n_cands = obj_free_candidates.len();
+
+        debug!("Total: {n_cands} candidates");
+
+        // Process obj_free
+        let mut new_candidates = Vec::new();
+
+        for object in obj_free_candidates.iter().copied() {
+            if object.is_reachable() {
+                // Forward and add back to the candidate list.
+                let new_object = object.forward();
+                trace!("Forwarding obj_free candidate: {object} -> {new_object}");
+                new_candidates.push(new_object);
+            } else {
+                (upcalls().call_obj_free)(object);
+            }
+        }
+
+        *obj_free_candidates = new_candidates;
     }
 }