We can't grab the VM Lock in free functions
This is due to the way MMTK frees objects, which happens on another native thread. Because of that there is no `ec`, so we can't grab the VM Lock. This was causing crashes in release builds of MMTK on CI, such as:

```
/home/runner/work/ruby/ruby/build/ruby(sigsegv+0x46) [0x557905117ef6] ../src/signal.c:948
/lib/x86_64-linux-gnu/libc.so.6(0x7f555f845330) [0x7f555f845330]
/home/runner/work/ruby/ruby/build/ruby(rb_ec_thread_ptr+0x0) [0x5579051d59d5] ../src/vm_core.h:2087
/home/runner/work/ruby/ruby/build/ruby(rb_ec_ractor_ptr) ../src/vm_core.h:2036
/home/runner/work/ruby/ruby/build/ruby(rb_current_execution_context) ../src/vm_core.h:2105
/home/runner/work/ruby/ruby/build/ruby(rb_current_ractor_raw) ../src/vm_core.h:2104
/home/runner/work/ruby/ruby/build/ruby(rb_current_ractor) ../src/vm_core.h:2112
/home/runner/work/ruby/ruby/build/ruby(rb_current_ractor) ../src/vm_core.h:2110
/home/runner/work/ruby/ruby/build/ruby(vm_locked) ../src/vm_sync.c:15
/home/runner/work/ruby/ruby/build/ruby(rb_vm_lock_enter_body) ../src/vm_sync.c:141
/home/runner/work/ruby/ruby/build/ruby(rb_vm_lock_enter+0xa) [0x557905390a5a] ../src/vm_sync.h:76
/home/runner/work/ruby/ruby/build/ruby(fiber_pool_stack_release) ../src/cont.c:777
/home/runner/work/ruby/ruby/build/ruby(fiber_stack_release+0xe) [0x557905392075] ../src/cont.c:919
/home/runner/work/ruby/ruby/build/ruby(cont_free) ../src/cont.c:1087
/home/runner/work/ruby/ruby/build/ruby(fiber_free) ../src/cont.c:1180
```

This would have run into an assertion error in a debug build, but we don't run debug builds of MMTK on GitHub's CI.

Co-authored-by: john.hawthorn@shopify.com
This commit is contained in: parent 31a1a39ace, commit 27ff586152
Notes (git): 2025-10-15 17:50:06 +00:00
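For illustration, here is a minimal, self-contained sketch of the failure mode in the backtrace above. It is not Ruby's actual code: it only mimics the shape in which the VM lock helpers reach the current ractor through a per-thread execution-context pointer, something a foreign native thread (such as a GC worker running free functions) never has. All names here (`current_ec`, `vm_lock_enter`, `sweeper_thread`, ...) are made up for the example.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct execution_context {
    int ractor_id;    /* stands in for the ec -> thread -> ractor chain */
};

/* Set for threads the VM creates; NULL on any other native thread. */
static _Thread_local struct execution_context *current_ec;

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static void vm_lock_enter(void)
{
    /* The lock bookkeeping needs the current ractor, reached via the
     * per-thread execution context. */
    struct execution_context *ec = current_ec;
    if (ec == NULL) {
        /* In the real crash this is a segfault inside rb_ec_thread_ptr(). */
        fprintf(stderr, "no execution context on this native thread\n");
        abort();
    }
    pthread_mutex_lock(&vm_lock);
    printf("VM lock taken on behalf of ractor %d\n", ec->ractor_id);
}

static void vm_lock_leave(void)
{
    pthread_mutex_unlock(&vm_lock);
}

/* The old shape of the free path: take the VM lock inside the free function. */
static void release_fiber_stack(void)
{
    vm_lock_enter();
    puts("fiber stack released");
    vm_lock_leave();
}

/* A thread the VM did not create (think: a GC sweeper). current_ec stays NULL. */
static void *sweeper_thread(void *arg)
{
    (void)arg;
    release_fiber_stack();    /* aborts here: no ec, so the lock cannot be taken */
    return NULL;
}

int main(void)
{
    struct execution_context main_ec = { .ractor_id = 1 };
    current_ec = &main_ec;
    release_fiber_stack();    /* fine: this thread has an execution context */

    pthread_t t;
    pthread_create(&t, NULL, sweeper_thread, NULL);
    pthread_join(t, NULL);
    return 0;
}
```

Compile with `cc -std=c11 -pthread`. The main thread succeeds; the sweeper thread hits the missing-context path, which is where the release build segfaulted.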
cont.c (71 lines changed)
@@ -774,44 +774,40 @@ static void
 fiber_pool_stack_release(struct fiber_pool_stack * stack)
 {
     struct fiber_pool * pool = stack->pool;
-    RB_VM_LOCK_ENTER();
-    {
-        struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
+    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
 
-        if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
+    if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
 
-        // Copy the stack details into the vacancy area:
-        vacancy->stack = *stack;
-        // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
+    // Copy the stack details into the vacancy area:
+    vacancy->stack = *stack;
+    // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
 
-        // Reset the stack pointers and reserve space for the vacancy data:
-        fiber_pool_vacancy_reset(vacancy);
+    // Reset the stack pointers and reserve space for the vacancy data:
+    fiber_pool_vacancy_reset(vacancy);
 
-        // Push the vacancy into the vancancies list:
-        pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);
-        pool->used -= 1;
+    // Push the vacancy into the vancancies list:
+    pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);
+    pool->used -= 1;
 
 #ifdef FIBER_POOL_ALLOCATION_FREE
-        struct fiber_pool_allocation * allocation = stack->allocation;
+    struct fiber_pool_allocation * allocation = stack->allocation;
 
-        allocation->used -= 1;
+    allocation->used -= 1;
 
-        // Release address space and/or dirty memory:
-        if (allocation->used == 0) {
-            fiber_pool_allocation_free(allocation);
-        }
-        else if (stack->pool->free_stacks) {
-            fiber_pool_stack_free(&vacancy->stack);
-        }
+    // Release address space and/or dirty memory:
+    if (allocation->used == 0) {
+        fiber_pool_allocation_free(allocation);
+    }
+    else if (stack->pool->free_stacks) {
+        fiber_pool_stack_free(&vacancy->stack);
+    }
 #else
-        // This is entirely optional, but clears the dirty flag from the stack
-        // memory, so it won't get swapped to disk when there is memory pressure:
-        if (stack->pool->free_stacks) {
-            fiber_pool_stack_free(&vacancy->stack);
-        }
+    // This is entirely optional, but clears the dirty flag from the stack
+    // memory, so it won't get swapped to disk when there is memory pressure:
+    if (stack->pool->free_stacks) {
+        fiber_pool_stack_free(&vacancy->stack);
+    }
 #endif
-    }
-    RB_VM_LOCK_LEAVE();
 }
 
 static inline void
@@ -924,6 +920,17 @@ fiber_stack_release(rb_fiber_t * fiber)
     rb_ec_clear_vm_stack(ec);
 }
 
+static void
+fiber_stack_release_locked(rb_fiber_t *fiber)
+{
+    if (!ruby_vm_during_cleanup) {
+        // We can't try to acquire the VM lock here because MMTK calls free in its own native thread which has no ec.
+        // This assertion will fail on MMTK but we currently don't have CI for debug releases of MMTK, so we can assert for now.
+        ASSERT_vm_locking_with_barrier();
+    }
+    fiber_stack_release(fiber);
+}
+
 static const char *
 fiber_status_name(enum fiber_status s)
 {
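The new `fiber_stack_release_locked()` wrapper asserts that the caller already holds the VM lock instead of taking it, and skips the assertion during VM cleanup. A minimal sketch of that pattern, assuming stand-in names (`vm_lock_enter`, `holds_vm_lock`, `during_cleanup`) rather than the real VM lock machinery:

```c
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local bool holds_vm_lock = false;  /* per-thread ownership note */
static bool during_cleanup = false;               /* mirrors ruby_vm_during_cleanup */

static void vm_lock_enter(void) { pthread_mutex_lock(&vm_lock); holds_vm_lock = true; }
static void vm_lock_leave(void) { holds_vm_lock = false; pthread_mutex_unlock(&vm_lock); }

/* Skipped during cleanup, when free functions may legitimately run on a
 * thread that has no way to acquire (or even identify) the VM lock. */
static void assert_vm_locked(void)
{
    if (!during_cleanup) {
        assert(holds_vm_lock);
    }
}

static void stack_release(void) { puts("releasing fiber stack"); }

static void stack_release_locked(void)
{
    assert_vm_locked();   /* document the locking expectation instead of taking the lock */
    stack_release();
}

int main(void)
{
    vm_lock_enter();
    stack_release_locked();   /* normal path: lock is held */
    vm_lock_leave();

    during_cleanup = true;
    stack_release_locked();   /* teardown path: assertion is skipped */
    return 0;
}
```

The locking responsibility moves to callers that do have an `ec`; the `RB_VM_LOCKING()` call site in the fiber_switch hunk below is the counterpart of this change.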
@@ -1084,7 +1091,7 @@ cont_free(void *ptr)
     else {
         rb_fiber_t *fiber = (rb_fiber_t*)cont;
         coroutine_destroy(&fiber->context);
-        fiber_stack_release(fiber);
+        fiber_stack_release_locked(fiber);
     }
 
     RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
@@ -2741,7 +2748,9 @@ fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fi
     // We cannot free the stack until the pthread is joined:
 #ifndef COROUTINE_PTHREAD_CONTEXT
     if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {
-        fiber_stack_release(fiber);
+        RB_VM_LOCKING() {
+            fiber_stack_release(fiber);
+        }
     }
 #endif
 
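In the fiber_switch hunk above, the release is wrapped in `RB_VM_LOCKING() { ... }`, a block-scoped enter/leave form. As a rough illustration of that idiom only (an assumed shape for such block macros, not vm_sync.h's actual definition of `RB_VM_LOCKING`):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_enter(void) { pthread_mutex_lock(&vm_lock); }
static void lock_leave(void) { pthread_mutex_unlock(&vm_lock); }

/* Runs its body exactly once: the initializer takes the lock, the increment
 * expression releases it after the body has executed.
 * (Caveat of this sketch: a `break` inside the body would skip lock_leave.) */
#define WITH_VM_LOCK() \
    for (int _locked = (lock_enter(), 1); _locked; lock_leave(), _locked = 0)

static void release_stack(void) { puts("releasing stack while the lock is held"); }

int main(void)
{
    WITH_VM_LOCK() {
        release_stack();
    }
    return 0;
}
```

The run-once `for` loop keeps enter and leave paired around the block, so the caller cannot forget the matching leave.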
vm.c (3 lines changed)
@@ -59,6 +59,8 @@ int ruby_assert_critical_section_entered = 0;
 
 static void *native_main_thread_stack_top;
 
+bool ruby_vm_during_cleanup = false;
+
 VALUE rb_str_concat_literals(size_t, const VALUE*);
 
 VALUE vm_exec(rb_execution_context_t *);
@@ -3287,6 +3289,7 @@ int
 ruby_vm_destruct(rb_vm_t *vm)
 {
     RUBY_FREE_ENTER("vm");
+    ruby_vm_during_cleanup = true;
 
     if (vm) {
         rb_thread_t *th = vm->ractor.main_thread;