Set context_stack on main thread
We allocate the VM stack of the main thread using malloc, but we never set malloc_stack to true or record the allocation in context_stack. If we fork, the current main thread may no longer be the initial thread, so RUBY_FREE_AT_EXIT reports that stack as leaked memory. This commit allows the main thread to free its own VM stack at shutdown.
Commit: d8c8623f50
Parent: 37d65e9252
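For context on the commit message above, here is a minimal C sketch of the ownership problem being fixed. It is illustrative only, not CRuby code: the struct and function names below are invented, and only the idea of recording malloc_stack/context_stack at allocation time is carried over.

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-ins for the thread bookkeeping fields. */
struct toy_thread {
    void *vm_stack;      /* malloc'd VM stack */
    bool  malloc_stack;  /* does this thread own a malloc'd stack? */
    void *context_stack; /* pointer recorded so the owner can free it */
};

/* Before (conceptually): the stack was freed only when the process had
 * never forked, so a forked child's shutdown left it unfreed and
 * RUBY_FREE_AT_EXIT flagged it as a leak. */
static void shutdown_before(struct toy_thread *th, int fork_gen)
{
    if (fork_gen == 0) {
        free(th->vm_stack); /* skipped entirely after a fork */
    }
}

/* After (conceptually): allocation records ownership on the thread, and
 * shutdown frees whatever the thread itself recorded, fork or not. */
static void stack_alloc(struct toy_thread *th, size_t size)
{
    th->vm_stack = malloc(size);
    th->malloc_stack = true;
    th->context_stack = th->vm_stack;
}

static void shutdown_after(struct toy_thread *th)
{
    if (th->malloc_stack) {
        free(th->context_stack);
    }
}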
@@ -335,4 +335,10 @@ rb_thread_prevent_fork(void *(*func)(void *), void *data)
     return func(data);
 }
 
+void
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+{
+    // no-op
+}
+
 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
@@ -3492,4 +3492,11 @@ rb_thread_lock_native_thread(void)
     return is_snt;
 }
 
+void
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+{
+    th->sched.malloc_stack = true;
+    th->sched.context_stack = stack;
+}
+
 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
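The hunk above tags the main thread's malloc'd stack in the scheduler-side bookkeeping (th->sched.malloc_stack and th->sched.context_stack), so that, per the commit message, the thread can free its own VM stack at shutdown. A hedged sketch of how a teardown step could consume those two fields, using an invented struct and function name rather than the actual thread code:

#include <stdbool.h>
#include <stdlib.h>

/* Simplified stand-in: only the two fields touched by this hunk. */
struct toy_sched_item {
    bool  malloc_stack;
    void *context_stack;
};

/* Hypothetical teardown step: free the recorded stack only when the
 * thread marked it as malloc'd, then clear the fields so a second
 * cleanup pass is a no-op. */
static void toy_stack_release(struct toy_sched_item *sched)
{
    if (sched->malloc_stack && sched->context_stack) {
        free(sched->context_stack);
        sched->context_stack = NULL;
        sched->malloc_stack = false;
    }
}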
@@ -1020,4 +1020,10 @@ rb_thread_prevent_fork(void *(*func)(void *), void *data)
     return func(data);
 }
 
+void
+rb_thread_malloc_stack_set(rb_thread_t *th, void *stack)
+{
+    // no-op
+}
+
 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
vm.c (7 changed lines)
@@ -3280,7 +3280,7 @@ ruby_vm_destruct(rb_vm_t *vm)
 
     if (vm) {
         rb_thread_t *th = vm->ractor.main_thread;
-        VALUE *stack = th->ec->vm_stack;
         if (rb_free_at_exit) {
            rb_free_encoded_insn_data();
            rb_free_global_enc_table();
@@ -3345,7 +3345,6 @@ ruby_vm_destruct(rb_vm_t *vm)
            rb_free_default_rand_key();
            if (th && vm->fork_gen == 0) {
                /* If we have forked, main_thread may not be the initial thread */
-               xfree(stack);
                ruby_mimfree(th);
            }
        }
@@ -3827,7 +3826,9 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
 
     if (self == 0) {
         size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
-        rb_ec_initialize_vm_stack(th->ec, ALLOC_N(VALUE, size), size);
+        VALUE *stack = ALLOC_N(VALUE, size);
+        rb_ec_initialize_vm_stack(th->ec, stack, size);
+        rb_thread_malloc_stack_set(th, stack);
     }
     else {
         VM_ASSERT(th->ec->cfp == NULL);
@@ -1965,6 +1965,7 @@ VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t
 int rb_vm_get_sourceline(const rb_control_frame_t *);
 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
+void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
 rb_thread_t * ruby_thread_from_native(void);
 int ruby_thread_set_native(rb_thread_t *th);
 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);