diff --git a/jit.c b/jit.c
index db3fe097ef..2ff38c28e2 100644
--- a/jit.c
+++ b/jit.c
@@ -16,10 +16,13 @@
 #include "vm_sync.h"
 #include "internal/fixnum.h"
 
-// Field offsets for the RObject struct
-enum robject_offsets {
+enum jit_bindgen_constants {
+    // Field offsets for the RObject struct
     ROBJECT_OFFSET_AS_HEAP_FIELDS = offsetof(struct RObject, as.heap.fields),
     ROBJECT_OFFSET_AS_ARY = offsetof(struct RObject, as.ary),
+
+    // Field offsets for the RString struct
+    RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len)
 };
 
 // Manually bound in rust since this is out-of-range of `int`,
@@ -162,6 +165,21 @@ rb_get_def_original_id(const rb_method_definition_t *def)
     return def->original_id;
 }
 
+VALUE
+rb_get_def_bmethod_proc(rb_method_definition_t *def)
+{
+    RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
+    return def->body.bmethod.proc;
+}
+
+rb_proc_t *
+rb_jit_get_proc_ptr(VALUE procv)
+{
+    rb_proc_t *proc;
+    GetProcPtr(procv, proc);
+    return proc;
+}
+
 int
 rb_get_mct_argc(const rb_method_cfunc_t *mct)
 {
diff --git a/shape.h b/shape.h
index fdc2b3ddd6..a20da1baa5 100644
--- a/shape.h
+++ b/shape.h
@@ -47,9 +47,9 @@ enum shape_id_fl_type {
 #undef RBIMPL_SHAPE_ID_FL
 };
 
-// This masks allows to check if a shape_id contains any ivar.
-// It rely on ROOT_SHAPE_WITH_OBJ_ID==1.
-enum {
+// This mask allows checking whether a shape_id contains any ivar.
+// It relies on ROOT_SHAPE_WITH_OBJ_ID==1.
+enum shape_id_mask {
     SHAPE_ID_HAS_IVAR_MASK = SHAPE_ID_FL_TOO_COMPLEX | (SHAPE_ID_OFFSET_MASK - 1),
 };
 
diff --git a/yjit.c b/yjit.c
index d0ab367b1c..807aec9e39 100644
--- a/yjit.c
+++ b/yjit.c
@@ -38,11 +38,6 @@
 
 #include 
 
-// Field offsets for the RString struct
-enum rstring_offsets {
-    RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len)
-};
-
 // We need size_t to have a known size to simplify code generation and FFI.
 // TODO(alan): check this in configure.ac to fail fast on 32 bit platforms.
 STATIC_ASSERT(64b_size_t, SIZE_MAX == UINT64_MAX);
@@ -234,14 +229,6 @@ rb_iseq_set_yjit_payload(const rb_iseq_t *iseq, void *payload)
     iseq->body->yjit_payload = payload;
 }
 
-rb_proc_t *
-rb_yjit_get_proc_ptr(VALUE procv)
-{
-    rb_proc_t *proc;
-    GetProcPtr(procv, proc);
-    return proc;
-}
-
 // This is defined only as a named struct inside rb_iseq_constant_body.
 // By giving it a separate typedef, we make it nameable by rust-bindgen.
 // Bindgen's temp/anon name isn't guaranteed stable.
@@ -249,13 +236,6 @@ typedef struct rb_iseq_param_keyword rb_seq_param_keyword_struct;
 
 ID rb_get_symbol_id(VALUE namep);
 
-VALUE
-rb_get_def_bmethod_proc(rb_method_definition_t *def)
-{
-    RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
-    return def->body.bmethod.proc;
-}
-
 VALUE
 rb_optimized_call(VALUE *recv, rb_execution_context_t *ec, int argc, VALUE *argv, int kw_splat, VALUE block_handler)
 {
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index 2b4f48d73e..df287e1bf8 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -91,7 +91,7 @@ fn main() {
         .allowlist_function("rb_yjit_shape_capacity")
         .allowlist_function("rb_yjit_shape_index")
         .allowlist_var("SHAPE_ID_NUM_BITS")
-        .allowlist_var("SHAPE_ID_HAS_IVAR_MASK")
+        .allowlist_type("shape_id_mask")
         .allowlist_function("rb_funcall")
         .allowlist_function("rb_obj_is_kind_of")
         .allowlist_function("rb_obj_frozen_p")
@@ -265,7 +265,7 @@ fn main() {
         .allowlist_function("rb_RSTRING_PTR")
         .allowlist_function("rb_RSTRING_LEN")
         .allowlist_function("rb_ENCODING_GET")
-        .allowlist_function("rb_yjit_get_proc_ptr")
+        .allowlist_function("rb_jit_get_proc_ptr")
         .allowlist_function("rb_yjit_exit_locations_dict")
         .allowlist_function("rb_jit_icache_invalidate")
         .allowlist_function("rb_optimized_call")
@@ -280,7 +280,7 @@ fn main() {
         .allowlist_function("rb_jit_vm_lock_then_barrier")
         .allowlist_function("rb_jit_vm_unlock")
         .allowlist_function("rb_jit_for_each_iseq")
-        .allowlist_type("robject_offsets")
+        .allowlist_type("jit_bindgen_constants")
         .allowlist_function("rb_vm_barrier")
 
         // Not sure why it's picking these up, but don't.
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index bf758a4f62..2316558261 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -7367,7 +7367,7 @@ fn gen_send_bmethod(
 ) -> Option {
     let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
 
-    let proc = unsafe { rb_yjit_get_proc_ptr(procv) };
+    let proc = unsafe { rb_jit_get_proc_ptr(procv) };
     let proc_block = unsafe { &(*proc).block };
 
     if proc_block.type_ != block_type_iseq {
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 74661e7ade..272586a79f 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -634,8 +634,8 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
 pub type vm_frame_env_flags = u32;
 pub type attr_index_t = u16;
 pub type shape_id_t = u32;
-pub const SHAPE_ID_HAS_IVAR_MASK: _bindgen_ty_37 = 134742014;
-pub type _bindgen_ty_37 = u32;
+pub const SHAPE_ID_HAS_IVAR_MASK: shape_id_mask = 134742014;
+pub type shape_id_mask = u32;
 #[repr(C)]
 pub struct rb_cvar_class_tbl_entry {
     pub index: u32,
@@ -941,12 +940,11 @@ pub const DEFINED_REF: defined_type = 15;
 pub const DEFINED_FUNC: defined_type = 16;
 pub const DEFINED_CONST_FROM: defined_type = 17;
 pub type defined_type = u32;
-pub const RUBY_OFFSET_RSTRING_LEN: rstring_offsets = 16;
-pub type rstring_offsets = u32;
 pub type rb_seq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
-pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: robject_offsets = 16;
-pub const ROBJECT_OFFSET_AS_ARY: robject_offsets = 16;
-pub type robject_offsets = u32;
+pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: jit_bindgen_constants = 16;
+pub const ROBJECT_OFFSET_AS_ARY: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_RSTRING_LEN: jit_bindgen_constants = 16;
+pub type jit_bindgen_constants = u32;
 pub type rb_iseq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
 extern "C" {
     pub fn ruby_xfree(ptr: *mut ::std::os::raw::c_void);
@@ -1122,9 +1121,7 @@ extern "C" {
     pub fn rb_full_cfunc_return(ec: *mut rb_execution_context_t, return_value: VALUE);
     pub fn rb_iseq_get_yjit_payload(iseq: *const rb_iseq_t) -> *mut ::std::os::raw::c_void;
     pub fn rb_iseq_set_yjit_payload(iseq: *const rb_iseq_t, payload: *mut ::std::os::raw::c_void);
-    pub fn rb_yjit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
     pub fn rb_get_symbol_id(namep: VALUE) -> ID;
-    pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
     pub fn rb_optimized_call(
         recv: *mut VALUE,
         ec: *mut rb_execution_context_t,
@@ -1200,6 +1197,8 @@ extern "C" {
     ) -> *mut rb_method_cfunc_t;
     pub fn rb_get_def_method_serial(def: *const rb_method_definition_t) -> usize;
     pub fn rb_get_def_original_id(def: *const rb_method_definition_t) -> ID;
+    pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+    pub fn rb_jit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
     pub fn rb_get_mct_argc(mct: *const rb_method_cfunc_t) -> ::std::os::raw::c_int;
     pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
     pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
diff --git a/zjit/bindgen/src/main.rs b/zjit/bindgen/src/main.rs
index 60dcb7a69c..92f7a10e56 100644
--- a/zjit/bindgen/src/main.rs
+++ b/zjit/bindgen/src/main.rs
@@ -294,8 +294,7 @@ fn main() {
         .allowlist_function("rb_zjit_singleton_class_p")
         .allowlist_function("rb_zjit_defined_ivar")
         .allowlist_function("rb_zjit_insn_leaf")
-        .allowlist_type("robject_offsets")
-        .allowlist_type("rstring_offsets")
+        .allowlist_type("jit_bindgen_constants")
         .allowlist_function("rb_assert_holding_vm_lock")
         .allowlist_function("rb_jit_shape_too_complex_p")
         .allowlist_function("rb_jit_multi_ractor_p")
@@ -303,7 +302,6 @@ fn main() {
         .allowlist_function("rb_jit_vm_unlock")
         .allowlist_function("rb_jit_for_each_iseq")
         .allowlist_function("rb_iseq_reset_jit_func")
-        .allowlist_type("robject_offsets")
         .allowlist_function("rb_vm_barrier")
 
         // Not sure why it's picking these up, but don't.
@@ -367,6 +365,7 @@ fn main() {
         .allowlist_function("rb_get_mct_func")
         .allowlist_function("rb_get_def_iseq_ptr")
         .allowlist_function("rb_get_def_bmethod_proc")
+        .allowlist_function("rb_jit_get_proc_ptr")
         .allowlist_function("rb_iseq_encoded_size")
         .allowlist_function("rb_get_iseq_body_total_calls")
         .allowlist_function("rb_get_iseq_body_local_iseq")
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index 6f2ad37d3b..c9e5bc8fd1 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -1,5 +1,142 @@
 /* automatically generated by rust-bindgen 0.71.1 */
 
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage> {
+    storage: Storage,
+}
+impl<Storage> __BindgenBitfieldUnit<Storage> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage }
+    }
+}
+impl<Storage> __BindgenBitfieldUnit<Storage>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    fn extract_bit(byte: u8, index: usize) -> bool {
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        Self::extract_bit(byte, index)
+    }
+    #[inline]
+    pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
+        debug_assert!(index / 8 < core::mem::size_of::<Self>());
+        let byte_index = index / 8;
+        let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
+        Self::extract_bit(byte, index)
+    }
+    #[inline]
+    fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            byte | mask
+        } else {
+            byte & !mask
+        }
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        *byte = Self::change_bit(*byte, index, val);
+    }
+    #[inline]
+    pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
+        debug_assert!(index / 8 < core::mem::size_of::<Self>());
+        let byte_index = index / 8;
+        let byte =
+            (core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
+        *byte = Self::change_bit(*byte, index, val);
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < core::mem::size_of::<Self>());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if Self::raw_get_bit(this, i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+    #[inline]
+    pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < core::mem::size_of::<Self>());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
+        }
+    }
+}
 #[repr(C)]
 #[derive(Default)]
 pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
@@ -30,6 +167,49 @@ impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
         fmt.write_str("__IncompleteArrayField")
     }
 }
+#[repr(C)]
+pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
+impl<T> __BindgenUnionField<T> {
+    #[inline]
+    pub const fn new() -> Self {
+        __BindgenUnionField(::std::marker::PhantomData)
+    }
+    #[inline]
+    pub unsafe fn as_ref(&self) -> &T {
+        ::std::mem::transmute(self)
+    }
+    #[inline]
+    pub unsafe fn as_mut(&mut self) -> &mut T {
+        ::std::mem::transmute(self)
+    }
+}
+impl<T> ::std::default::Default for __BindgenUnionField<T> {
+    #[inline]
+    fn default() -> Self {
+        Self::new()
+    }
+}
+impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
+impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
+    fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        fmt.write_str("__BindgenUnionField")
+    }
+}
+impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
+    fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
+}
+impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
+    fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
+        true
+    }
+}
+impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
 pub const ONIG_OPTION_IGNORECASE: u32 = 1;
 pub const ONIG_OPTION_EXTEND: u32 = 2;
 pub const ONIG_OPTION_MULTILINE: u32 = 4;
@@ -163,6 +343,16 @@ pub type ruby_rmodule_flags = u32;
 pub const ROBJECT_HEAP: ruby_robject_flags = 65536;
 pub type ruby_robject_flags = u32;
 pub type rb_event_flag_t = u32;
+pub type rb_block_call_func = ::std::option::Option<
+    unsafe extern "C" fn(
+        yielded_arg: VALUE,
+        callback_arg: VALUE,
+        argc: ::std::os::raw::c_int,
+        argv: *const VALUE,
+        blockarg: VALUE,
+    ) -> VALUE,
+>;
+pub type rb_block_call_func_t = rb_block_call_func;
 pub const RUBY_ENCODING_INLINE_MAX: ruby_encoding_consts = 127;
 pub const RUBY_ENCODING_SHIFT: ruby_encoding_consts = 22;
 pub const RUBY_ENCODING_MASK: ruby_encoding_consts = 532676608;
@@ -233,6 +423,20 @@ pub const imemo_callcache: imemo_type = 11;
 pub const imemo_constcache: imemo_type = 12;
 pub const imemo_fields: imemo_type = 13;
 pub type imemo_type = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct vm_ifunc_argc {
+    pub min: ::std::os::raw::c_int,
+    pub max: ::std::os::raw::c_int,
+}
+#[repr(C)]
+pub struct vm_ifunc {
+    pub flags: VALUE,
+    pub svar_lep: *mut VALUE,
+    pub func: rb_block_call_func_t,
+    pub data: *const ::std::os::raw::c_void,
+    pub argc: vm_ifunc_argc,
+}
 pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
 pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
 pub const METHOD_VISI_PRIVATE: rb_method_visibility_t = 2;
@@ -354,7 +558,166 @@ pub struct rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword {
     pub table: *const ID,
     pub default_values: *mut VALUE,
 }
+#[repr(C)]
+pub struct rb_captured_block {
+    pub self_: VALUE,
+    pub ep: *const VALUE,
+    pub code: rb_captured_block__bindgen_ty_1,
+}
+#[repr(C)]
+pub struct rb_captured_block__bindgen_ty_1 {
+    pub iseq: __BindgenUnionField<*const rb_iseq_t>,
+    pub ifunc: __BindgenUnionField<*const vm_ifunc>,
+    pub val: __BindgenUnionField<VALUE>,
+    pub bindgen_union_field: u64,
+}
+pub const block_type_iseq: rb_block_type = 0;
+pub const block_type_ifunc: rb_block_type = 1;
+pub const block_type_symbol: rb_block_type = 2;
+pub const block_type_proc: rb_block_type = 3;
+pub type rb_block_type = u32;
+#[repr(C)]
+pub struct rb_block {
+    pub as_: rb_block__bindgen_ty_1,
+    pub type_: rb_block_type,
+}
+#[repr(C)]
+pub struct rb_block__bindgen_ty_1 {
+    pub captured: __BindgenUnionField<rb_captured_block>,
+    pub symbol: __BindgenUnionField<VALUE>,
+    pub proc_: __BindgenUnionField<VALUE>,
+    pub bindgen_union_field: [u64; 3usize],
+}
 pub type rb_control_frame_t = rb_control_frame_struct;
+#[repr(C)]
+pub struct rb_proc_t {
+    pub block: rb_block,
+    pub _bitfield_align_1: [u8; 0],
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+    pub __bindgen_padding_0: [u8; 7usize],
+}
+impl rb_proc_t {
+    #[inline]
+    pub fn is_from_method(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_is_from_method(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(0usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub unsafe fn is_from_method_raw(this: *const Self) -> ::std::os::raw::c_uint {
+        unsafe {
+            ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+                ::std::ptr::addr_of!((*this)._bitfield_1),
+                0usize,
+                1u8,
+            ) as u32)
+        }
+    }
+    #[inline]
+    pub unsafe fn set_is_from_method_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+                0usize,
+                1u8,
+                val as u64,
+            )
+        }
+    }
+    #[inline]
+    pub fn is_lambda(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_is_lambda(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(1usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub unsafe fn is_lambda_raw(this: *const Self) -> ::std::os::raw::c_uint {
+        unsafe {
+            ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+                ::std::ptr::addr_of!((*this)._bitfield_1),
+                1usize,
+                1u8,
+            ) as u32)
+        }
+    }
+    #[inline]
+    pub unsafe fn set_is_lambda_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+                1usize,
+                1u8,
+                val as u64,
+            )
+        }
+    }
+    #[inline]
+    pub fn is_isolated(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_is_isolated(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(2usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub unsafe fn is_isolated_raw(this: *const Self) -> ::std::os::raw::c_uint {
+        unsafe {
+            ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+                ::std::ptr::addr_of!((*this)._bitfield_1),
+                2usize,
+                1u8,
+            ) as u32)
+        }
+    }
+    #[inline]
+    pub unsafe fn set_is_isolated_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+                2usize,
+                1u8,
+                val as u64,
+            )
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(
+        is_from_method: ::std::os::raw::c_uint,
+        is_lambda: ::std::os::raw::c_uint,
+        is_isolated: ::std::os::raw::c_uint,
+    ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+        __bindgen_bitfield_unit.set(0usize, 1u8, {
+            let is_from_method: u32 = unsafe { ::std::mem::transmute(is_from_method) };
+            is_from_method as u64
+        });
+        __bindgen_bitfield_unit.set(1usize, 1u8, {
+            let is_lambda: u32 = unsafe { ::std::mem::transmute(is_lambda) };
+            is_lambda as u64
+        });
+        __bindgen_bitfield_unit.set(2usize, 1u8, {
+            let is_isolated: u32 = unsafe { ::std::mem::transmute(is_isolated) };
+            is_isolated as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
 pub const VM_CHECKMATCH_TYPE_WHEN: vm_check_match_type = 1;
 pub const VM_CHECKMATCH_TYPE_CASE: vm_check_match_type = 2;
 pub const VM_CHECKMATCH_TYPE_RESCUE: vm_check_match_type = 3;
@@ -730,9 +1093,10 @@ pub const DEFINED_REF: defined_type = 15;
 pub const DEFINED_FUNC: defined_type = 16;
 pub const DEFINED_CONST_FROM: defined_type = 17;
 pub type defined_type = u32;
-pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: robject_offsets = 16;
-pub const ROBJECT_OFFSET_AS_ARY: robject_offsets = 16;
-pub type robject_offsets = u32;
+pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: jit_bindgen_constants = 16;
+pub const ROBJECT_OFFSET_AS_ARY: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_RSTRING_LEN: jit_bindgen_constants = 16;
+pub type jit_bindgen_constants = u32;
 pub const rb_invalid_shape_id: shape_id_t = 4294967295;
 pub type rb_iseq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
 unsafe extern "C" {
@@ -997,6 +1361,8 @@ unsafe extern "C" {
     ) -> *mut rb_method_cfunc_t;
     pub fn rb_get_def_method_serial(def: *const rb_method_definition_t) -> usize;
     pub fn rb_get_def_original_id(def: *const rb_method_definition_t) -> ID;
+    pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+    pub fn rb_jit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
     pub fn rb_get_mct_argc(mct: *const rb_method_cfunc_t) -> ::std::os::raw::c_int;
     pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
    pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
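
For orientation (not part of the patch): a minimal, hypothetical sketch of how the relocated helpers are meant to be called from a JIT's Rust side, mirroring the gen_send_bmethod hunk above. Only rb_get_def_bmethod_proc, rb_jit_get_proc_ptr, rb_proc_t, rb_block, and block_type_iseq come from the bindings touched by this diff; the function name bmethod_iseq_block and the assumption that `cme` points at a bmethod's callable method entry are illustrative.

    // Hypothetical example; assumes the items from cruby_bindings.inc.rs are in scope.
    // `cme` must be a valid callable method entry whose definition is a bmethod.
    unsafe fn bmethod_iseq_block(cme: *const rb_callable_method_entry_t) -> Option<*const rb_block> {
        // VALUE of the Proc backing the bmethod definition.
        let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
        // Resolve the Proc VALUE to its rb_proc_t (this helper was rb_yjit_get_proc_ptr before the rename).
        let proc_ptr = unsafe { rb_jit_get_proc_ptr(procv) };
        let block = unsafe { &(*proc_ptr).block };
        // Only iseq-backed blocks are of interest here; ifunc/symbol/proc-backed blocks are skipped.
        if block.type_ != block_type_iseq {
            return None;
        }
        Some(block as *const rb_block)
    }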