ZJIT: Move c_stack_slots to Assembler

Author: Takashi Kokubun 2025-10-28 02:43:21 -07:00
parent cc051ef0e5
commit 0e1d99ce69
Notes: git 2025-10-28 16:26:09 +00:00
4 changed files with 54 additions and 30 deletions

View File

@@ -1665,7 +1665,7 @@ mod tests {
fn test_emit_frame() {
let (mut asm, mut cb) = setup_asm();
asm.frame_setup(&[], 0);
asm.frame_setup(&[]);
asm.frame_teardown(&[]);
asm.compile_with_num_regs(&mut cb, 0);
@@ -1684,7 +1684,8 @@ mod tests {
// Test 3 preserved regs (odd), odd slot_count
let cb1 = {
let (mut asm, mut cb) = setup_asm();
asm.frame_setup(THREE_REGS, 3);
asm.stack_base_idx = 3;
asm.frame_setup(THREE_REGS);
asm.frame_teardown(THREE_REGS);
asm.compile_with_num_regs(&mut cb, 0);
cb
@@ -1693,7 +1694,8 @@ mod tests {
// Test 3 preserved regs (odd), even slot_count
let cb2 = {
let (mut asm, mut cb) = setup_asm();
asm.frame_setup(THREE_REGS, 4);
asm.stack_base_idx = 4;
asm.frame_setup(THREE_REGS);
asm.frame_teardown(THREE_REGS);
asm.compile_with_num_regs(&mut cb, 0);
cb
@@ -1703,7 +1705,8 @@ mod tests {
let cb3 = {
static FOUR_REGS: &[Opnd] = &[Opnd::Reg(X19_REG), Opnd::Reg(X20_REG), Opnd::Reg(X21_REG), Opnd::Reg(X22_REG)];
let (mut asm, mut cb) = setup_asm();
asm.frame_setup(FOUR_REGS, 3);
asm.stack_base_idx = 3;
asm.frame_setup(FOUR_REGS);
asm.frame_teardown(FOUR_REGS);
asm.compile_with_num_regs(&mut cb, 0);
cb

View File

@@ -1034,6 +1034,9 @@ impl fmt::Debug for Insn {
// Print list of operands
let mut opnd_iter = self.opnd_iter();
if let Insn::FrameSetup { slot_count, .. } = self {
write!(fmt, "{slot_count}")?;
}
if let Some(first_opnd) = opnd_iter.next() {
write!(fmt, "{first_opnd:?}")?;
}
@@ -1176,6 +1179,11 @@ pub struct Assembler {
/// On `compile`, it also disables the backend's use of them.
pub(super) accept_scratch_reg: bool,
/// The Assembler can use NATIVE_BASE_PTR + stack_base_idx as the
/// first stack slot in case it needs to allocate memory. This is
/// equal to the number of spilled basic block arguments.
pub(super) stack_base_idx: usize,
/// If Some, the next ccall should verify its leafness
leaf_ccall_stack_size: Option<usize>
}
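A rough sketch of the layout this field implies, as a reading aid; the 8-byte slot size and the reserved top slot mirror the param_opnd change later in this commit rather than anything in this hunk:

    // Below NATIVE_BASE_PTR after FrameSetup, assuming 8-byte slots:
    //   [NATIVE_BASE_PTR - 0]                    value FrameSetup preserved there (must not be clobbered)
    //   [NATIVE_BASE_PTR - 8]                    spilled basic block argument 0
    //   ...
    //   [NATIVE_BASE_PTR - 8 * stack_base_idx]   spilled argument stack_base_idx - 1
    //   anything the Assembler allocates for itself starts below this point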
@@ -1189,10 +1197,16 @@ impl Assembler
live_ranges: Vec::with_capacity(ASSEMBLER_INSNS_CAPACITY),
label_names: Vec::default(),
accept_scratch_reg: false,
stack_base_idx: 0,
leaf_ccall_stack_size: None,
}
}
/// Create an Assembler, reserving a specified number of stack slots
pub fn new_with_stack_slots(stack_base_idx: usize) -> Self {
Self { stack_base_idx, ..Self::new() }
}
/// Create an Assembler that allows the use of scratch registers.
/// This should be called only through [`Self::new_with_scratch_reg`].
pub(super) fn new_with_accept_scratch_reg(accept_scratch_reg: bool) -> Self {
@@ -1205,6 +1219,7 @@ impl Assembler
let mut asm = Self {
label_names: old_asm.label_names.clone(),
accept_scratch_reg: old_asm.accept_scratch_reg,
stack_base_idx: old_asm.stack_base_idx,
..Self::new()
};
// Bump the initial VReg index to allow the use of the VRegs for the old Assembler
@@ -1841,7 +1856,8 @@ impl Assembler {
out
}
pub fn frame_setup(&mut self, preserved_regs: &'static [Opnd], slot_count: usize) {
pub fn frame_setup(&mut self, preserved_regs: &'static [Opnd]) {
let slot_count = self.stack_base_idx;
self.push_insn(Insn::FrameSetup { preserved: preserved_regs, slot_count });
}
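As a quick usage sketch, matching how gen_function and the backend tests below drive this API (the slot count here is only illustrative):

    // Reserve two spill slots up front, then emit the frame without passing a count:
    let mut asm = Assembler::new_with_stack_slots(2);
    asm.frame_setup(&[]);      // slot_count is now read from asm.stack_base_idx
    asm.frame_teardown(&[]);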

View File

@@ -986,7 +986,9 @@ impl Assembler {
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::assert_disasm_snapshot;
#[cfg(feature = "disasm")]
use crate::disasms_with;
use crate::{assert_disasm_snapshot, hexdumps};
use super::*;
fn setup_asm() -> (Assembler, CodeBlock) {
@@ -1553,18 +1555,19 @@ mod tests {
#[test]
fn frame_setup_teardown() {
let (mut asm, mut cb) = setup_asm();
asm.frame_setup(JIT_PRESERVED_REGS, 0);
let (mut asm, mut cb1) = setup_asm();
asm.frame_setup(JIT_PRESERVED_REGS);
asm.frame_teardown(JIT_PRESERVED_REGS);
asm.cret(C_RET_OPND);
asm.compile_with_num_regs(&mut cb1, 0);
asm.frame_setup(&[], 5);
let (mut asm, mut cb2) = setup_asm();
asm.stack_base_idx = 5;
asm.frame_setup(&[]);
asm.frame_teardown(&[]);
asm.compile_with_num_regs(&mut cb2, 0);
asm.compile_with_num_regs(&mut cb, 0);
assert_disasm_snapshot!(cb.disasm(), @"
assert_disasm_snapshot!(disasms_with!("\n", cb1, cb2), @r"
0x0: push rbp
0x1: mov rbp, rsp
0x4: push r13
@@ -1577,13 +1580,17 @@ mod tests {
0x19: mov rsp, rbp
0x1c: pop rbp
0x1d: ret
0x1e: push rbp
0x1f: mov rbp, rsp
0x22: sub rsp, 0x30
0x26: mov rsp, rbp
0x29: pop rbp
0x0: push rbp
0x1: mov rbp, rsp
0x4: sub rsp, 0x30
0x8: mov rsp, rbp
0xb: pop rbp
");
assert_snapshot!(hexdumps!(cb1, cb2), @r"
554889e541555341544883ec084c8b6df8488b5df04c8b65e84889ec5dc3
554889e54883ec304889ec5d
");
assert_snapshot!(cb.hexdump(), @"554889e541555341544883ec084c8b6df8488b5df04c8b65e84889ec5dc3554889e54883ec304889ec5d");
}
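The sub rsp, 0x30 in the second snapshot is consistent with the five requested slots; a minimal sketch of the presumed math, assuming 8-byte slots and the 16-byte stack alignment the x86-64 ABI requires (the rounding rule is an inference, not something this diff states):

    fn main() {
        let slot_bytes = 5 * 8;                    // stack_base_idx = 5, 8 bytes per slot
        let frame_bytes = (slot_bytes + 15) & !15; // round up to 16-byte alignment
        assert_eq!(frame_bytes, 0x30);             // matches the sub rsp, 0x30 above
    }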
#[test]

View File

@@ -41,21 +41,17 @@ struct JITState {
/// ISEQ calls that need to be compiled later
iseq_calls: Vec<IseqCallRef>,
/// The number of bytes allocated for basic block arguments spilled onto the C stack
c_stack_slots: usize,
}
impl JITState {
/// Create a new JITState instance
fn new(iseq: IseqPtr, num_insns: usize, num_blocks: usize, c_stack_slots: usize) -> Self {
fn new(iseq: IseqPtr, num_insns: usize, num_blocks: usize) -> Self {
JITState {
iseq,
opnds: vec![None; num_insns],
labels: vec![None; num_blocks],
jit_entries: Vec::default(),
iseq_calls: Vec::default(),
c_stack_slots,
}
}
@@ -246,9 +242,9 @@ fn gen_iseq_body(cb: &mut CodeBlock, iseq: IseqPtr, function: Option<&Function>,
/// Compile a function
fn gen_function(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function) -> Result<(IseqCodePtrs, Vec<CodePtr>, Vec<IseqCallRef>), CompileError> {
let c_stack_slots = max_num_params(function).saturating_sub(ALLOC_REGS.len());
let mut jit = JITState::new(iseq, function.num_insns(), function.num_blocks(), c_stack_slots);
let mut asm = Assembler::new();
let num_spilled_params = max_num_params(function).saturating_sub(ALLOC_REGS.len());
let mut jit = JITState::new(iseq, function.num_insns(), function.num_blocks());
let mut asm = Assembler::new_with_stack_slots(num_spilled_params);
// Compile each basic block
let reverse_post_order = function.rpo();
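A worked instance of the saturating_sub above, with made-up numbers since the real ALLOC_REGS length depends on the backend:

    fn main() {
        // Hypothetical: the widest basic block takes 7 arguments while 5 allocatable
        // registers exist, so 2 arguments spill to the C stack.
        let max_num_params: usize = 7;
        let alloc_regs_len: usize = 5;
        assert_eq!(max_num_params.saturating_sub(alloc_regs_len), 2);
        // With alloc_regs_len or fewer parameters, saturating_sub clamps the count to zero.
        assert_eq!(3usize.saturating_sub(alloc_regs_len), 0);
    }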
@@ -1011,7 +1007,7 @@ fn gen_load_ivar_extended(asm: &mut Assembler, self_val: Opnd, id: ID, index: u1
fn gen_entry_prologue(asm: &mut Assembler, iseq: IseqPtr) {
asm_comment!(asm, "ZJIT entry point: {}", iseq_get_location(iseq, 0));
// Save the registers we'll use for CFP, EP, SP
asm.frame_setup(lir::JIT_PRESERVED_REGS, 0);
asm.frame_setup(lir::JIT_PRESERVED_REGS);
// EC and CFP are passed as arguments
asm.mov(EC, C_ARG_OPNDS[0]);
@@ -1439,7 +1435,7 @@ fn gen_entry_point(jit: &mut JITState, asm: &mut Assembler, jit_entry_idx: Optio
jit_entry.borrow_mut().start_addr.set(Some(code_ptr));
});
}
asm.frame_setup(&[], jit.c_stack_slots);
asm.frame_setup(&[]);
}
/// Compile code that exits from JIT code with a return value
@@ -1910,6 +1906,8 @@ fn param_opnd(idx: usize) -> Opnd {
if idx < ALLOC_REGS.len() {
Opnd::Reg(ALLOC_REGS[idx])
} else {
// With FrameSetup, the address that NATIVE_BASE_PTR points to stores an old value in the register.
// To avoid clobbering it, we need to start from the next slot, hence `+ 1` for the index.
Opnd::mem(64, NATIVE_BASE_PTR, (idx - ALLOC_REGS.len() + 1) as i32 * -SIZEOF_VALUE_I32)
}
}
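To make the + 1 concrete, a worked check of the offset arithmetic with assumed constants (8-byte VALUEs and a 5-register ALLOC_REGS, neither of which this diff asserts):

    fn main() {
        let alloc_regs_len: usize = 5; // assumed ALLOC_REGS.len()
        let sizeof_value: i32 = 8;     // assumed SIZEOF_VALUE_I32
        let idx: usize = 5;            // first parameter that does not fit in a register
        let offset = (idx - alloc_regs_len + 1) as i32 * -sizeof_value;
        // The first spilled parameter lands at [NATIVE_BASE_PTR - 8], leaving
        // [NATIVE_BASE_PTR] untouched for the value FrameSetup saved there.
        assert_eq!(offset, -8);
    }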
@@ -2121,7 +2119,7 @@ pub fn gen_function_stub_hit_trampoline(cb: &mut CodeBlock) -> Result<CodePtr, C
asm_comment!(asm, "function_stub_hit trampoline");
// Maintain alignment for x86_64, and set up a frame for arm64 properly
asm.frame_setup(&[], 0);
asm.frame_setup(&[]);
asm_comment!(asm, "preserve argument registers");
for &reg in ALLOC_REGS.iter() {