From 55d936a8d7b4d6dfb36434e3b598b5fe7613b73b Mon Sep 17 00:00:00 2001
From: Cr0a3
Date: Wed, 30 Oct 2024 12:17:30 +0100
Subject: [PATCH] [FIX] fixed signed zext

---
 src/CodeGen/compilation/cast.rs   |  4 ++--
 src/CodeGen/compilation/mod.rs    | 21 ++++++++++-------
 src/CodeGen/compilation/prolog.rs | 16 +++++--------
 src/CodeGen/reg_vec.rs            |  4 ++--
 src/Target/target_descr.rs        |  4 ++--
 src/Target/x64/asm/instr.rs       | 39 +++++++++++++++++++++++++++++++
 src/Target/x64/compilation.rs     |  2 +-
 src/Target/x64/lower/zext.rs      | 22 ++++++-----------
 src/Target/x64/optimizer.rs       | 12 +++++-----
 src/Target/x64/reg_alloc.rs       |  2 +-
 10 files changed, 78 insertions(+), 48 deletions(-)

diff --git a/src/CodeGen/compilation/cast.rs b/src/CodeGen/compilation/cast.rs
index ca3b1efd..95bc9170 100644
--- a/src/CodeGen/compilation/cast.rs
+++ b/src/CodeGen/compilation/cast.rs
@@ -13,9 +13,9 @@ impl CompilationHelper {
         let op = {
             if node.inner1.ty.float() || node.inner2.float() {
                 MachineMnemonic::FCast
-            } else if node.inner1.ty.bitSize() > node.inner2.bitSize() {
+            } else if node.inner1.ty.bitSize() < node.inner2.bitSize() {
                 MachineMnemonic::Zext
-            } else if node.inner1.ty.bitSize() < node.inner2.bitSize(){
+            } else if node.inner1.ty.bitSize() > node.inner2.bitSize() {
                 MachineMnemonic::Downcast
             } else {
                 return;
diff --git a/src/CodeGen/compilation/mod.rs b/src/CodeGen/compilation/mod.rs
index d70bd199..2c3f181f 100644
--- a/src/CodeGen/compilation/mod.rs
+++ b/src/CodeGen/compilation/mod.rs
@@ -120,15 +120,23 @@ impl CompilationHelper {
     }
 
     pub(crate) fn alloc_stack(&mut self, ty: TypeMetadata) -> VarLocation {
-        if let Some(alloc_stack) = self.alloc.alloc_stack {
+        let out = if let Some(alloc_stack) = self.alloc.alloc_stack {
             alloc_stack(&mut self.alloc, ty)
-        } else { panic!("no registered stack allocation function for {:?}", self.arch) }
+        } else { panic!("no registered stack allocation function for {:?}", self.arch) };
+
+        self.epilog = self.alloc.epilog;
+
+        out
     }
 
     pub(crate) fn alloc_rv(&mut self, ty: TypeMetadata) -> VarLocation {
-        if let Some(alloc) = self.alloc.alloc_rv {
+        let out = if let Some(alloc) = self.alloc.alloc_rv {
             alloc(&mut self.alloc, ty)
-        } else { panic!("no registered allocation function for {:?}", self.arch) }
+        } else { panic!("no registered allocation function for {:?}", self.arch) };
+
+        self.epilog = self.alloc.epilog;
+
+        out
     }
 
     pub(crate) fn free(&mut self, loc: VarLocation) {
@@ -155,11 +163,6 @@ impl CompilationHelper {
 
         with_name
     }
-
-    #[inline]
-    pub(crate) fn epilog(&self) -> bool {
-        self.epilog
-    }
 }
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
diff --git a/src/CodeGen/compilation/prolog.rs b/src/CodeGen/compilation/prolog.rs
index e8bec7a2..db6544f1 100644
--- a/src/CodeGen/compilation/prolog.rs
+++ b/src/CodeGen/compilation/prolog.rs
@@ -5,21 +5,17 @@
 impl CompilationHelper {
     #[allow(missing_docs)]
     pub fn compile_prolog(&mut self, sink: &mut Vec<MachineInstr>) {
-        if self.epilog() {
-            let mut instr = MachineInstr::new( MachineMnemonic::Prolog );
-            instr.add_operand( MachineOperand::Imm(self.alloc.stack_off as f64) );
+        let mut instr = MachineInstr::new( MachineMnemonic::Prolog );
+        instr.add_operand( MachineOperand::Imm(self.alloc.stack_off as f64) );
 
-            sink.push( instr );
-        }
+        sink.push( instr );
     }
 
     #[allow(missing_docs)]
     pub fn compile_epilog(&mut self, sink: &mut Vec<MachineInstr>) {
-        if self.epilog() {
-            let mut instr = MachineInstr::new( MachineMnemonic::Epilog );
-            instr.add_operand( MachineOperand::Imm(self.alloc.stack_off as f64) );
+        let mut instr = MachineInstr::new( MachineMnemonic::Epilog );
+        instr.add_operand( MachineOperand::Imm(self.alloc.stack_off as f64) );
 
-            sink.push( instr );
-        }
+        sink.push( instr );
     }
 }
\ No newline at end of file
diff --git a/src/CodeGen/reg_vec.rs b/src/CodeGen/reg_vec.rs
index c84b31ed..96c3a768 100644
--- a/src/CodeGen/reg_vec.rs
+++ b/src/CodeGen/reg_vec.rs
@@ -41,7 +41,7 @@ impl RegVec {
         }
     }
 
-    pub(crate) fn inner(&mut self, arch: Arch) -> &mut Vec<Reg> {
+    /*pub(crate) fn inner(&mut self, arch: Arch) -> &mut Vec<Reg> {
         let keys = self.regs.clone();
         let keys = keys.keys();
 
@@ -50,5 +50,5 @@
         } else {
             panic!("unkown entry: {:?} (known entrys: {:?})", arch, keys)
         }
-    }
+    }*/
 }
diff --git a/src/Target/target_descr.rs b/src/Target/target_descr.rs
index e93783ca..8cdecedf 100644
--- a/src/Target/target_descr.rs
+++ b/src/Target/target_descr.rs
@@ -117,11 +117,11 @@ impl TargetBackendDescr {
         let mut ir_helper = IrCodeGenHelper::new(helper.to_owned());
 
         for node in block.nodes.to_owned() {
-            if ir_helper.helper.epilog() {
+            if ir_helper.helper.alloc.epilog {
                 self.epilog = true;
             }
 
-            // VERY UGLY CODE WHICH SINCRONICES THE MAX STACK_OFF
+            // VERY UGLY CODE WHICH SYNCS THE MAX STACK_OFF
             // OF EITHER ir_helper or helper (the one who has the biggest gets selected)
             if helper.alloc.stack_off < ir_helper.helper.alloc.stack_off {
                 helper.alloc.stack_off = ir_helper.helper.alloc.stack_off;
diff --git a/src/Target/x64/asm/instr.rs b/src/Target/x64/asm/instr.rs
index ca912714..aea288a0 100644
--- a/src/Target/x64/asm/instr.rs
+++ b/src/Target/x64/asm/instr.rs
@@ -551,6 +551,39 @@ impl X64MCInstr {
                     } else { todo!("{}", self) }
                 } else { todo!("{}", self) }
             },
+            Mnemonic::Movsx => {
+                if let Some(Operand::Reg(op1)) = &self.op1 {
+                    if let Some(Operand::Reg(op2)) = &self.op2 {
+                        if op1.is_gr64() {
+                            if op2.is_gr16() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r64_rm16, (*op1).into(), (*op2).into())?
+                            } else if op2.is_gr8() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r64_rm8, (*op1).into(), (*op2).into())?
+                            } else { todo!("{}", self) }
+                        } else if op1.is_gr32() {
+                            if op2.is_gr16() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r32_rm16, (*op1).into(), (*op2).into())?
+                            } else if op2.is_gr8() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r32_rm8, (*op1).into(), (*op2).into())?
+                            } else { todo!("{}", self) }
+                        } else if op1.is_gr16() {
+                            if op2.is_gr16() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r16_rm16, (*op1).into(), (*op2).into())?
+                            } else if op2.is_gr8() {
+                                Instruction::with2::<Register, Register>(Code::Movsx_r16_rm8, (*op1).into(), (*op2).into())?
+                            } else { todo!("{}", self) }
+                        } else { todo!("{}", self) }
+                    } else if let Some(Operand::Mem(op2)) = &self.op2 {
+                        if op1.is_gr64() {
+                            Instruction::with2::<Register, MemoryOperand>(Code::Movsx_r64_rm16, (*op1).into(), op2.into())?
+                        } else if op1.is_gr32() {
+                            Instruction::with2::<Register, MemoryOperand>(Code::Movsx_r32_rm16, (*op1).into(), op2.into())?
+                        } else if op1.is_gr16() {
+                            Instruction::with2::<Register, MemoryOperand>(Code::Movsx_r16_rm16, (*op1).into(), op2.into())?
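+                            // NOTE: all three memory arms above encode a 16-bit load;
+                            // an 8-bit memory source is not handled yet and falls
+                            // through to the todo!() below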
+                        } else { todo!("{}", self) }
+                    } else { todo!("{}", self) }
+                } else { todo!("{}", self) }
+            },
             Mnemonic::Push => {
                 if let Some(Operand::Reg(op1)) = &self.op1 {
                     if op1.is_gr64() {
@@ -1347,6 +1380,7 @@ pub enum Mnemonic {
     Lea,
     Mov,
     Movzx,
+    Movsx,
     Push,
     Pop,
     Ret,
@@ -1494,6 +1528,7 @@ impl FromStr for Mnemonic {
             "cwd" => Ok(Mnemonic::Cwd),
             "cdq" => Ok(Mnemonic::Cdq),
             "cqo" => Ok(Mnemonic::Cqo),
+            "movsx" => Ok(Mnemonic::Movsx),
             _ => Err(()),
         }
     }
@@ -1570,6 +1605,7 @@ impl Display for Mnemonic {
             Mnemonic::Cwd => "cwd",
             Mnemonic::Cdq => "cdq",
             Mnemonic::Cqo => "cqo",
+            Mnemonic::Movsx => "movsx",
         })
     }
 }
@@ -1923,6 +1959,7 @@ IsCheckerOps0!(is_cmp, Mnemonic::Cmp);
 IsCheckerOps0!(is_lea, Mnemonic::Lea);
 IsCheckerOps0!(is_mov, Mnemonic::Mov);
 IsCheckerOps0!(is_movzx, Mnemonic::Movzx);
+IsCheckerOps0!(is_movsx, Mnemonic::Movsx);
 IsCheckerOps0!(is_push, Mnemonic::Push);
 IsCheckerOps0!(is_pop, Mnemonic::Pop);
 IsCheckerOps0!(is_ret, Mnemonic::Ret);
@@ -1995,6 +2032,7 @@ IsCheckerOps1!(is_cmp1, Mnemonic::Cmp);
 IsCheckerOps1!(is_lea1, Mnemonic::Lea);
 IsCheckerOps1!(is_mov1, Mnemonic::Mov);
 IsCheckerOps1!(is_movzx1, Mnemonic::Movzx);
+IsCheckerOps1!(is_movsx1, Mnemonic::Movsx);
 IsCheckerOps1!(is_push1, Mnemonic::Push);
 IsCheckerOps1!(is_pop1, Mnemonic::Pop);
 IsCheckerOps1!(is_imul1, Mnemonic::Imul);
@@ -2054,6 +2092,7 @@ IsCheckerOps2!(is_cmp2, Mnemonic::Cmp);
 IsCheckerOps2!(is_lea2, Mnemonic::Lea);
 IsCheckerOps2!(is_mov2, Mnemonic::Mov);
 IsCheckerOps2!(is_movzx2, Mnemonic::Movzx);
+IsCheckerOps2!(is_movsx2, Mnemonic::Movsx);
 IsCheckerOps2!(is_cmove2, Mnemonic::Cmove);
 IsCheckerOps2!(is_cmovne2, Mnemonic::Cmovne);
 IsCheckerOps2!(is_sal2, Mnemonic::Sal);
diff --git a/src/Target/x64/compilation.rs b/src/Target/x64/compilation.rs
index d6670952..ae75ee07 100644
--- a/src/Target/x64/compilation.rs
+++ b/src/Target/x64/compilation.rs
@@ -77,7 +77,7 @@ pub(crate) fn construct_compilation_helper(call_conv: CallConv) -> CompilationHe
     helper
 }
 
-fn x64_after_alloc(compiler: &CompilationHelper) {
+fn x64_after_alloc(_compiler: &CompilationHelper) {
     /*if compiler.alloc.stack_off - 8 < compiler.call.shadow(compiler.arch) {
         unsafe {
             super::lower::USE_SP_FOR_STACK = true;
diff --git a/src/Target/x64/lower/zext.rs b/src/Target/x64/lower/zext.rs
index edc8fca9..e81a1d4c 100644
--- a/src/Target/x64/lower/zext.rs
+++ b/src/Target/x64/lower/zext.rs
@@ -3,43 +3,35 @@ use crate::Target::x64::X64Reg;
 use crate::Target::x64::asm::instr::*;
 
 pub(crate) fn x64_lower_zext(sink: &mut Vec<X64MCInstr>, instr: &MachineInstr) {
     let op1 = instr.operands.get(0).expect("expected a first operand");
-    let op2 = instr.operands.get(0).expect("expected a secound operand");
     let out = instr.out.expect("expected a output operand");
 
     let mut movxz = false;
 
     let op1 = (*op1).into();
-
-    let op2 = (*op2).into();
     let out = out.into();
 
     if let Operand::Reg(op1) = op1 {
-        if let Operand::Reg(op2) = op2 {
-            if (op1.is_gr16() | op1.is_gr8()) && (op2.is_gr32() | op2.is_gr64()) { // movxz allowes a gr8/16 zext into gr32/64
+        if let Operand::Reg(out) = out {
+            if (op1.is_gr16() | op1.is_gr8()) && (out.is_gr32() | out.is_gr64()) { // movzx/movsx allow a gr8/16 extension into gr32/64
                 movxz = true;
             }
         }
     }
 
     if movxz {
+        let mnemonic = if instr.meta.signed() { Mnemonic::Movsx } else { Mnemonic::Movzx };
+
         let tmp = Operand::Reg(X64Reg::Rax.sub_ty(instr.meta).sub_ty(instr.meta));
 
-        sink.push(X64MCInstr::with2(Mnemonic::Mov, tmp.clone(), op1));
-        sink.push(X64MCInstr::with2(Mnemonic::Movzx, tmp.clone(), op2));
+        sink.push(X64MCInstr::with2(mnemonic, tmp.clone(), op1));
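+        // the extension lands in the rax-based scratch register first; the
+        // unchanged mov below then copies the result into the actual destination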
         sink.push(X64MCInstr::with2(Mnemonic::Mov, out, tmp));
     } else {
         let tmp = Operand::Reg(X64Reg::Rax.sub_ty(instr.meta).sub_ty(instr.meta));
 
-        if op1 == out {
-            sink.push(X64MCInstr::with2(Mnemonic::Mov, op1, op2));
-        } else {
-            sink.push(X64MCInstr::with2(Mnemonic::Mov, tmp.clone(), op1));
-            sink.push(X64MCInstr::with2(Mnemonic::Mov, tmp.clone(), op2));
-            sink.push(X64MCInstr::with2(Mnemonic::Mov, out, tmp));
-        }
+        sink.push(X64MCInstr::with2(Mnemonic::Mov, tmp.clone(), op1));
+        sink.push(X64MCInstr::with2(Mnemonic::Mov, out, tmp.clone()));
     }
 }
diff --git a/src/Target/x64/optimizer.rs b/src/Target/x64/optimizer.rs
index d32b6b93..516bcb8f 100644
--- a/src/Target/x64/optimizer.rs
+++ b/src/Target/x64/optimizer.rs
@@ -154,25 +154,25 @@ fn X64MergeAdd(instr0: &X64MCInstr, instr1: &X64MCInstr, instr2: &X64MCInstr) ->
         return None;
     }
 
-    let mut out = instr2.op1.clone();
-    let mut ls = instr0.op2.clone();
-    let mut rs = instr1.op2.clone();
+    let out = instr2.op1.clone();
+    let ls = instr0.op2.clone();
+    let rs = instr1.op2.clone();
 
     if let Some(Operand::Reg(reg)) = out {
         if !(reg.is_gr32() || reg.is_gr64()) {
-            out = Some(Operand::Reg(reg.sub16()))
+            return None;
         }
     }
 
     if let Some(Operand::Reg(reg)) = ls {
         if !(reg.is_gr32() || reg.is_gr64()) {
-            ls = Some(Operand::Reg(reg.sub64()))
+            return None;
         }
     }
 
     if let Some(Operand::Reg(reg)) = rs {
         if !(reg.is_gr32() || reg.is_gr64()) {
-            rs = Some(Operand::Reg(reg.sub64()))
+            return None;
         }
     }
diff --git a/src/Target/x64/reg_alloc.rs b/src/Target/x64/reg_alloc.rs
index 17365a3d..355c2b9b 100644
--- a/src/Target/x64/reg_alloc.rs
+++ b/src/Target/x64/reg_alloc.rs
@@ -109,7 +109,7 @@ pub(crate) fn x64_alloc_stack(alloc: &mut Allocator, ty: TypeMetadata) -> VarLoc
     alloc.epilog = true;
 
     let ret = VarLocation::Mem(alloc.stack_off, ty);
-    alloc.stack_off += ty.byteSize() as i64;
+    alloc.stack_off += 8; // always step by 8 bytes so every slot stays 8-byte aligned
 
     ret
 }
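
Note for reviewers (not part of the diff): the semantics this fix targets can
be sanity-checked with plain Rust casts, independent of ygen's lowering types.
A minimal sketch; the `as`-casts model what movzx (zero-extend) and movsx
(sign-extend) do on x64:

    fn main() {
        let v: i8 = -1;                // bit pattern 0xFF
        let zext = v as u8 as u32;     // zero-extension, as movzx encodes: 0x0000_00FF
        let sext = (v as i32) as u32;  // sign-extension, as movsx encodes: 0xFFFF_FFFF
        assert_eq!(zext, 0x0000_00FF);
        assert_eq!(sext, 0xFFFF_FFFF);
    }

Extending a signed value with movzx (the pre-patch behaviour) yields the first
result where the second is required; the new `instr.meta.signed()` check in
zext.rs selects movsx for that case.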