diff --git a/src/IR/func.rs b/src/IR/func.rs
index 4181230e..1c28a87b 100644
--- a/src/IR/func.rs
+++ b/src/IR/func.rs
@@ -116,7 +116,12 @@ impl Function {
         let mut fmt = String::new();
 
         for (name, metadata) in &self.ty.args {
-            fmt += &format!("{} %{},", metadata, name);
+            fmt += &format!("{} %{}, ", metadata, name);
+        }
+
+        if self.ty.args.len() > 0 {
+            fmt.remove(fmt.chars().count() - 1); // The last space
+            fmt.remove(fmt.chars().count() - 1); // The last comma
         }
 
         fmt
@@ -130,11 +135,12 @@ impl Function {
         let mut fmt = String::new();
 
         for (name, metadata) in &self.ty.args {
-            fmt += &format!("{} %{},", metadata, name);
+            fmt += &format!("{} %{}, ", metadata, name);
         }
 
-        if self.ty.args.len() != 0 {
-            fmt.remove(fmt.len() - 2); // The last comma
+        if self.ty.args.len() > 0 {
+            fmt.remove(fmt.chars().count() - 1); // The last space
+            fmt.remove(fmt.chars().count() - 1); // The last comma
         }
 
         fmt
diff --git a/src/Obj/wrapper.rs b/src/Obj/wrapper.rs
index 29186b2f..1fcfe32b 100644
--- a/src/Obj/wrapper.rs
+++ b/src/Obj/wrapper.rs
@@ -257,7 +257,7 @@ impl ObjectBuilder {
         let mut offset = 0;
 
         if self.triple.getCallConv() == Ok(CallConv::WindowsFastCall) {
-            //addend = -1;
+            addend = -1;
             offset = -4;
         } else if self.triple.getCallConv() == Ok(CallConv::SystemV) {
             addend = 0;
diff --git a/src/Target/x64/asm/instr.rs b/src/Target/x64/asm/instr.rs
index 3540578a..af205820 100644
--- a/src/Target/x64/asm/instr.rs
+++ b/src/Target/x64/asm/instr.rs
@@ -54,7 +54,7 @@ impl Instr {
             else { None }
         } else { None };
 
-        let (mut r, mut m, mut i, ibase, ibase8) = match self.mnemonic {
+        let (mut r, mut m, i, ibase, ibase8) = match self.mnemonic {
             Mnemonic::Add => (0x01, 0x03, 0, 0x81, 0x80),
             Mnemonic::Adc => (0x11, 0x03, 2, 0x81, 0x80),
             Mnemonic::Sub => (0x29, 0x2B, 5, 0x81, 0x80),
@@ -66,7 +66,9 @@ impl Instr {
         };
 
         if let Some(Operand::Reg(reg)) = &self.op1 {
-            if reg.is_gr8() { r -= 1; m -= 1; i -= 1; }
+            if reg.is_gr8() {
+                r -= 1; m -= 1;
+            }
         }
 
         (match self.op2.as_ref().expect("verifycation failed") {
diff --git a/src/Target/x64/compilation/call.rs b/src/Target/x64/compilation/call.rs
index 55c03697..51ea3eaf 100644
--- a/src/Target/x64/compilation/call.rs
+++ b/src/Target/x64/compilation/call.rs
@@ -6,6 +6,8 @@ pub(crate) fn CompileCall(call: &Call, Var>, registry: &mut T
 
     let mut asm = vec![];
 
+    let mut to_pop = vec![];
+
     for reg in vec![x64Reg::Rcx, x64Reg::Rdx, x64Reg::Rsi, x64Reg::Rdi, x64Reg::Rsi] { // save mutable registers
         if !registry.backend.openUsableRegisters64.contains(&reg.boxed()) {
             let var = registry.backend.getVarByReg(reg.boxed()).cloned();
@@ -13,6 +15,7 @@ pub(crate) fn CompileCall(call: &Call, Var>, registry: &mut T
             if let Some(var) = var {
                 if block.isVarUsedAfterNode(&boxed, &var) {
                     asm.push(Instr::with1(Mnemonic::Push, Operand::Reg(reg.boxed())));
+                    to_pop.push(reg);
                 }
             }
         }
@@ -104,16 +107,8 @@ pub(crate) fn CompileCall(call: &Call, Var>, registry: &mut T
         registry.backend.insertVar(call.inner3.clone(), store);
     }
 
-    for reg in vec![x64Reg::Rcx, x64Reg::Rdx, x64Reg::Rsi, x64Reg::Rdi, x64Reg::Rsi] { // getback mutable registers
-        if !registry.backend.openUsableRegisters64.contains(&reg.boxed()) {
-            let var = registry.backend.getVarByReg(reg.boxed()).cloned();
-
-            if let Some(var) = var {
-                if block.isVarUsedAfterNode(&boxed, &var) {
-                    asm.push(Instr::with1(Mnemonic::Pop, Operand::Reg(reg.boxed())));
-                }
-            }
-        }
+    for reg in to_pop {
+        asm.push(Instr::with1(Mnemonic::Pop, Operand::Reg(reg.boxed())));
     }
 
     asm
diff --git a/tests/ir.rs b/tests/ir.rs
new file mode 100644
index 00000000..733b558e
--- /dev/null
+++ b/tests/ir.rs
@@ -0,0 +1,30 @@
+use ygen::prelude::*;
+
+#[test]
+pub fn ir_optimization() {
+    let mut module = Module();
+
+    let mut builder = IRBuilder();
+
+    let other = module.add("cfunc", &FnTy(vec![TypeMetadata::i32, TypeMetadata::i32], TypeMetadata::i32));
+    other.import();
+    let other = other.clone();
+
+    let ty = FnTy(vec![TypeMetadata::i32, TypeMetadata::i32], TypeMetadata::i32);
+
+    let func = module.add(
+        "add", &ty
+    );
+
+    func.extrn();
+
+    let entry = func.addBlock("entry");
+    builder.positionAtEnd(entry);
+
+    let val = builder.BuildCall( &other, vec![ty.arg(0), ty.arg(1)] );
+    let val = builder.BuildAdd(val, ty.arg(0));
+
+    builder.BuildRet( val );
+
+    //assert_eq!(module.dump(), "define i32 @add(i32 %0, i32 %1) {\n entry:\n\t%2 = call i32 cfunc i32 %0 i32 %1 \n\tadd = %3 i32 %2, %0\n\tret i32 %3\n\n}\ndeclare i32 @cfunc(i32 %0, i32 %1)\n\n".to_string());
+}
\ No newline at end of file
diff --git a/tests/x64_instruction_encoding.rs b/tests/x64_instruction_encoding.rs
new file mode 100644
index 00000000..cf611f83
--- /dev/null
+++ b/tests/x64_instruction_encoding.rs
@@ -0,0 +1,45 @@
+use ygen::{Optimizations::auto_max_optimize, Target::{instr::*, x64Reg, Reg}};
+
+#[test]
+pub fn test_mov() {
+    let instr = Instr::with2(
+        Mnemonic::Mov,
+        Operand::Reg(x64Reg::Rcx.boxed()),
+        Operand::Mem(MemOp { base: Some(x64Reg::R15.boxed()), index: None, scale: 1, displ: 5, rip: false })
+    );
+
+    assert_eq!(instr.encode(), Ok((vec![0x49, 0x8B, 0x4F, 0x05], None)));
+
+    let instr = Instr::with2(
+        Mnemonic::Mov,
+        Operand::Reg(x64Reg::R12b.boxed()),
+        Operand::Imm(12)
+    );
+
+    assert_eq!(instr.encode(), Ok((vec![0x41, 0xC6, 0xC4, 0x0C], None)));
+}
+
+#[test]
+pub fn test_ret() {
+    let instr = Instr::with0(Mnemonic::Ret);
+
+    assert_eq!(instr.encode(), Ok((vec![0xC3], None)));
+}
+
+#[test]
+pub fn test_optimization() {
+    let mut instrs = vec![
+        Instr::with2(Mnemonic::Mov, Operand::Reg(x64Reg::Rax.boxed()), Operand::Reg(x64Reg::Rcx.boxed())),
+        Instr::with2(Mnemonic::Add, Operand::Reg(x64Reg::Rax.boxed()), Operand::Reg(x64Reg::Rdx.boxed())),
+        Instr::with2(Mnemonic::Mov, Operand::Reg(x64Reg::Rcx.boxed()), Operand::Reg(x64Reg::Rax.boxed())),
+    ];
+
+    let expected_optimized = vec![
+        Instr::with2(Mnemonic::Lea, Operand::Reg(x64Reg::Rax.boxed()), Operand::Mem(x64Reg::Rcx + x64Reg::Rdx)),
+        Instr::with2(Mnemonic::Mov, Operand::Reg(x64Reg::Rcx.boxed()), Operand::Reg(x64Reg::Rax.boxed())),
+    ];
+
+    auto_max_optimize(&mut instrs);
+
+    assert_eq!(instrs, expected_optimized);
+}
\ No newline at end of file