From 92186a27743732964b5cf3be339fd568da2aa4ba Mon Sep 17 00:00:00 2001 From: Tobias Hartmann Date: Mon, 7 Oct 2024 07:58:01 +0000 Subject: [PATCH] 8341612: [BACKOUT] 8338442: AArch64: Clean up IndOffXX type and let legitimize_address() fix out-of-range operands Reviewed-by: chagedorn --- src/hotspot/cpu/aarch64/aarch64.ad | 393 ++++++++++++++---- src/hotspot/cpu/aarch64/aarch64_vector.ad | 16 +- src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 | 2 +- src/hotspot/cpu/aarch64/ad_encode.m4 | 8 +- src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad | 2 +- src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad | 2 +- .../compiler/c2/TestUnalignedAccess.java | 61 +-- 7 files changed, 361 insertions(+), 123 deletions(-) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 7d2a35cefd8..c7cae54d14c 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -2721,7 +2721,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, { Address addr = mem2address(opcode, base, index, scale, disp); if (addr.getMode() == Address::base_plus_offset) { - // Fix up any out-of-range offsets. + /* Fix up any out-of-range offsets. */ assert_different_registers(rscratch1, base); assert_different_registers(rscratch1, reg); addr = __ legitimize_address(addr, size_in_memory, rscratch1); @@ -2762,11 +2762,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, int opcode, Register base, int index, int size, int disp) { if (index == -1) { - // Fix up any out-of-range offsets. - assert_different_registers(rscratch1, base); - Address addr = Address(base, disp); - addr = __ legitimize_address(addr, (1 << T), rscratch1); - (masm->*insn)(reg, T, addr); + (masm->*insn)(reg, T, Address(base, disp)); } else { assert(disp == 0, "unsupported address mode"); (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size))); @@ -2821,7 +2817,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2829,7 +2825,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2837,7 +2833,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2845,7 +2841,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2853,7 +2849,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2861,7 +2857,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2869,7 +2865,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2877,7 +2873,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2885,7 +2881,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{ + enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2893,7 +2889,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2901,7 +2897,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2909,7 +2905,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -2917,7 +2913,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{ + enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2925,7 +2921,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{ + enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -2933,7 +2929,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb(iRegI src, memory mem) %{ + enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2941,14 +2937,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0(memory mem) %{ + enc_class aarch64_enc_strb0(memory1 mem) %{ loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strh(iRegI src, memory mem) %{ + enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2956,14 +2952,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strh0(memory mem) %{ + enc_class aarch64_enc_strh0(memory2 mem) %{ loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strw(iRegI src, memory mem) %{ + enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2971,14 +2967,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strw0(memory mem) %{ + enc_class aarch64_enc_strw0(memory4 mem) %{ loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_str(iRegL src, memory mem) %{ + enc_class aarch64_enc_str(iRegL src, memory8 mem) %{ Register src_reg = as_Register($src$$reg); // we sometimes get asked to store the stack pointer into the // current thread -- we cannot do that directly on AArch64 @@ -2993,14 +2989,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_str0(memory mem) %{ + enc_class aarch64_enc_str0(memory8 mem) %{ loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strs(vRegF src, memory mem) %{ + enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -3008,7 +3004,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strd(vRegD src, memory mem) %{ + enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -3016,7 +3012,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0_ordered(memory mem) %{ + enc_class aarch64_enc_strb0_ordered(memory4 mem) %{ __ membar(Assembler::StoreStore); loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -3218,7 +3214,7 @@ encode %{ // synchronized read/update encodings - enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{ + enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{ Register dst_reg = as_Register($dst$$reg); Register base = as_Register($mem$$base); int index = $mem$$index; @@ -3246,7 +3242,7 @@ encode %{ } %} - enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{ + enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{ Register src_reg = as_Register($src$$reg); Register base = as_Register($mem$$base); int index = $mem$$index; @@ -4174,10 +4170,60 @@ operand immIU7() interface(CONST_INTER); %} -// Offset for immediate loads and stores +// Offset for scaled or unscaled immediate loads and stores operand immIOffset() %{ - predicate(n->get_int() >= -256 && n->get_int() <= 65520); + predicate(Address::offset_ok_for_immed(n->get_int(), 0)); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immIOffset1() +%{ + predicate(Address::offset_ok_for_immed(n->get_int(), 0)); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immIOffset2() +%{ + predicate(Address::offset_ok_for_immed(n->get_int(), 1)); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immIOffset4() +%{ + predicate(Address::offset_ok_for_immed(n->get_int(), 2)); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immIOffset8() +%{ + predicate(Address::offset_ok_for_immed(n->get_int(), 3)); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immIOffset16() +%{ + predicate(Address::offset_ok_for_immed(n->get_int(), 4)); match(ConI); op_cost(0); @@ -4195,6 +4241,56 @@ operand immLOffset() interface(CONST_INTER); %} +operand immLoffset1() +%{ + predicate(Address::offset_ok_for_immed(n->get_long(), 0)); + match(ConL); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immLoffset2() +%{ + predicate(Address::offset_ok_for_immed(n->get_long(), 1)); + match(ConL); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immLoffset4() +%{ + predicate(Address::offset_ok_for_immed(n->get_long(), 2)); + match(ConL); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immLoffset8() +%{ + predicate(Address::offset_ok_for_immed(n->get_long(), 3)); + match(ConL); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immLoffset16() +%{ + predicate(Address::offset_ok_for_immed(n->get_long(), 4)); + match(ConL); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + // 5 bit signed long integer operand immL5() %{ @@ -5107,7 +5203,21 @@ operand indIndex(iRegP reg, iRegL lreg) %} %} -operand indOffI(iRegP reg, immIOffset off) +operand indOffI1(iRegP reg, immIOffset1 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffI2(iRegP reg, immIOffset2 off) %{ constraint(ALLOC_IN_RC(ptr_reg)); match(AddP reg off); @@ -5121,7 +5231,105 @@ operand indOffI(iRegP reg, immIOffset off) %} %} -operand 
indOffL(iRegP reg, immLOffset off) +operand indOffI4(iRegP reg, immIOffset4 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffI8(iRegP reg, immIOffset8 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffI16(iRegP reg, immIOffset16 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffL1(iRegP reg, immLoffset1 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffL2(iRegP reg, immLoffset2 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffL4(iRegP reg, immLoffset4 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffL8(iRegP reg, immLoffset8 off) +%{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP reg off); + op_cost(0); + format %{ "[$reg, $off]" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0xffffffff); + scale(0x0); + disp($off); + %} +%} + +operand indOffL16(iRegP reg, immLoffset16 off) %{ constraint(ALLOC_IN_RC(ptr_reg)); match(AddP reg off); @@ -5497,7 +5705,10 @@ operand iRegL2P(iRegL reg) %{ interface(REG_INTER) %} -opclass vmem(indirect, indIndex, indOffI, indOffL, indOffIN, indOffLN); +opclass vmem2(indirect, indIndex, indOffI2, indOffL2); +opclass vmem4(indirect, indIndex, indOffI4, indOffL4); +opclass vmem8(indirect, indIndex, indOffI8, indOffL8); +opclass vmem16(indirect, indIndex, indOffI16, indOffL16); //----------OPERAND CLASSES---------------------------------------------------- // Operand Classes are groups of operands that are used as to simplify @@ -5509,9 +5720,23 @@ opclass vmem(indirect, indIndex, indOffI, indOffL, indOffIN, indOffLN); // memory is used to define read/write location for load/store // instruction defs. 
we can turn a memory op into an Address -opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, - indOffLN, indirectX2P, indOffX2P); +opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P); + +opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P); + +opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); + +opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); + +// All of the memory operands. For the pipeline description. +opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, + indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); + // iRegIorL2I is used for src inputs in rules for 32 bit int (I) // operations. it allows the src to be either an iRegI or a (ConvL2I @@ -6213,7 +6438,7 @@ define %{ // Load Instructions // Load Byte (8 bit signed) -instruct loadB(iRegINoSp dst, memory mem) +instruct loadB(iRegINoSp dst, memory1 mem) %{ match(Set dst (LoadB mem)); predicate(!needs_acquiring_load(n)); @@ -6227,7 +6452,7 @@ instruct loadB(iRegINoSp dst, memory mem) %} // Load Byte (8 bit signed) into long -instruct loadB2L(iRegLNoSp dst, memory mem) +instruct loadB2L(iRegLNoSp dst, memory1 mem) %{ match(Set dst (ConvI2L (LoadB mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6241,7 +6466,7 @@ instruct loadB2L(iRegLNoSp dst, memory mem) %} // Load Byte (8 bit unsigned) -instruct loadUB(iRegINoSp dst, memory mem) +instruct loadUB(iRegINoSp dst, memory1 mem) %{ match(Set dst (LoadUB mem)); predicate(!needs_acquiring_load(n)); @@ -6255,7 +6480,7 @@ instruct loadUB(iRegINoSp dst, memory mem) %} // Load Byte (8 bit unsigned) into long -instruct loadUB2L(iRegLNoSp dst, memory mem) +instruct loadUB2L(iRegLNoSp dst, memory1 mem) %{ match(Set dst (ConvI2L (LoadUB mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6269,7 +6494,7 @@ instruct loadUB2L(iRegLNoSp dst, memory mem) %} // Load Short (16 bit signed) -instruct loadS(iRegINoSp dst, memory mem) +instruct loadS(iRegINoSp dst, memory2 mem) %{ match(Set dst (LoadS mem)); predicate(!needs_acquiring_load(n)); @@ -6283,7 +6508,7 @@ instruct loadS(iRegINoSp dst, memory mem) %} // Load Short (16 bit signed) into long -instruct loadS2L(iRegLNoSp dst, memory mem) +instruct loadS2L(iRegLNoSp dst, memory2 mem) %{ match(Set dst (ConvI2L (LoadS mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6297,7 +6522,7 @@ instruct loadS2L(iRegLNoSp dst, memory mem) %} // Load Char (16 bit unsigned) -instruct loadUS(iRegINoSp dst, memory mem) +instruct loadUS(iRegINoSp dst, memory2 mem) %{ match(Set dst (LoadUS mem)); predicate(!needs_acquiring_load(n)); @@ -6311,7 +6536,7 @@ instruct loadUS(iRegINoSp dst, memory mem) %} // Load 
Short/Char (16 bit unsigned) into long -instruct loadUS2L(iRegLNoSp dst, memory mem) +instruct loadUS2L(iRegLNoSp dst, memory2 mem) %{ match(Set dst (ConvI2L (LoadUS mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6325,7 +6550,7 @@ instruct loadUS2L(iRegLNoSp dst, memory mem) %} // Load Integer (32 bit signed) -instruct loadI(iRegINoSp dst, memory mem) +instruct loadI(iRegINoSp dst, memory4 mem) %{ match(Set dst (LoadI mem)); predicate(!needs_acquiring_load(n)); @@ -6339,7 +6564,7 @@ instruct loadI(iRegINoSp dst, memory mem) %} // Load Integer (32 bit signed) into long -instruct loadI2L(iRegLNoSp dst, memory mem) +instruct loadI2L(iRegLNoSp dst, memory4 mem) %{ match(Set dst (ConvI2L (LoadI mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6353,7 +6578,7 @@ instruct loadI2L(iRegLNoSp dst, memory mem) %} // Load Integer (32 bit unsigned) into long -instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask) +instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask) %{ match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load())); @@ -6367,7 +6592,7 @@ instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask) %} // Load Long (64 bit signed) -instruct loadL(iRegLNoSp dst, memory mem) +instruct loadL(iRegLNoSp dst, memory8 mem) %{ match(Set dst (LoadL mem)); predicate(!needs_acquiring_load(n)); @@ -6381,7 +6606,7 @@ instruct loadL(iRegLNoSp dst, memory mem) %} // Load Range -instruct loadRange(iRegINoSp dst, memory mem) +instruct loadRange(iRegINoSp dst, memory4 mem) %{ match(Set dst (LoadRange mem)); @@ -6394,7 +6619,7 @@ instruct loadRange(iRegINoSp dst, memory mem) %} // Load Pointer -instruct loadP(iRegPNoSp dst, memory mem) +instruct loadP(iRegPNoSp dst, memory8 mem) %{ match(Set dst (LoadP mem)); predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0)); @@ -6408,7 +6633,7 @@ instruct loadP(iRegPNoSp dst, memory mem) %} // Load Compressed Pointer -instruct loadN(iRegNNoSp dst, memory mem) +instruct loadN(iRegNNoSp dst, memory4 mem) %{ match(Set dst (LoadN mem)); predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0); @@ -6422,7 +6647,7 @@ instruct loadN(iRegNNoSp dst, memory mem) %} // Load Klass Pointer -instruct loadKlass(iRegPNoSp dst, memory mem) +instruct loadKlass(iRegPNoSp dst, memory8 mem) %{ match(Set dst (LoadKlass mem)); predicate(!needs_acquiring_load(n)); @@ -6436,7 +6661,7 @@ instruct loadKlass(iRegPNoSp dst, memory mem) %} // Load Narrow Klass Pointer -instruct loadNKlass(iRegNNoSp dst, memory mem) +instruct loadNKlass(iRegNNoSp dst, memory4 mem) %{ match(Set dst (LoadNKlass mem)); predicate(!needs_acquiring_load(n)); @@ -6450,7 +6675,7 @@ instruct loadNKlass(iRegNNoSp dst, memory mem) %} // Load Float -instruct loadF(vRegF dst, memory mem) +instruct loadF(vRegF dst, memory4 mem) %{ match(Set dst (LoadF mem)); predicate(!needs_acquiring_load(n)); @@ -6464,7 +6689,7 @@ instruct loadF(vRegF dst, memory mem) %} // Load Double -instruct loadD(vRegD dst, memory mem) +instruct loadD(vRegD dst, memory8 mem) %{ match(Set dst (LoadD mem)); predicate(!needs_acquiring_load(n)); @@ -6668,7 +6893,7 @@ instruct loadConD(vRegD dst, immD con) %{ // Store Instructions // Store CMS card-mark Immediate -instruct storeimmCM0(immI0 zero, memory mem) +instruct storeimmCM0(immI0 zero, memory1 mem) %{ match(Set mem (StoreCM mem zero)); @@ -6683,7 +6908,7 @@ instruct storeimmCM0(immI0 zero, memory mem) // Store CMS card-mark Immediate with intervening StoreStore // needed when using CMS 
with no conditional card marking -instruct storeimmCM0_ordered(immI0 zero, memory mem) +instruct storeimmCM0_ordered(immI0 zero, memory1 mem) %{ match(Set mem (StoreCM mem zero)); @@ -6698,7 +6923,7 @@ instruct storeimmCM0_ordered(immI0 zero, memory mem) %} // Store Byte -instruct storeB(iRegIorL2I src, memory mem) +instruct storeB(iRegIorL2I src, memory1 mem) %{ match(Set mem (StoreB mem src)); predicate(!needs_releasing_store(n)); @@ -6712,7 +6937,7 @@ instruct storeB(iRegIorL2I src, memory mem) %} -instruct storeimmB0(immI0 zero, memory mem) +instruct storeimmB0(immI0 zero, memory1 mem) %{ match(Set mem (StoreB mem zero)); predicate(!needs_releasing_store(n)); @@ -6726,7 +6951,7 @@ instruct storeimmB0(immI0 zero, memory mem) %} // Store Char/Short -instruct storeC(iRegIorL2I src, memory mem) +instruct storeC(iRegIorL2I src, memory2 mem) %{ match(Set mem (StoreC mem src)); predicate(!needs_releasing_store(n)); @@ -6739,7 +6964,7 @@ instruct storeC(iRegIorL2I src, memory mem) ins_pipe(istore_reg_mem); %} -instruct storeimmC0(immI0 zero, memory mem) +instruct storeimmC0(immI0 zero, memory2 mem) %{ match(Set mem (StoreC mem zero)); predicate(!needs_releasing_store(n)); @@ -6754,7 +6979,7 @@ instruct storeimmC0(immI0 zero, memory mem) // Store Integer -instruct storeI(iRegIorL2I src, memory mem) +instruct storeI(iRegIorL2I src, memory4 mem) %{ match(Set mem(StoreI mem src)); predicate(!needs_releasing_store(n)); @@ -6767,7 +6992,7 @@ instruct storeI(iRegIorL2I src, memory mem) ins_pipe(istore_reg_mem); %} -instruct storeimmI0(immI0 zero, memory mem) +instruct storeimmI0(immI0 zero, memory4 mem) %{ match(Set mem(StoreI mem zero)); predicate(!needs_releasing_store(n)); @@ -6781,7 +7006,7 @@ instruct storeimmI0(immI0 zero, memory mem) %} // Store Long (64 bit signed) -instruct storeL(iRegL src, memory mem) +instruct storeL(iRegL src, memory8 mem) %{ match(Set mem (StoreL mem src)); predicate(!needs_releasing_store(n)); @@ -6795,7 +7020,7 @@ instruct storeL(iRegL src, memory mem) %} // Store Long (64 bit signed) -instruct storeimmL0(immL0 zero, memory mem) +instruct storeimmL0(immL0 zero, memory8 mem) %{ match(Set mem (StoreL mem zero)); predicate(!needs_releasing_store(n)); @@ -6809,7 +7034,7 @@ instruct storeimmL0(immL0 zero, memory mem) %} // Store Pointer -instruct storeP(iRegP src, memory mem) +instruct storeP(iRegP src, memory8 mem) %{ match(Set mem (StoreP mem src)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -6823,7 +7048,7 @@ instruct storeP(iRegP src, memory mem) %} // Store Pointer -instruct storeimmP0(immP0 zero, memory mem) +instruct storeimmP0(immP0 zero, memory8 mem) %{ match(Set mem (StoreP mem zero)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -6837,7 +7062,7 @@ instruct storeimmP0(immP0 zero, memory mem) %} // Store Compressed Pointer -instruct storeN(iRegN src, memory mem) +instruct storeN(iRegN src, memory4 mem) %{ match(Set mem (StoreN mem src)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -6850,7 +7075,7 @@ instruct storeN(iRegN src, memory mem) ins_pipe(istore_reg_mem); %} -instruct storeImmN0(immN0 zero, memory mem) +instruct storeImmN0(immN0 zero, memory4 mem) %{ match(Set mem (StoreN mem zero)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -6864,7 +7089,7 @@ instruct storeImmN0(immN0 zero, memory mem) %} // Store Float -instruct storeF(vRegF src, memory mem) +instruct storeF(vRegF src, memory4 mem) %{ match(Set mem (StoreF mem 
src)); predicate(!needs_releasing_store(n)); @@ -6881,7 +7106,7 @@ instruct storeF(vRegF src, memory mem) // implement storeImmF0 and storeFImmPacked // Store Double -instruct storeD(vRegD src, memory mem) +instruct storeD(vRegD src, memory8 mem) %{ match(Set mem (StoreD mem src)); predicate(!needs_releasing_store(n)); @@ -6895,7 +7120,7 @@ instruct storeD(vRegD src, memory mem) %} // Store Compressed Klass Pointer -instruct storeNKlass(iRegN src, memory mem) +instruct storeNKlass(iRegN src, memory4 mem) %{ predicate(!needs_releasing_store(n)); match(Set mem (StoreNKlass mem src)); @@ -6914,7 +7139,7 @@ instruct storeNKlass(iRegN src, memory mem) // prefetch instructions // Must be safe to execute with invalid address (cannot fault). -instruct prefetchalloc( memory mem ) %{ +instruct prefetchalloc( memory8 mem ) %{ match(PrefetchAllocation mem); ins_cost(INSN_COST); @@ -7486,7 +7711,7 @@ instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{ ins_pipe(pipe_class_default); %} -instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{ +instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{ match(Set dst (PopCountI (LoadI mem))); effect(TEMP tmp); ins_cost(INSN_COST * 13); @@ -7527,7 +7752,7 @@ instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{ ins_pipe(pipe_class_default); %} -instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{ +instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{ match(Set dst (PopCountL (LoadL mem))); effect(TEMP tmp); ins_cost(INSN_COST * 13); @@ -16680,7 +16905,7 @@ instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, ins_pipe(pipe_slow); %} -instruct compressBitsI_memcon(iRegINoSp dst, memory mem, immI mask, +instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (CompressBits (LoadI mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -16717,7 +16942,7 @@ instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask, ins_pipe(pipe_slow); %} -instruct compressBitsL_memcon(iRegLNoSp dst, memory mem, immL mask, +instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (CompressBits (LoadL mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -16754,7 +16979,7 @@ instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, ins_pipe(pipe_slow); %} -instruct expandBitsI_memcon(iRegINoSp dst, memory mem, immI mask, +instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (ExpandBits (LoadI mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -16792,7 +17017,7 @@ instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask, %} -instruct expandBitsL_memcon(iRegINoSp dst, memory mem, immL mask, +instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (ExpandBits (LoadL mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad index cdbc4103df8..0d3a240cecf 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector.ad +++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad @@ -345,7 +345,7 @@ source %{ // ------------------------------ Vector load/store ---------------------------- // Load Vector (16 bits) -instruct loadV2(vReg dst, vmem mem) %{ +instruct loadV2(vReg dst, vmem2 mem) %{ predicate(n->as_LoadVector()->memory_size() == 2); 
match(Set dst (LoadVector mem)); format %{ "loadV2 $dst, $mem\t# vector (16 bits)" %} @@ -354,7 +354,7 @@ instruct loadV2(vReg dst, vmem mem) %{ %} // Store Vector (16 bits) -instruct storeV2(vReg src, vmem mem) %{ +instruct storeV2(vReg src, vmem2 mem) %{ predicate(n->as_StoreVector()->memory_size() == 2); match(Set mem (StoreVector mem src)); format %{ "storeV2 $mem, $src\t# vector (16 bits)" %} @@ -363,7 +363,7 @@ instruct storeV2(vReg src, vmem mem) %{ %} // Load Vector (32 bits) -instruct loadV4(vReg dst, vmem mem) %{ +instruct loadV4(vReg dst, vmem4 mem) %{ predicate(n->as_LoadVector()->memory_size() == 4); match(Set dst (LoadVector mem)); format %{ "loadV4 $dst, $mem\t# vector (32 bits)" %} @@ -372,7 +372,7 @@ instruct loadV4(vReg dst, vmem mem) %{ %} // Store Vector (32 bits) -instruct storeV4(vReg src, vmem mem) %{ +instruct storeV4(vReg src, vmem4 mem) %{ predicate(n->as_StoreVector()->memory_size() == 4); match(Set mem (StoreVector mem src)); format %{ "storeV4 $mem, $src\t# vector (32 bits)" %} @@ -381,7 +381,7 @@ instruct storeV4(vReg src, vmem mem) %{ %} // Load Vector (64 bits) -instruct loadV8(vReg dst, vmem mem) %{ +instruct loadV8(vReg dst, vmem8 mem) %{ predicate(n->as_LoadVector()->memory_size() == 8); match(Set dst (LoadVector mem)); format %{ "loadV8 $dst, $mem\t# vector (64 bits)" %} @@ -390,7 +390,7 @@ instruct loadV8(vReg dst, vmem mem) %{ %} // Store Vector (64 bits) -instruct storeV8(vReg src, vmem mem) %{ +instruct storeV8(vReg src, vmem8 mem) %{ predicate(n->as_StoreVector()->memory_size() == 8); match(Set mem (StoreVector mem src)); format %{ "storeV8 $mem, $src\t# vector (64 bits)" %} @@ -399,7 +399,7 @@ instruct storeV8(vReg src, vmem mem) %{ %} // Load Vector (128 bits) -instruct loadV16(vReg dst, vmem mem) %{ +instruct loadV16(vReg dst, vmem16 mem) %{ predicate(n->as_LoadVector()->memory_size() == 16); match(Set dst (LoadVector mem)); format %{ "loadV16 $dst, $mem\t# vector (128 bits)" %} @@ -408,7 +408,7 @@ instruct loadV16(vReg dst, vmem mem) %{ %} // Store Vector (128 bits) -instruct storeV16(vReg src, vmem mem) %{ +instruct storeV16(vReg src, vmem16 mem) %{ predicate(n->as_StoreVector()->memory_size() == 16); match(Set mem (StoreVector mem src)); format %{ "storeV16 $mem, $src\t# vector (128 bits)" %} diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 index 020a75b51fa..99708e9ef31 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 +++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 @@ -338,7 +338,7 @@ dnl VECTOR_LOAD_STORE($1, $2, $3, $4, $5 ) dnl VECTOR_LOAD_STORE(type, nbytes, arg_name, nbits, size) define(`VECTOR_LOAD_STORE', ` // ifelse(load, $1, Load, Store) Vector ($4 bits) -instruct $1V$2(vReg $3, vmem mem) %{ +instruct $1V$2(vReg $3, vmem$2 mem) %{ predicate(`n->as_'ifelse(load, $1, Load, Store)Vector()->memory_size() == $2); match(Set ifelse(load, $1, dst (LoadVector mem), mem (StoreVector mem src))); format %{ "$1V$2 ifelse(load, $1, `$dst, $mem', `$mem, $src')\t# vector ($4 bits)" %} diff --git a/src/hotspot/cpu/aarch64/ad_encode.m4 b/src/hotspot/cpu/aarch64/ad_encode.m4 index e3d8ea661b6..008dbd2c936 100644 --- a/src/hotspot/cpu/aarch64/ad_encode.m4 +++ b/src/hotspot/cpu/aarch64/ad_encode.m4 @@ -34,7 +34,7 @@ define(access, ` define(load,` // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2($1 dst, memory mem) %{dnl + enc_class aarch64_enc_$2($1 dst, memory$5 mem) %{dnl access(dst,$2,$3,$4,$5)')dnl load(iRegI,ldrsbw,,,1) load(iRegI,ldrsb,,,1) @@ -53,12 +53,12 @@ load(vRegD,ldrd,Float,,8) define(STORE,` // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2($1 src, memory mem) %{dnl + enc_class aarch64_enc_$2($1 src, memory$5 mem) %{dnl access(src,$2,$3,$4,$5)')dnl define(STORE0,` // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2`'0(memory mem) %{ + enc_class aarch64_enc_$2`'0(memory$4 mem) %{ choose(masm,zr,$2,$mem->opcode(), as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl STORE(iRegI,strb,,,1) @@ -82,7 +82,7 @@ STORE(vRegD,strd,Float,,8) // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0_ordered(memory mem) %{ + enc_class aarch64_enc_strb0_ordered(memory4 mem) %{ __ membar(Assembler::StoreStore); loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad index 5e690a8e47b..6e401724baa 100644 --- a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad @@ -51,7 +51,7 @@ static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, %} // Load Pointer -instruct xLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr) +instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad index 1510b42bfe9..56d45384779 100644 --- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad @@ -100,7 +100,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address %} // Load Pointer -instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr) +instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); diff --git a/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java b/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java index 033ea49e609..d05dbad4a73 100644 --- a/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java +++ b/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java @@ -46,11 +46,20 @@ public class TestUnalignedAccess { static final Unsafe UNSAFE = Unsafe.getUnsafe(); static void sink(int x) {} + public static long lseed = 1; + public static int iseed = 2; + public static short sseed = 3; + public static byte bseed = 4; + public static long lres = lseed; + public static int ires = iseed; + public static short sres = sseed; + public static byte bres = bseed; + public static class TestLong { private static final byte[] BYTES = new byte[LEN]; private static final long rawdata = 0xbeef; - private static final long lseed = 1; + private static final long data; static { sink(2); @@ -60,10 +69,13 @@ public static class TestLong { // 1030 can't be encoded as "base + offset" mode 
into the instruction field. UNSAFE.putLongUnaligned(BYTES, 1030, rawdata); + lres += UNSAFE.getLongUnaligned(BYTES, 1030); // 127 can be encoded into simm9 field. - UNSAFE.putLongUnaligned(BYTES, 127, rawdata+lseed); + UNSAFE.putLongUnaligned(BYTES, 127, lres); + lres += UNSAFE.getLongUnaligned(BYTES, 127); // 1096 can be encoded into uimm12 field. - UNSAFE.putLongUnaligned(BYTES, 1096, rawdata-lseed); + UNSAFE.putLongUnaligned(BYTES, 1096, lres); + data = UNSAFE.getLongUnaligned(BYTES, 1096); } } @@ -72,7 +84,7 @@ public static class TestInt { private static final byte[] BYTES = new byte[LEN]; private static final int rawdata = 0xbeef; - private static final int iseed = 2; + private static final int data; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -81,10 +93,13 @@ public static class TestInt { // 274 can't be encoded as "base + offset" mode into the instruction field. UNSAFE.putIntUnaligned(BYTES, 274, rawdata); + ires += UNSAFE.getIntUnaligned(BYTES, 274); // 255 can be encoded into simm9 field. - UNSAFE.putIntUnaligned(BYTES, 255, rawdata + iseed); + UNSAFE.putIntUnaligned(BYTES, 255, ires); + ires += UNSAFE.getIntUnaligned(BYTES, 255); // 528 can be encoded into uimm12 field. - UNSAFE.putIntUnaligned(BYTES, 528, rawdata - iseed); + UNSAFE.putIntUnaligned(BYTES, 528, ires); + data = UNSAFE.getIntUnaligned(BYTES, 528); } } @@ -93,7 +108,7 @@ public static class TestShort { private static final byte[] BYTES = new byte[LEN]; private static final short rawdata = (short)0xbeef; - private static final short sseed = 3; + private static final short data; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -102,10 +117,13 @@ public static class TestShort { // 257 can't be encoded as "base + offset" mode into the instruction field. UNSAFE.putShortUnaligned(BYTES, 257, rawdata); + sres = (short) (sres + UNSAFE.getShortUnaligned(BYTES, 257)); // 253 can be encoded into simm9 field. - UNSAFE.putShortUnaligned(BYTES, 253, (short) (rawdata + sseed)); + UNSAFE.putShortUnaligned(BYTES, 253, sres); + sres = (short) (sres + UNSAFE.getShortUnaligned(BYTES, 253)); // 272 can be encoded into uimm12 field. - UNSAFE.putShortUnaligned(BYTES, 272, (short) (rawdata - sseed)); + UNSAFE.putShortUnaligned(BYTES, 272, sres); + data = UNSAFE.getShortUnaligned(BYTES, 272); } } @@ -114,7 +132,7 @@ public static class TestByte { private static final byte[] BYTES = new byte[LEN]; private static final byte rawdata = (byte)0x3f; - private static final byte bseed = 4; + private static final byte data; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -123,34 +141,29 @@ public static class TestByte { // 272 can be encoded into simm9 field. UNSAFE.putByte(BYTES, 272, rawdata); + bres = (byte) (bres + UNSAFE.getByte(BYTES, 272)); // 53 can be encoded into simm9 field. - UNSAFE.putByte(BYTES, 53, (byte) (rawdata + bseed)); + UNSAFE.putByte(BYTES, 53, bres); + bres = (byte) (bres + UNSAFE.getByte(BYTES, 53)); // 1027 can be encoded into uimm12 field. 
- UNSAFE.putByte(BYTES, 1027, (byte) (rawdata - bseed)); + UNSAFE.putByte(BYTES, 1027, bres); + data = UNSAFE.getByte(BYTES, 1027); } } static void test() { TestLong ta = new TestLong(); - Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 1030), ta.rawdata, "putUnaligned long failed!"); - Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 127), ta.rawdata + ta.lseed, "putUnaligned long failed!"); - Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 1096), ta.rawdata - ta.lseed, "putUnaligned long failed!"); + Asserts.assertEquals(ta.data, (ta.rawdata + lseed) * 2, "putUnaligned long failed!"); TestInt tb = new TestInt(); - Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 274), tb.rawdata, "putUnaligned int failed!"); - Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 255), tb.rawdata + tb.iseed, "putUnaligned int failed!"); - Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 528), tb.rawdata - tb.iseed, "putUnaligned int failed!"); + Asserts.assertEquals(tb.data, (tb.rawdata + iseed) * 2, "putUnaligned int failed!"); TestShort tc = new TestShort(); - Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 257), tc.rawdata, "putUnaligned short failed!"); - Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 253), (short) (tc.rawdata + tc.sseed), "putUnaligned short failed!"); - Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 272), (short) (tc.rawdata - tc.sseed), "putUnaligned short failed!"); + Asserts.assertEquals(tc.data, (short) (((short) (tc.rawdata + sseed)) * 2), "putUnaligned short failed!"); TestByte td = new TestByte(); - Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 272), td.rawdata, "put byte failed!"); - Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 53), (byte) (td.rawdata + td.bseed), "put byte failed!"); - Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 1027), (byte) (td.rawdata - td.bseed), "put byte failed!"); + Asserts.assertEquals(td.data, (byte) (((byte) (td.rawdata + bseed)) * 2), "put byte failed!"); } public static void main(String[] strArr) {
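A note for readers tracing the offset arithmetic in this patch: the restored immIOffset1/2/4/8/16 and immLoffset* operand predicates (Address::offset_ok_for_immed(offset, log2_size)) and the comments in TestUnalignedAccess.java all rest on the two immediate forms an AArch64 load/store can encode directly: an unscaled signed 9-bit byte offset (-256 to 255, the "simm9" of LDUR/STUR) or an unsigned 12-bit offset scaled by the access size (a non-negative multiple of the size up to 4095 * size, the "uimm12" of LDR/STR). The Java sketch below simply restates that rule and replays the offsets used in the test; the class and helper names (OffsetEncodingSketch, canEncodeSimm9, canEncodeUimm12, offsetOkForImmed) are invented for illustration and are not HotSpot APIs.

// Illustrative sketch of the AArch64 "base + immediate offset" encodability rule
// that the immIOffsetN operands and the test comments rely on. Assumed names;
// this is not the HotSpot implementation.
public class OffsetEncodingSketch {

    // LDUR/STUR form: signed 9-bit byte offset, unscaled.
    static boolean canEncodeSimm9(long offset) {
        return offset >= -256 && offset <= 255;
    }

    // LDR/STR (unsigned offset) form: 12-bit offset scaled by the access size,
    // so the offset must be a non-negative multiple of the size, at most 4095 * size.
    static boolean canEncodeUimm12(long offset, int sizeInBytes) {
        return offset >= 0
                && offset % sizeInBytes == 0
                && offset / sizeInBytes <= 4095;
    }

    // Rough analogue of what an offset predicate for a given access size checks:
    // the offset is usable directly if either immediate form can encode it.
    static boolean offsetOkForImmed(long offset, int sizeInBytes) {
        return canEncodeSimm9(offset) || canEncodeUimm12(offset, sizeInBytes);
    }

    public static void main(String[] args) {
        // Offsets used by TestUnalignedAccess, per the comments in the test:
        System.out.println(offsetOkForImmed(1030, 8)); // false: > 255 and not a multiple of 8
        System.out.println(offsetOkForImmed(127, 8));  // true:  fits simm9
        System.out.println(offsetOkForImmed(1096, 8)); // true:  1096 = 137 * 8, fits uimm12
        System.out.println(offsetOkForImmed(274, 4));  // false: > 255 and not a multiple of 4
        System.out.println(offsetOkForImmed(257, 2));  // false: > 255 and odd
    }
}

When an offset fails both forms, the code restored near the top of the patch does not try to encode it at all: loadStore() hands such a base_plus_offset address to legitimize_address() with rscratch1 as a scratch register, which is why the assert_different_registers(rscratch1, ...) guards appear there.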
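The rewritten test also changes what is asserted: each sub-test now threads values through a static accumulator (lres, ires, sres, bres), storing the running value at the next offset, so the data field ends up equal to (rawdata + seed) * 2 and test() compares against exactly that. The small sketch below replays TestLong's data flow with the same constants to make the factor of two visible; it is a standalone illustration, not part of the test.

// Replaying TestLong's data flow from the patch to show why the assertion
// expects (rawdata + lseed) * 2. Same constants as the test; standalone sketch.
public class TestLongDataFlowSketch {
    public static void main(String[] args) {
        long lseed = 1;
        long rawdata = 0xbeef;

        long lres = lseed;
        // putLongUnaligned(BYTES, 1030, rawdata), then read it back:
        lres += rawdata;            // lres = rawdata + lseed
        // putLongUnaligned(BYTES, 127, lres), then read it back:
        lres += lres;               // lres = (rawdata + lseed) * 2
        // putLongUnaligned(BYTES, 1096, lres); TestLong.data is the value read back from 1096:
        long data = lres;

        System.out.println(data == (rawdata + lseed) * 2); // true, matching the assertion
    }
}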