diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8808ab80d0e..e4cd59ed84a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -65,6 +65,8 @@ jobs: - 'hs/tier1 compiler part 3' - 'hs/tier1 compiler not-xcomp' - 'hs/tier1 gc' + - 'hs/tier2_gc_shenandoah shenandoah tier2' + - 'hs/tier3_gc_shenandoah shenandoah tier3' - 'hs/tier1 runtime' - 'hs/tier1 serviceability' - 'lib-test/tier1' @@ -106,6 +108,14 @@ jobs: test-suite: 'test/hotspot/jtreg/:tier1_gc' debug-suffix: -debug + - test-name: 'hs/tier2_gc_shenandoah shenandoah tier2' + test-suite: 'test/hotspot/jtreg/:tier2_gc_shenandoah' + debug-suffix: -debug + + - test-name: 'hs/tier3_gc_shenandoah shenandoah tier3' + test-suite: 'test/hotspot/jtreg/:tier3_gc_shenandoah' + debug-suffix: -debug + - test-name: 'hs/tier1 runtime' test-suite: 'test/hotspot/jtreg/:tier1_runtime' debug-suffix: -debug diff --git a/.jcheck/conf b/.jcheck/conf index e2ca212ab3a..77c3aa1f63d 100644 --- a/.jcheck/conf +++ b/.jcheck/conf @@ -1,5 +1,5 @@ [general] -project=jdk-updates +project=shenandoah jbs=JDK version=21.0.3 @@ -22,7 +22,7 @@ ignore-tabs=.*\.gmk|Makefile message=Merge [checks "reviewers"] -reviewers=1 +committers=1 ignore=duke [checks "committer"] diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp index c02f93313b3..5fc25d616b8 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,10 +89,21 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LI LIR_Opr result = gen->new_register(T_INT); __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result)); + + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), new_value.result()); + } return result; } } - return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + + LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + + if (ShenandoahCardBarrier && access.is_oop()) { + post_barrier(access, access.resolved_addr(), new_value.result()); + } + + return result; } LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { @@ -119,6 +131,9 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result /* pre_val */); } + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), result); + } } return result; diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp index fe4df9b8c0d..3dbfd69caa4 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +32,7 @@ #include "gc/shenandoah/shenandoahRuntime.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "runtime/javaThread.hpp" @@ -77,6 +79,13 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec } } +void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register start, Register count, Register tmp, RegSet saved_regs) { + if (ShenandoahCardBarrier && is_oop) { + gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs); + } +} + void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, Register obj, Register pre_val, @@ -375,6 +384,26 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d } } +void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + __ lsr(obj, obj, CardTable::card_shift()); + + assert(CardTable::dirty_card_val() == 0, "must be"); + + __ load_byte_map_base(rscratch1); + + if (UseCondCardMark) { + Label L_already_dirty; + __ ldrb(rscratch2, Address(obj, rscratch1)); + __ cbz(rscratch2, L_already_dirty); + __ strb(zr, Address(obj, rscratch1)); + __ bind(L_already_dirty); + } else { + __ strb(zr, Address(obj, rscratch1)); + } +} + void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { bool on_oop = is_reference_type(type); @@ -411,6 +440,9 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet __ mov(new_val, val); } BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg); + if (ShenandoahCardBarrier) { + store_check(masm, r3); + } } } @@ -595,6 +627,35 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, } } +void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register count, Register scratch, RegSet saved_regs) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + Label L_loop, L_done; + const Register end = count; + + // Zero count? Nothing to do. 
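[Review note] The loop emitted just below dirties every card spanned by the copied oop range, computing an inclusive last-card index so a range ending exactly on a card boundary does not touch one card too many. A minimal plain-C++ sketch of the same arithmetic, not part of the patch: the 512-byte card size (shift 9) and the helper name are assumptions, while dirty == 0 matches the assert above, and byte_map_base is the biased base produced by load_byte_map_base().

    #include <cstddef>
    #include <cstdint>

    static const int     kCardShift = 9;  // assumed: 512-byte cards
    static const uint8_t kDirtyCard = 0;  // CardTable::dirty_card_val() == 0, asserted above

    // Hypothetical helper mirroring gen_write_ref_array_post_barrier:
    // dirty all cards covering [start, start + count oops).
    void dirty_cards_for_range(uint8_t* byte_map_base, uintptr_t start,
                               size_t count, size_t bytes_per_heap_oop) {
      if (count == 0) return;                              // cbz count, L_done
      uintptr_t last = start + count * bytes_per_heap_oop - bytes_per_heap_oop;
      uintptr_t first_card = start >> kCardShift;
      uintptr_t last_card  = last  >> kCardShift;          // inclusive last card
      for (uintptr_t c = first_card; c <= last_card; c++) {
        byte_map_base[c] = kDirtyCard;                     // strb zr, [base, card]
      }
    }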
+ __ cbz(count, L_done); + + // end = start + count << LogBytesPerHeapOop + // last element address to make inclusive + __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop))); + __ sub(end, end, BytesPerHeapOop); + __ lsr(start, start, CardTable::card_shift()); + __ lsr(end, end, CardTable::card_shift()); + + // number of bytes to copy + __ sub(count, end, start); + + __ load_byte_map_base(scratch); + __ add(start, start, scratch); + __ bind(L_loop); + __ strb(zr, Address(start, count)); + __ subs(count, count, 1); + __ br(Assembler::GE, L_loop); + __ bind(L_done); +} + #undef __ #ifdef COMPILER1 diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp index 375893702e1..f55a4b91d28 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,10 +56,16 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { bool tosca_live, bool expand_call); + void store_check(MacroAssembler* masm, Register obj); + void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg); void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg); void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators); + void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register count, + Register scratch, RegSet saved_regs); + public: void iu_barrier(MacroAssembler* masm, Register dst, Register tmp); @@ -74,6 +81,8 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, Register src, Register dst, Register count, RegSet saved_regs); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register start, Register count, Register tmp, RegSet saved_regs); virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp2); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp index 6d9a1db1ed4..b64bdec74b8 100644 --- a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp @@ -107,11 +107,21 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess &access, LI __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result)); + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), new_value.result()); + } + return result; } } - return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + + if (ShenandoahCardBarrier && access.is_oop()) { + post_barrier(access, 
access.resolved_addr(), new_value.result()); + } + + return result; } LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess &access, LIRItem &value) { @@ -141,6 +151,10 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess &access, LIRIt if (ShenandoahSATBBarrier) { pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result); } + + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), result); + } } return result; diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp index ec91e86cd7c..9fc7544bd10 100644 --- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. - * Copyright (c) 2012, 2021 SAP SE. All rights reserved. + * Copyright (c) 2012, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "gc/shenandoah/shenandoahRuntime.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "interpreter/interpreter.hpp" #include "runtime/javaThread.hpp" #include "runtime/sharedRuntime.hpp" @@ -90,8 +91,6 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler *masm, void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count, Register preserve1, Register preserve2) { - __ block_comment("arraycopy_prologue (shenandoahgc) {"); - Register R11_tmp = R11_scratch1; assert_different_registers(src, dst, count, R11_tmp, noreg); @@ -114,6 +113,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec return; } + __ block_comment("arraycopy_prologue (shenandoahgc) {"); Label skip_prologue; // Fast path: Array is of length zero. 
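[Review note] The PPC C1 hunks above add a card-table post-barrier after each oop xchg/cmpxchg, alongside the existing SATB pre-barrier on the displaced value. A runnable sketch of the resulting barrier order under stated assumptions; the flag stand-in and both hook names are illustrative, not HotSpot API:

    #include <atomic>
    #include <cstdio>

    static bool g_card_barrier = true;  // stand-in for ShenandoahCardBarrier

    static void satb_enqueue(void* old_val) { std::printf("SATB pre-barrier: %p\n", old_val); }
    static void dirty_card_for(void* slot)  { std::printf("card mark for slot %p\n", slot); }

    // Order of operations for an oop atomic exchange, as wired up above:
    // swap first, enqueue the displaced value (SATB), then dirty the slot's card.
    void* atomic_xchg_oop(std::atomic<void*>* slot, void* new_val) {
      void* old_val = slot->exchange(new_val);
      satb_enqueue(old_val);            // pre_barrier(..., result /* pre_val */)
      if (g_card_barrier) {
        dirty_card_for(slot);           // post_barrier(access, resolved_addr, result)
      }
      return old_val;
    }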
@@ -187,6 +187,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec __ block_comment("} arraycopy_prologue (shenandoahgc)"); } +void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, + Register preserve) { + if (ShenandoahCardBarrier && is_reference_type(type)) { + __ block_comment("arraycopy_epilogue (shenandoahgc) {"); + gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve); + __ block_comment("} arraycopy_epilogue (shenandoahgc)"); + } +} + // The to-be-enqueued value can either be determined // - dynamically by passing the reference's address information (load mode) or // - statically by passing a register the value is stored in (preloaded mode) @@ -586,6 +596,25 @@ void ShenandoahBarrierSetAssembler::load_at( } } +void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set(); + CardTable* ct = ctbs->card_table(); + assert_different_registers(base, tmp, R0); + + if (ind_or_offs.is_constant()) { + __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp); + } else { + __ add(base, ind_or_offs.as_register(), base); + } + + __ load_const_optimized(tmp, (address)ct->byte_map_base(), R0); + __ srdi(base, base, CardTable::card_shift()); + __ li(R0, CardTable::dirty_card_val()); + __ stbx(R0, tmp, base); +} + // base: Base register of the reference's address. // ind_or_offs: Index or offset of the reference's address. // val: To-be-stored value/reference's new value. @@ -608,6 +637,11 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet val, tmp1, tmp2, tmp3, preservation_level); + + // No need for post barrier if storing NULL + if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) { + store_check(masm, base, ind_or_offs, tmp1); + } } void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler *masm, @@ -757,6 +791,40 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b __ block_comment("} cmpxchg_oop (shenandoahgc)"); } +void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register preserve) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); + CardTable* ct = bs->card_table(); + assert_different_registers(addr, count, R0); + + Label L_skip_loop, L_store_loop; + + __ sldi_(count, count, LogBytesPerHeapOop); + + // Zero length? Skip. + __ beq(CCR0, L_skip_loop); + + __ addi(count, count, -BytesPerHeapOop); + __ add(count, addr, count); + // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) 
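[Review note] The PPC store_check above reduces to three steps: form the field address from base plus offset, shift it down to a card index, and store the dirty value through the biased byte map base. A minimal C++ sketch of that computation; the card shift and names are assumptions carried over from the earlier note:

    #include <cstdint>

    static const int     kCardShift = 9;  // assumed 512-byte cards
    static const uint8_t kDirty     = 0;  // CardTable::dirty_card_val()

    // Illustrative equivalent of the PPC store_check sequence.
    inline void store_check(uint8_t* byte_map_base, uintptr_t base, intptr_t offset) {
      uintptr_t addr = base + offset;              // add_const_optimized / add
      byte_map_base[addr >> kCardShift] = kDirty;  // srdi + li + stbx
    }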
+ __ srdi(addr, addr, CardTable::card_shift()); + __ srdi(count, count, CardTable::card_shift()); + __ subf(count, addr, count); + __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0); + __ addi(count, count, 1); + __ li(R0, 0); + __ mtctr(count); + + // Byte store loop + __ bind(L_store_loop); + __ stb(R0, 0, addr); + __ addi(addr, addr, 1); + __ bdnz(L_store_loop); + __ bind(L_skip_loop); +} + #undef __ #ifdef COMPILER1 diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp index 4514f2540ac..4b59ef8f643 100644 --- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoahBarrierSetAssembler_ppc.hpp @@ -51,6 +51,10 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { Register tmp1, Register tmp2, MacroAssembler::PreservationLevel preservation_level); + void store_check(MacroAssembler* masm, + Register base, RegisterOrConstant ind_or_offs, + Register tmp); + void load_reference_barrier_impl(MacroAssembler* masm, DecoratorSet decorators, Register base, RegisterOrConstant ind_or_offs, Register dst, @@ -60,6 +64,10 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { /* ==== Helper methods for barrier implementations ==== */ void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp); + void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, + Register preserve); + public: virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } @@ -100,7 +108,11 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { /* ==== Access api ==== */ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register src, Register dst, Register count, Register preserve1, Register preserve2); + Register src, Register dst, Register count, + Register preserve1, Register preserve2); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, + Register preserve); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register base, RegisterOrConstant ind_or_offs, Register val, diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp index 26d60441c2d..48f52ad5409 100644 --- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -64,7 +65,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS); __ beqz(t0, done); } else { - __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING); + __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING); __ beqz(t0, done); } @@ -642,7 +643,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss // Is marking still active? Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); __ lb(tmp, gc_state); - __ test_bit(tmp, tmp, ShenandoahHeap::MARKING_BITPOS); + __ andi(tmp, tmp, ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING); __ beqz(tmp, done); // Can we store original value in the thread's buffer? diff --git a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp index 9995a87f5cf..a0fdefd1717 100644 --- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -87,10 +88,21 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LI LIR_Opr result = gen->new_register(T_INT); __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result)); + + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), new_value.result()); + } return result; } } - return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + + LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + + if (ShenandoahCardBarrier && access.is_oop()) { + post_barrier(access, access.resolved_addr(), new_value.result()); + } + + return result; } LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { @@ -120,6 +132,9 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result /* pre_val */); } + if (ShenandoahCardBarrier) { + post_barrier(access, access.resolved_addr(), result); + } } return result; diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp index 573edc26dad..2549102e6c8 100644 --- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +32,7 @@ #include "gc/shenandoah/shenandoahRuntime.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "interpreter/interpreter.hpp" #include "runtime/javaThread.hpp" #include "runtime/sharedRuntime.hpp" @@ -120,6 +122,29 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; if (is_reference_type(type)) { + if (ShenandoahCardBarrier) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; + bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); + + // We need to save the original element count because the array copy stub + // will destroy the value and we need it for the card marking barrier. +#ifdef _LP64 + if (!checkcast) { + if (!obj_int) { + // Save count for barrier + __ movptr(r11, count); + } else if (disjoint) { + // Save dst in r11 in the disjoint case + __ movq(r11, dst); + } + } +#else + if (disjoint) { + __ mov(rdx, dst); // save 'to' + } +#endif + } if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) { #ifdef _LP64 @@ -140,10 +165,10 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec #endif assert_different_registers(src, dst, count, thread); - Label done; + Label L_done; // Short-circuit if count == 0. __ testptr(count, count); - __ jcc(Assembler::zero, done); + __ jcc(Assembler::zero, L_done); // Avoid runtime call when not active. Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); @@ -154,7 +179,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING; } __ testb(gc_state, flags); - __ jcc(Assembler::zero, done); + __ jcc(Assembler::zero, L_done); save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false); @@ -174,13 +199,43 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false); - __ bind(done); + __ bind(L_done); NOT_LP64(__ pop(thread);) } } } +void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + + if (ShenandoahCardBarrier && is_reference_type(type)) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; + bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); + Register tmp = rax; + +#ifdef _LP64 + if (!checkcast) { + if (!obj_int) { + // Save count for barrier + count = r11; + } else if (disjoint) { + // Use the saved dst in the disjoint case + dst = r11; + } + } else { + tmp = rscratch1; + } +#else + if (disjoint) { + __ mov(dst, rdx); // restore 'to' + } +#endif + gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp); + } +} + void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, Register obj, Register pre_val, @@ -590,6 +645,49 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d } } +void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) { + assert(ShenandoahCardBarrier, 
"Did you mean to enable ShenandoahCardBarrier?"); + + // Does a store check for the oop in register obj. The content of + // register obj is destroyed afterwards. + + ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set(); + CardTable* ct = ctbs->card_table(); + + __ shrptr(obj, CardTable::card_shift()); + + Address card_addr; + + // The calculation for byte_map_base is as follows: + // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + // So this essentially converts an address to a displacement and it will + // never need to be relocated. On 64-bit however the value may be too + // large for a 32-bit displacement. + intptr_t byte_map_base = (intptr_t)ct->byte_map_base(); + if (__ is_simm32(byte_map_base)) { + card_addr = Address(noreg, obj, Address::times_1, byte_map_base); + } else { + // By doing it as an ExternalAddress 'byte_map_base' could be converted to a rip-relative + // displacement and done in a single instruction given favorable mapping and a + // smarter version of as_Address. However, 'ExternalAddress' generates a relocation + // entry and that entry is not properly handled by the relocation code. + AddressLiteral cardtable((address)byte_map_base, relocInfo::none); + Address index(noreg, obj, Address::times_1); + card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch1); + } + + int dirty = CardTable::dirty_card_val(); + if (UseCondCardMark) { + Label L_already_dirty; + __ cmpb(card_addr, dirty); + __ jccb(Assembler::equal, L_already_dirty); + __ movb(card_addr, dirty); + __ bind(L_already_dirty); + } else { + __ movb(card_addr, dirty); + } +} + void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) { @@ -632,6 +730,9 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet } else { iu_barrier(masm, val, tmp3); BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg); + if (ShenandoahCardBarrier) { + store_check(masm, tmp1); + } } NOT_LP64(imasm->restore_bcp()); } else { @@ -827,6 +928,63 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, } } +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) + +void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, + Register tmp) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); + CardTable* ct = bs->card_table(); + intptr_t disp = (intptr_t) ct->byte_map_base(); + + Label L_loop, L_done; + const Register end = count; + assert_different_registers(addr, end); + + // Zero count? Nothing to do. 
+ __ testl(count, count); + __ jccb(Assembler::zero, L_done); + +#ifdef _LP64 + __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size + __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive + __ shrptr(addr, CardTable::card_shift()); + __ shrptr(end, CardTable::card_shift()); + __ subptr(end, addr); // end --> cards count + + __ mov64(tmp, disp); + __ addptr(addr, tmp); + + __ BIND(L_loop); + __ movb(Address(addr, count, Address::times_1), 0); + __ decrement(count); + __ jccb(Assembler::greaterEqual, L_loop); +#else + __ lea(end, Address(addr, count, Address::times_ptr, -wordSize)); + __ shrptr(addr, CardTable::card_shift()); + __ shrptr(end, CardTable::card_shift()); + __ subptr(end, addr); // end --> count + + __ BIND(L_loop); + Address cardtable(addr, count, Address::times_1, disp); + __ movb(cardtable, 0); + __ decrement(count); + __ jccb(Assembler::greaterEqual, L_loop); +#endif + + __ BIND(L_done); +} + #undef __ #ifdef COMPILER1 diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp index 47dfe144928..bea2174aafe 100644 --- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,6 +59,12 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { void iu_barrier_impl(MacroAssembler* masm, Register dst, Register tmp); + void store_check(MacroAssembler* masm, Register obj); + + void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, + Register tmp); + public: void iu_barrier(MacroAssembler* masm, Register dst, Register tmp); #ifdef COMPILER1 @@ -74,6 +81,8 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { bool exchange, Register tmp1, Register tmp2); virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, diff --git a/src/hotspot/share/gc/shared/ageTable.cpp b/src/hotspot/share/gc/shared/ageTable.cpp index 879bf28797f..374ad3cc5fd 100644 --- a/src/hotspot/share/gc/shared/ageTable.cpp +++ b/src/hotspot/share/gc/shared/ageTable.cpp @@ -71,6 +71,16 @@ void AgeTable::clear() { } } +#ifndef PRODUCT +bool AgeTable::is_clear() { + size_t total = 0; + for (size_t* p = sizes; p < sizes + table_size; ++p) { + total += *p; + } + return total == 0; +} +#endif // !PRODUCT + void AgeTable::merge(const AgeTable* subTable) { for (int i = 0; i < table_size; i++) { sizes[i]+= subTable->sizes[i]; diff --git a/src/hotspot/share/gc/shared/ageTable.hpp b/src/hotspot/share/gc/shared/ageTable.hpp index 9f0c10ec312..1fa50468cc6 100644 --- a/src/hotspot/share/gc/shared/ageTable.hpp +++ b/src/hotspot/share/gc/shared/ageTable.hpp @@ -25,6 +25,7 @@ 
#ifndef SHARE_GC_SHARED_AGETABLE_HPP #define SHARE_GC_SHARED_AGETABLE_HPP +#include "memory/allocation.hpp" #include "oops/markWord.hpp" #include "oops/oop.hpp" #include "runtime/perfDataTypes.hpp" @@ -36,7 +37,7 @@ // // Note: all sizes are in oops -class AgeTable { +class AgeTable: public CHeapObj<mtGC> { friend class VMStructs; public: @@ -52,17 +53,18 @@ class AgeTable { // clear table void clear(); + // check whether it's clear + bool is_clear() PRODUCT_RETURN0; // add entry inline void add(oop p, size_t oop_size); void add(uint age, size_t oop_size) { - assert(age > 0 && age < table_size, "invalid age of object"); + assert(age < table_size, "invalid age of object"); sizes[age] += oop_size; } - // Merge another age table with the current one. Used - // for parallel young generation gc. + // Merge another age table with the current one. void merge(const AgeTable* subTable); // Calculate new tenuring threshold based on age information. diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp index 4f2e0abbe95..6744e69884a 100644 --- a/src/hotspot/share/gc/shared/cardTable.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -44,7 +44,7 @@ uint CardTable::_card_size = 0; uint CardTable::_card_size_in_words = 0; void CardTable::initialize_card_size() { - assert(UseG1GC || UseParallelGC || UseSerialGC, + assert(UseG1GC || UseParallelGC || UseSerialGC || UseShenandoahGC, "Initialize card size should only be called by card based collectors."); _card_size = GCCardSizeInBytes; diff --git a/src/hotspot/share/gc/shared/gcConfiguration.cpp b/src/hotspot/share/gc/shared/gcConfiguration.cpp index 2e8d3eb2a51..cd5c3b9f36b 100644 --- a/src/hotspot/share/gc/shared/gcConfiguration.cpp +++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp @@ -32,6 +32,7 @@ #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" #include "utilities/debug.hpp" +#include "utilities/macros.hpp" GCName GCConfiguration::young_collector() const { if (UseG1GC) { @@ -42,6 +43,15 @@ GCName GCConfiguration::young_collector() const { return ParallelScavenge; } + if (UseShenandoahGC) { +#if INCLUDE_SHENANDOAHGC + if (strcmp(ShenandoahGCMode, "generational") == 0) { + return Shenandoah; + } +#endif + return NA; + } + if (UseZGC) { if (ZGenerational) { return ZMinor; @@ -49,11 +59,6 @@ GCName GCConfiguration::young_collector() const { return NA; } } - - if (UseShenandoahGC) { - return NA; - } - return DefNew; } @@ -66,6 +71,15 @@ GCName GCConfiguration::old_collector() const { return ParallelOld; } + if (UseShenandoahGC) { +#if INCLUDE_SHENANDOAHGC + if (strcmp(ShenandoahGCMode, "generational") == 0) { + return Shenandoah; + } +#endif + return NA; + } + if (UseZGC) { if (ZGenerational) { return ZMajor; @@ -73,11 +87,6 @@ GCName GCConfiguration::old_collector() const { return Z; } } - - if (UseShenandoahGC) { - return Shenandoah; - } - return SerialOld; } diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp index 1131945dc8c..2217fd17417 100644 --- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +26,7 @@ #include "precompiled.hpp" #include "c1/c1_IR.hpp" #include "gc/shared/satbMarkQueue.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" @@ -193,6 +195,16 @@ void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) value = iu_barrier(access.gen(), value, access.access_emit_info(), access.decorators()); } BarrierSetC1::store_at_resolved(access, value); + + if (ShenandoahCardBarrier && access.is_oop()) { + DecoratorSet decorators = access.decorators(); + bool is_array = (decorators & IS_ARRAY) != 0; + bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; + + bool precise = is_array || on_anonymous; + LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr(); + post_barrier(access, post_addr, value); + } } LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { @@ -291,3 +303,62 @@ void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) false, &lrb_phantom_code_gen_cl); } } + +void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + DecoratorSet decorators = access.decorators(); + LIRGenerator* gen = access.gen(); + bool in_heap = (decorators & IN_HEAP) != 0; + if (!in_heap) { + return; + } + + BarrierSet* bs = BarrierSet::barrier_set(); + ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs); + CardTable* ct = ctbs->card_table(); + LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base()); + if (addr->is_address()) { + LIR_Address* address = addr->as_address_ptr(); + // ptr cannot be an object because we use this barrier for array card marks + // and addr can point in the middle of an array.
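[Review note] store_at_resolved above picks between a precise and an imprecise card mark. The distinction in plain C++, a sketch with the same assumed constants as the earlier notes: array and unknown-oop stores must dirty the card of the element address itself (an array may span many cards), while a plain field store may simply dirty the card containing the object header.

    #include <cstdint>

    static const int     kCardShift = 9;  // assumed
    static const uint8_t kDirty     = 0;

    // precise == (IS_ARRAY || ON_UNKNOWN_OOP_REF), as computed above.
    inline void post_barrier(uint8_t* byte_map_base, uintptr_t obj_base,
                             uintptr_t field_addr, bool precise) {
      uintptr_t key = precise ? field_addr : obj_base;   // resolved_addr vs. base
      byte_map_base[key >> kCardShift] = kDirty;
    }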
+ LIR_Opr ptr = gen->new_pointer_register(); + if (!address->index()->is_valid() && address->disp() == 0) { + __ move(address->base(), ptr); + } else { + assert(address->disp() != max_jint, "lea doesn't support patched addresses!"); + __ leal(addr, ptr); + } + addr = ptr; + } + assert(addr->is_register(), "must be a register at this point"); + + LIR_Opr tmp = gen->new_pointer_register(); + if (two_operand_lir_form) { + __ move(addr, tmp); + __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp); + } else { + __ unsigned_shift_right(addr, CardTable::card_shift(), tmp); + } + + LIR_Address* card_addr; + if (gen->can_inline_as_constant(card_table_base)) { + card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE); + } else { + card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE); + } + + LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val()); + if (UseCondCardMark) { + LIR_Opr cur_value = gen->new_register(T_INT); + __ move(card_addr, cur_value); + + LabelObj* L_already_dirty = new LabelObj(); + __ cmp(lir_cond_equal, cur_value, dirty); + __ branch(lir_cond_equal, L_already_dirty->label()); + __ move(dirty, card_addr); + __ branch_destination(L_already_dirty->label()); + } else { + __ move(dirty, card_addr); + } +} diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp index 71093300e82..8b82152973b 100644 --- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp @@ -244,6 +244,8 @@ class ShenandoahBarrierSetC1 : public BarrierSetC1 { virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); + void post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val); + public: virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob); diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp index ac3afa774e1..0f001e42da5 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp @@ -1,5 +1,6 @@ /* - * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +34,7 @@ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" #include "gc/shenandoah/c2/shenandoahSupport.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "opto/arraycopynode.hpp" #include "opto/escape.hpp" #include "opto/graphKit.hpp" @@ -450,6 +452,90 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N kit->final_sync(ideal); } +Node* ShenandoahBarrierSetC2::byte_map_base_node(GraphKit* kit) const { + BarrierSet* bs = BarrierSet::barrier_set(); + ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs); + CardTable::CardValue* card_table_base = ctbs->card_table()->byte_map_base(); + if (card_table_base != nullptr) { + return kit->makecon(TypeRawPtr::make((address)card_table_base)); + } else { + return kit->null(); + } +} + +void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit, + Node* ctl, + Node* oop_store, + Node* obj, + Node* adr, + uint adr_idx, + Node* val, + BasicType bt, + bool use_precise) const { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + // No store check needed if we're storing a null. + if (val != nullptr && val->is_Con()) { + // must be either an oop or NULL + const Type* t = val->bottom_type(); + if (t == TypePtr::NULL_PTR || t == Type::TOP) + return; + } + + if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) { + // We can skip marks on a freshly-allocated object in Eden. + // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp. + // That routine informs GC to take appropriate compensating steps, + // upon a slow-path allocation, so as to make this card-mark + // elision safe. + return; + } + + if (!use_precise) { + // All card marks for a (non-array) instance are in one place: + adr = obj; + } + // (Else it's an array (or unknown), and we want more precise card marks.) + assert(adr != nullptr, ""); + + IdealKit ideal(kit, true); + + // Convert the pointer to an int prior to doing math on it + Node* cast = __ CastPX(__ ctrl(), adr); + + // Divide by card size + Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) ); + + // Combine card table base and card offset + Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset ); + + // Get the alias_index for raw card-mark memory + int adr_type = Compile::AliasIdxRaw; + Node* zero = __ ConI(0); // Dirty card value + + if (UseCondCardMark) { + // The classic GC reference write barrier is typically implemented + // as a store into the global card mark table. Unfortunately + // unconditional stores can result in false sharing and excessive + // coherence traffic as well as false transactional aborts. + // UseCondCardMark enables MP "polite" conditional card mark + // stores. In theory we could relax the load from ctrl() to + // no_ctrl, but that doesn't buy much latitude. + Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type); + __ if_then(card_val, BoolTest::ne, zero); + } + + // Smash zero into card + __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered); + + if (UseCondCardMark) { + __ end_if(); + } + + // Final sync IdealKit and GraphKit.
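[Review note] The ReduceInitialCardMarks early-out above rests on one invariant spelled out in the comment: a brand-new object cannot already be referenced from the old generation, so stores into it need no remembered-set entry, and slow-path allocation performs a deferred, compensating card mark instead. A tiny sketch of that predicate; the function name is hypothetical:

    #include <cstdint>

    // Mirrors: ReduceInitialCardMarks && obj == just_allocated_object(control())
    inline bool needs_post_barrier(uintptr_t dst_obj, uintptr_t just_allocated,
                                   bool reduce_initial_card_marks) {
      return !(reduce_initial_card_marks && dst_obj == just_allocated);
    }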
+ kit->final_sync(ideal); +} + #undef __ const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() { @@ -513,6 +599,17 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val.set_node(value); shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(), static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type()); + + Node* result = BarrierSetC2::store_at_resolved(access, val); + + if (ShenandoahCardBarrier) { + const bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; + const bool is_array = (decorators & IS_ARRAY) != 0; + const bool use_precise = is_array || anonymous; + post_barrier(kit, kit->control(), access.raw_access(), access.base(), + adr, adr_idx, val.node(), access.type(), use_precise); + } + + return result; } else { assert(access.is_opt_access(), "only for optimization passes"); assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code"); @@ -523,8 +620,8 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& Node* enqueue = gvn.transform(new ShenandoahIUBarrierNode(val.node())); val.set_node(enqueue); } + return BarrierSetC2::store_at_resolved(access, val); } - return BarrierSetC2::store_at_resolved(access, val); } Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { @@ -595,7 +692,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val } Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, - Node* new_val, const Type* value_type) const { + Node* new_val, const Type* value_type) const { GraphKit* kit = access.kit(); if (access.is_oop()) { new_val = shenandoah_iu_barrier(kit, new_val); @@ -637,6 +734,10 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess } #endif load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators())); + if (ShenandoahCardBarrier) { + post_barrier(kit, kit->control(), access.raw_access(), access.base(), + access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true); + } return load_store; } return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type); @@ -692,6 +793,10 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAcces } access.set_raw_access(load_store); pin_atomic_op(access); + if (ShenandoahCardBarrier) { + post_barrier(kit, kit->control(), access.raw_access(), access.base(), + access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true); + } return load_store; } return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type); @@ -708,6 +813,10 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces shenandoah_write_barrier_pre(kit, false /* do_load */, nullptr, nullptr, max_juint, nullptr, nullptr, result /* pre_val */, T_OBJECT); + if (ShenandoahCardBarrier) { + post_barrier(kit, kit->control(), access.raw_access(), access.base(), + access.addr().node(), access.alias_idx(), val, T_OBJECT, true); + } } return result; } @@ -906,9 +1015,25 @@ void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const } } -void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const { - if (is_shenandoah_wb_pre_call(n)) { -
shenandoah_eliminate_wb_pre(n, &macro->igvn()); +void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { + if (is_shenandoah_wb_pre_call(node)) { + shenandoah_eliminate_wb_pre(node, &macro->igvn()); + } + if (ShenandoahCardBarrier && node->Opcode() == Op_CastP2X) { + Node* shift = node->unique_out(); + Node* addp = shift->unique_out(); + for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) { + Node* mem = addp->last_out(j); + if (UseCondCardMark && mem->is_Load()) { + assert(mem->Opcode() == Op_LoadB, "unexpected code shape"); + // The load is checking if the card has been written so + // replace it with zero to fold the test. + macro->replace_node(mem, macro->intcon(0)); + continue; + } + assert(mem->is_Store(), "store required"); + macro->replace_node(mem, mem->in(MemNode::Memory)); + } } } diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp index 9b8e30c98a1..80649bf66fa 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp @@ -75,6 +75,18 @@ class ShenandoahBarrierSetC2 : public BarrierSetC2 { Node* shenandoah_iu_barrier(GraphKit* kit, Node* obj) const; + Node* byte_map_base_node(GraphKit* kit) const; + + void post_barrier(GraphKit* kit, + Node* ctl, + Node* store, + Node* obj, + Node* adr, + uint adr_idx, + Node* val, + BasicType bt, + bool use_precise) const; + void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar) const; diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp index da75706ac4c..83d463aee79 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved. * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index a8f71c8c5df..dac048587be 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,16 +22,21 @@ * questions.
* */ - #include "precompiled.hpp" + +#include "gc/shared/gcCause.hpp" +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" +#include "runtime/globals_extension.hpp" #include "utilities/quickSort.hpp" // These constants are used to adjust the margin of error for the moving @@ -54,11 +60,12 @@ const double ShenandoahAdaptiveHeuristics::HIGHEST_EXPECTED_AVAILABLE_AT_END = 0 const double ShenandoahAdaptiveHeuristics::MINIMUM_CONFIDENCE = 0.319; // 25% const double ShenandoahAdaptiveHeuristics::MAXIMUM_CONFIDENCE = 3.291; // 99.9% -ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() : - ShenandoahHeuristics(), +ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo* space_info) : + ShenandoahHeuristics(space_info), _margin_of_error_sd(ShenandoahAdaptiveInitialConfidence), _spike_threshold_sd(ShenandoahAdaptiveInitialSpikeThreshold), - _last_trigger(OTHER) { } + _last_trigger(OTHER), + _available(Moving_Average_Samples, ShenandoahAdaptiveDecayFactor) { } ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {} @@ -84,13 +91,13 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme, // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit. - size_t capacity = ShenandoahHeap::heap()->soft_max_capacity(); + size_t capacity = _space_info->soft_max_capacity(); size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste); - size_t free_target = (capacity / 100 * ShenandoahMinFreeThreshold) + max_cset; - size_t min_garbage = (free_target > actual_free ? (free_target - actual_free) : 0); + size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; + size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; log_info(gc, ergo)("Adaptive CSet Selection. 
Target Free: " SIZE_FORMAT "%s, Actual Free: " - SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", + SIZE_FORMAT "%s, Max Evacuation: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), @@ -125,22 +132,25 @@ void ShenandoahAdaptiveHeuristics::record_cycle_start() { _allocation_rate.allocation_counter_reset(); } -void ShenandoahAdaptiveHeuristics::record_success_concurrent() { - ShenandoahHeuristics::record_success_concurrent(); +void ShenandoahAdaptiveHeuristics::record_success_concurrent(bool abbreviated) { + ShenandoahHeuristics::record_success_concurrent(abbreviated); - size_t available = ShenandoahHeap::heap()->free_set()->available(); + size_t available = _space_info->available(); - _available.add(available); double z_score = 0.0; - if (_available.sd() > 0) { - z_score = (available - _available.avg()) / _available.sd(); + double available_sd = _available.sd(); + if (available_sd > 0) { + double available_avg = _available.avg(); + z_score = (double(available) - available_avg) / available_sd; + log_debug(gc, ergo)("%s Available: " SIZE_FORMAT " %sB, z-score=%.3f. Average available: %.1f %sB +/- %.1f %sB.", + _space_info->name(), + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + z_score, + byte_size_in_proper_unit(available_avg), proper_unit_for_byte_size(available_avg), + byte_size_in_proper_unit(available_sd), proper_unit_for_byte_size(available_sd)); } - log_debug(gc, ergo)("Available: " SIZE_FORMAT " %sB, z-score=%.3f. Average available: %.1f %sB +/- %.1f %sB.", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - z_score, - byte_size_in_proper_unit(_available.avg()), proper_unit_for_byte_size(_available.avg()), - byte_size_in_proper_unit(_available.sd()), proper_unit_for_byte_size(_available.sd())); + _available.add(double(available)); // In the case when a concurrent GC cycle completes successfully but with an // unusually small amount of available memory we will adjust our trigger @@ -196,42 +206,68 @@ static double saturate(double value, double min, double max) { } bool ShenandoahAdaptiveHeuristics::should_start_gc() { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - size_t max_capacity = heap->max_capacity(); - size_t capacity = heap->soft_max_capacity(); - size_t available = heap->free_set()->available(); - size_t allocated = heap->bytes_allocated_since_gc_start(); + size_t capacity = _space_info->soft_max_capacity(); + size_t available = _space_info->soft_available(); + size_t allocated = _space_info->bytes_allocated_since_gc_start(); - // Make sure the code below treats available without the soft tail. - size_t soft_tail = max_capacity - capacity; - available = (available > soft_tail) ? (available - soft_tail) : 0; + log_debug(gc)("should_start_gc (%s)? available: " SIZE_FORMAT ", soft_max_capacity: " SIZE_FORMAT + ", allocated: " SIZE_FORMAT, + _space_info->name(), available, capacity, allocated); // Track allocation rate even if we decide to start a cycle for other reasons. 
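[Review note] The headroom check a few hunks below boils down to a single comparison: start a cycle when the expected GC duration exceeds the time the remaining headroom can absorb at the margin-adjusted allocation rate. A runnable sketch of that trigger; names and units are illustrative, and the caller is assumed to have already subtracted spike headroom and degen/full-GC penalties as the patch does:

    #include <cstddef>

    inline bool should_trigger(double avg_cycle_time_s,     // davg + margin * dsd
                               double avg_alloc_rate_bps,   // upper_bound(margin)
                               size_t allocation_headroom_bytes) {
      if (avg_alloc_rate_bps <= 0.0) return false;          // nothing allocating
      double seconds_to_exhaustion =
          (double)allocation_headroom_bytes / avg_alloc_rate_bps;
      return avg_cycle_time_s > seconds_to_exhaustion;
    }

The same comparison is reused with the instantaneous rate for the spike trigger, only with _spike_threshold_sd in place of the margin of error.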
double rate = _allocation_rate.sample(allocated); _last_trigger = OTHER; - size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; + size_t min_threshold = min_free_threshold(); if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + log_info(gc)("Trigger (%s): Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", _space_info->name(), + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; } + // Check if we need to learn a bit about the application const size_t max_learn = ShenandoahLearningSteps; if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", - _gc_times_learned + 1, max_learn, - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + log_info(gc)("Trigger (%s): Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + _space_info->name(), _gc_times_learned + 1, max_learn, + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); return true; } } - + // Rationale: + // The idea is that there is an average allocation rate and there are occasional abnormal bursts (or spikes) of + // allocations that exceed the average allocation rate. What do these spikes look like? + // + // 1. At certain phase changes, we may discard large amounts of data and replace it with large numbers of newly + // allocated objects. This "spike" looks more like a phase change. We were in steady state at M bytes/sec + // allocation rate and now we're in a "reinitialization phase" that looks like N bytes/sec. We need the "spike" + // accommodation to give us enough runway to recalibrate our "average allocation rate". + // + // 2. The typical workload changes. "Suddenly", our typical workload of N TPS increases to N+delta TPS. This means + // our average allocation rate needs to be adjusted. Once again, we need the "spike" accommodation to give us + // enough runway to recalibrate our "average allocation rate". + // + // 3. Though there is an "average" allocation rate, a given workload's demand for allocation may be very bursty. We + // allocate a bunch of LABs during the 5 ms that follow completion of a GC, then we perform no more allocations for + // the next 150 ms. It seems we want the "spike" to represent the maximum divergence from average within the + // period of time between consecutive evaluations of the should_start_gc() service. Here's the thinking: + // + // a) Between now and the next time I ask whether should_start_gc(), we might experience a spike representing + // the anticipated burst of allocations. If that would put us over budget, then we should start GC immediately. + // b) Between now and the anticipated depletion of the allocation pool, there may be two or more bursts of allocations.
+ // If there are more than one of these bursts, we can "approximate" that these will be separated by spans of + // time with very little or no allocations so the "average" allocation rate should be a suitable approximation + // of how this will behave. + // + // For cases 1 and 2, we need to "quickly" recalibrate the average allocation rate whenever we detect a change + // in operation mode. We want some way to decide that the average rate has changed. Make average allocation rate + // computations an independent effort. // Check if allocation headroom is still okay. This also factors in: - // 1. Some space to absorb allocation spikes + // 1. Some space to absorb allocation spikes (ShenandoahAllocSpikeFactor) // 2. Accumulated penalties from Degenerated and Full GC size_t allocation_headroom = available; @@ -241,29 +277,31 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { allocation_headroom -= MIN2(allocation_headroom, spike_headroom); allocation_headroom -= MIN2(allocation_headroom, penalties); - double avg_cycle_time = _gc_time_history->davg() + (_margin_of_error_sd * _gc_time_history->dsd()); + double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd()); double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd); + log_debug(gc)("%s: average GC time: %.2f ms, allocation rate: %.0f %s/s", + _space_info->name(), + avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate)); if (avg_cycle_time > allocation_headroom / avg_alloc_rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", - avg_cycle_time * 1000, + log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s)" + " to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", + _space_info->name(), avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), _margin_of_error_sd); - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); - _last_trigger = RATE; return true; } bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); if (is_spiking && avg_cycle_time > allocation_headroom / rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", - avg_cycle_time * 1000, + log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", + _space_info->name(), avg_cycle_time * 1000, byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), _spike_threshold_sd); @@ -300,6 +338,13 @@ void 
ShenandoahAdaptiveHeuristics::adjust_spike_threshold(double amount) { log_debug(gc, ergo)("Spike threshold now: %.2f", _spike_threshold_sd); } +size_t ShenandoahAdaptiveHeuristics::min_free_threshold() { + // Note that soft_max_capacity() / 100 * min_free_threshold is smaller than max_capacity() / 100 * min_free_threshold. + // We want to behave conservatively here, so use max_capacity(). By returning a larger value, we cause the GC to + // trigger when the remaining amount of free memory shrinks below the larger threshold. + return _space_info->max_capacity() / 100 * ShenandoahMinFreeThreshold; +} + ShenandoahAllocationRate::ShenandoahAllocationRate() : _last_sample_time(os::elapsedTime()), _last_sample_value(0), diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp index 17214391383..ed7c3775d1e 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +26,12 @@ #ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP #define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP +#include "runtime/globals_extension.hpp" +#include "memory/allocation.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" #include "utilities/numberSeq.hpp" class ShenandoahAllocationRate : public CHeapObj { @@ -38,7 +43,6 @@ class ShenandoahAllocationRate : public CHeapObj { double upper_bound(double sds) const; bool is_spiking(double rate, double threshold) const; - private: double instantaneous_rate(double time, size_t allocated) const; @@ -50,9 +54,20 @@ class ShenandoahAllocationRate : public CHeapObj { TruncatedSeq _rate_avg; }; +/* + * The adaptive heuristic tracks the allocation behavior and average cycle + * time of the application. It attempts to start a cycle with enough time + * to complete before the available memory is exhausted. It errs on the + * side of starting cycles early to avoid allocation failures (degenerated + * cycles). + * + * This heuristic limits the number of regions for evacuation such that the + * evacuation reserve is respected. This helps it avoid allocation failures + * during evacuation. It preferentially selects regions with the most garbage.
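+ * + * As an illustration (assumed numbers, not defaults): if the average cycle time is 200 ms and the margin of error + * adds one standard deviation of 50 ms, the effective cycle estimate is 250 ms. With an upper-bound allocation rate + * of 400 MB/s, a cycle needs roughly 250 ms * 400 MB/s = 100 MB of runway, so the trigger fires once the allocation + * headroom (available memory less the spike and penalty reserves) drops below 100 MB.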
+ */ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { public: - ShenandoahAdaptiveHeuristics(); + ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo* space_info); virtual ~ShenandoahAdaptiveHeuristics(); @@ -61,7 +76,7 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { size_t actual_free); void record_cycle_start(); - void record_success_concurrent(); + void record_success_concurrent(bool abbreviated); void record_success_degenerated(); void record_success_full(); @@ -98,11 +113,12 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { void adjust_margin_of_error(double amount); void adjust_spike_threshold(double amount); +protected: ShenandoahAllocationRate _allocation_rate; // The margin of error expressed in standard deviations to add to our // average cycle time and allocation rate. As this value increases we - // tend to over estimate the rate at which mutators will deplete the + // tend to overestimate the rate at which mutators will deplete the // heap. In other words, erring on the side of caution will trigger more // concurrent GCs. double _margin_of_error_sd; @@ -125,6 +141,8 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { // establishes what is 'normal' for the application and is used as a // source of feedback to adjust trigger parameters. TruncatedSeq _available; + + size_t min_free_threshold(); }; #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp index be758d14ed1..17175e147ae 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +32,8 @@ #include "logging/logTag.hpp" #include "runtime/os.hpp" -ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() { +ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info) : + ShenandoahHeuristics(space_info) { // Do not shortcut evacuation SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp index e90d2da7347..04e2c032c29 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,9 +28,13 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +/* + * This is a diagnostic heuristic that continuously runs collection + * cycles and adds every region with any garbage to the collection set.
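+ * For example, its constructor overrides ShenandoahImmediateThreshold to 100 ("Do not shortcut evacuation"), + * so a cycle never stops short after reclaiming immediate garbage alone.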
+ */ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics { public: - ShenandoahAggressiveHeuristics(); + ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info); virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, RegionData* data, size_t size, diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp index 514a425f58b..9fa11208ac8 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +33,8 @@ #include "logging/log.hpp" #include "logging/logTag.hpp" -ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristics() { +ShenandoahCompactHeuristics::ShenandoahCompactHeuristics(ShenandoahSpaceInfo* space_info) : + ShenandoahHeuristics(space_info) { SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit); @@ -45,11 +47,9 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristic } bool ShenandoahCompactHeuristics::should_start_gc() { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - - size_t max_capacity = heap->max_capacity(); - size_t capacity = heap->soft_max_capacity(); - size_t available = heap->free_set()->available(); + size_t max_capacity = _space_info->max_capacity(); + size_t capacity = _space_info->soft_max_capacity(); + size_t available = _space_info->available(); // Make sure the code below treats available without the soft tail. size_t soft_tail = max_capacity - capacity; @@ -65,7 +65,7 @@ bool ShenandoahCompactHeuristics::should_start_gc() { return true; } - size_t bytes_allocated = heap->bytes_allocated_since_gc_start(); + size_t bytes_allocated = _space_info->bytes_allocated_since_gc_start(); if (bytes_allocated > threshold_bytes_allocated) { log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp index 0fd9641d4a2..21ec99eabc0 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp @@ -27,9 +27,13 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +/* + * This heuristic has simpler triggers than the adaptive heuristic. The + * size of the collection set is limited to 3/4 of available memory. 
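+ * For example, with roughly 1 GB of memory available, the heuristic stops adding regions once the + * collection set reaches about 768 MB.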
+ */ class ShenandoahCompactHeuristics : public ShenandoahHeuristics { public: - ShenandoahCompactHeuristics(); + ShenandoahCompactHeuristics(ShenandoahSpaceInfo* space_info); virtual bool should_start_gc(); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp new file mode 100644 index 00000000000..01b5374958d --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -0,0 +1,265 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" + +#include "logging/log.hpp" + +ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation) + : ShenandoahAdaptiveHeuristics(generation), _generation(generation) { +} + +void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { + assert(collection_set->is_empty(), "Must be empty"); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + + // Check all pinned regions have updated status before choosing the collection set. + heap->assert_pinned_region_status(); + + // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away. + + size_t num_regions = heap->num_regions(); + + RegionData* candidates = _region_data; + + size_t cand_idx = 0; + size_t preselected_candidates = 0; + + size_t total_garbage = 0; + + size_t immediate_garbage = 0; + size_t immediate_regions = 0; + + size_t free = 0; + size_t free_regions = 0; + + const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + + // This counts number of humongous regions that we intend to promote in this cycle. + size_t humongous_regions_promoted = 0; + // This counts number of regular regions that will be promoted in place. + size_t regular_regions_promoted_in_place = 0; + // This counts bytes of memory used by regular regions to be promoted in place. 
+ size_t regular_regions_promoted_usage = 0; + + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = heap->get_region(i); + if (!_generation->contains(region)) { + continue; + } + size_t garbage = region->garbage(); + total_garbage += garbage; + if (region->is_empty()) { + free_regions++; + free += region_size_bytes; + } else if (region->is_regular()) { + if (!region->has_live()) { + // We can recycle it right away and put it in the free set. + immediate_regions++; + immediate_garbage += garbage; + region->make_trash_immediate(); + } else { + bool is_candidate; + // This is our candidate for later consideration. + if (collection_set->is_preselected(i)) { + assert(region->age() >= tenuring_threshold, "Preselection filter"); + is_candidate = true; + preselected_candidates++; + // Set garbage value to maximum value to force this into the sorted collection set. + garbage = region_size_bytes; + } else if (region->is_young() && (region->age() >= tenuring_threshold)) { + // Note that for GLOBAL GC, region may be OLD, and OLD regions do not qualify for pre-selection + + // This region is old enough to be promoted but it was not preselected, either because its garbage is below + // ShenandoahOldGarbageThreshold so it will be promoted in place, or because there is not sufficient room + // in old gen to hold the evacuated copies of this region's live data. In both cases, we choose not to + // place this region into the collection set. + if (region->get_top_before_promote() != nullptr) { + // Region was included for promotion-in-place + regular_regions_promoted_in_place++; + regular_regions_promoted_usage += region->used_before_promote(); + } + is_candidate = false; + } else { + is_candidate = true; + } + if (is_candidate) { + candidates[cand_idx]._region = region; + candidates[cand_idx]._u._garbage = garbage; + cand_idx++; + } + } + } else if (region->is_humongous_start()) { + // Reclaim humongous regions here, and count them as the immediate garbage +#ifdef ASSERT + bool reg_live = region->has_live(); + bool bm_live = heap->complete_marking_context()->is_marked(cast_to_oop(region->bottom())); + assert(reg_live == bm_live, + "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT, + BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); +#endif + if (!region->has_live()) { + heap->trash_humongous_region_at(region); + + // Count only the start. Continuations would be counted on "trash" path + immediate_regions++; + immediate_garbage += garbage; + } else { + if (region->is_young() && region->age() >= tenuring_threshold) { + oop obj = cast_to_oop(region->bottom()); + size_t humongous_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize); + humongous_regions_promoted += humongous_regions; + } + } + } else if (region->is_trash()) { + // Count in just trashed collection set, during coalesced CM-with-UR + immediate_regions++; + immediate_garbage += garbage; + } + } + heap->reserve_promotable_humongous_regions(humongous_regions_promoted); + heap->reserve_promotable_regular_regions(regular_regions_promoted_in_place); + log_info(gc, ergo)("Planning to promote in place " SIZE_FORMAT " humongous regions and " SIZE_FORMAT + " regular regions, spanning a total of " SIZE_FORMAT " used bytes", + humongous_regions_promoted, regular_regions_promoted_in_place, + humongous_regions_promoted * ShenandoahHeapRegion::region_size_bytes() + + regular_regions_promoted_usage); + + // Step 2. 
Look back at garbage statistics, and decide if we want to collect anything, + // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. + + assert (immediate_garbage <= total_garbage, + "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), + byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); + + size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage); + + bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0); + if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) { + // Only young collections need to prime the collection set. + if (_generation->is_young()) { + heap->old_heuristics()->prime_collection_set(collection_set); + } + + // Call the subclasses to add young-gen regions into the collection set. + choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); + } + + if (collection_set->has_old_regions()) { + heap->shenandoah_policy()->record_mixed_cycle(); + } + + size_t cset_percent = (total_garbage == 0) ? 0 : (collection_set->garbage() * 100 / total_garbage); + size_t collectable_garbage = collection_set->garbage() + immediate_garbage; + size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage); + + log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " + "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions, " + "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions", + + byte_size_in_proper_unit(collectable_garbage), + proper_unit_for_byte_size(collectable_garbage), + collectable_garbage_percent, + + byte_size_in_proper_unit(immediate_garbage), + proper_unit_for_byte_size(immediate_garbage), + immediate_percent, + immediate_regions, + + byte_size_in_proper_unit(collection_set->garbage()), + proper_unit_for_byte_size(collection_set->garbage()), + cset_percent, + collection_set->count()); + + if (collection_set->garbage() > 0) { + size_t young_evac_bytes = collection_set->get_young_bytes_reserved_for_evacuation(); + size_t promote_evac_bytes = collection_set->get_young_bytes_to_be_promoted(); + size_t old_evac_bytes = collection_set->get_old_bytes_reserved_for_evacuation(); + size_t total_evac_bytes = young_evac_bytes + promote_evac_bytes + old_evac_bytes; + log_info(gc, ergo)("Evacuation Targets: YOUNG: " SIZE_FORMAT "%s, " + "PROMOTE: " SIZE_FORMAT "%s, " + "OLD: " SIZE_FORMAT "%s, " + "TOTAL: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(young_evac_bytes), proper_unit_for_byte_size(young_evac_bytes), + byte_size_in_proper_unit(promote_evac_bytes), proper_unit_for_byte_size(promote_evac_bytes), + byte_size_in_proper_unit(old_evac_bytes), proper_unit_for_byte_size(old_evac_bytes), + byte_size_in_proper_unit(total_evac_bytes), proper_unit_for_byte_size(total_evac_bytes)); + } +} + + +size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset, + const RegionData* data, + size_t size) const { +#ifdef ASSERT + const uint tenuring_threshold = ShenandoahHeap::heap()->age_census()->tenuring_threshold(); +#endif + + // cur_young_garbage represents the amount of memory to be reclaimed from young-gen. 
In the case that live objects + // are known to be promoted out of young-gen, we count this as cur_young_garbage because this memory is reclaimed + // from young-gen and becomes available to serve future young-gen allocation requests. + size_t cur_young_garbage = 0; + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + if (cset->is_preselected(r->index())) { + assert(r->age() >= tenuring_threshold, "Preselected regions must have tenure age"); + // Entire region will be promoted. This region does not impact young-gen or old-gen evacuation reserve. + // This region has been pre-selected and its impact on promotion reserve is already accounted for. + + // r->used() is r->garbage() + r->get_live_data_bytes() + // Since all live data in this region is being evacuated from young-gen, it is as if this memory + // is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to + // reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim + // within young-gen memory. + + cur_young_garbage += r->garbage(); + cset->add_region(r); + } + } + return cur_young_garbage; +} + +void ShenandoahGenerationalHeuristics::log_cset_composition(ShenandoahCollectionSet* cset) const { + size_t collected_old = cset->get_old_bytes_reserved_for_evacuation(); + size_t collected_promoted = cset->get_young_bytes_to_be_promoted(); + size_t collected_young = cset->get_young_bytes_reserved_for_evacuation(); + + log_info(gc, ergo)( + "Chosen CSet evacuates young: " SIZE_FORMAT "%s (of which at least: " SIZE_FORMAT "%s are to be promoted), " + "old: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(collected_young), proper_unit_for_byte_size(collected_young), + byte_size_in_proper_unit(collected_promoted), proper_unit_for_byte_size(collected_promoted), + byte_size_in_proper_unit(collected_old), proper_unit_for_byte_size(collected_old)); +} diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp new file mode 100644 index 00000000000..6708c63f042 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGENERATIONALHEURISTICS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGENERATIONALHEURISTICS_HPP + + +#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" + +class ShenandoahGeneration; + +/* + * This class serves as the base class for heuristics used to trigger and + * choose the collection sets for young and global collections. It leans + * heavily on the existing functionality of ShenandoahAdaptiveHeuristics. + * + * It differs from the base class primarily in that its collection set + * selection also handles mixed collections and in-place promotion of tenured + * regions. + */ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics { + +public: + explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation); + + void choose_collection_set(ShenandoahCollectionSet* collection_set) override; +protected: + ShenandoahGeneration* _generation; + + size_t add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset, + const RegionData* data, + size_t size) const; + + void log_cset_composition(ShenandoahCollectionSet* cset) const; +}; + + +#endif //SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGENERATIONALHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp new file mode 100644 index 00000000000..d8ae9bf84ae --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -0,0 +1,174 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahGlobalGeneration.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" + +#include "utilities/quickSort.hpp" + +ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation) + : ShenandoahGenerationalHeuristics(generation) { +} + + +void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) { + // The logic for cset selection in adaptive is as follows: + // + // 1. We cannot get cset larger than available free space. Otherwise we guarantee OOME + // during evacuation, and thus guarantee full GC.
In practice, we also want to let the + // application allocate something. This is why we limit CSet to some fraction of + // available space. In a non-overloaded heap, max_cset would contain all plausible candidates + // over garbage threshold. + // + // 2. We should not make the cset too small, lest the free threshold be crossed again right + // after the cycle; that would cause back-to-back cycles for no reason if the heap is + // too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero. + // + // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates + // before we meet min_garbage. Then we add all candidates that fit with a garbage threshold before + // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme, + // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit. + + // In generational mode, the sort order within the data array is not strictly by descending amount of garbage. In + // particular, regions that have reached tenure age will be sorted into this array before younger regions that contain + // more garbage. This is one of the reasons why we keep looking at regions even after we decide, for example, + // to exclude one of the regions because it might require evacuation of too much live data. + + + + // Better select garbage-first regions + QuickSort::sort(data, (int) size, compare_by_garbage, false); + + size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size); + + choose_global_collection_set(cset, data, size, actual_free, cur_young_garbage); + + log_cset_composition(cset); +} + + +void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollectionSet* cset, + const ShenandoahHeuristics::RegionData* data, + size_t size, size_t actual_free, + size_t cur_young_garbage) const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t capacity = heap->young_generation()->max_capacity(); + size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100; + size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100; + const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + + size_t max_young_cset = (size_t) (heap->get_young_evac_reserve() / ShenandoahEvacWaste); + size_t young_cur_cset = 0; + size_t max_old_cset = (size_t) (heap->get_old_evac_reserve() / ShenandoahOldEvacWaste); + size_t old_cur_cset = 0; + + // Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young + // collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage + // than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back + // to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available + // to the mutator as it needs to be able to consume this memory during concurrent GC.
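+ // As an illustration (assumed values, not defaults): with a 32 MB young evacuation reserve and ShenandoahEvacWaste + // of 1.2, max_young_cset starts near 26.7 MB. If ten unaffiliated 4 MB young regions exist (40 MB), the 13.3 MB + // excess stays with the mutator and the evacuator's share rounds down to six regions (24 MB); those six withheld + // regions are handed back one at a time inside the selection loop below, either enlarging the young budget by + // 4 MB / 1.2 apiece or being transferred to old-gen.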
+ + size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions(); + size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; + + if (unaffiliated_young_memory > max_young_cset) { + size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset; + unaffiliated_young_memory -= unaffiliated_mutator_memory; + unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down + unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; + } + + // We'll affiliate these unaffiliated regions with either old or young, depending on need. + max_young_cset -= unaffiliated_young_memory; + + // Keep track of how many regions we plan to transfer from young to old. + size_t regions_transferred_to_old = 0; + + size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset; + size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; + + log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: " SIZE_FORMAT + "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset), + byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset), + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + if (cset->is_preselected(r->index())) { + fatal("There should be no preselected regions during GLOBAL GC"); + continue; + } + bool add_region = false; + if (r->is_old() || (r->age() >= tenuring_threshold)) { + size_t new_cset = old_cur_cset + r->get_live_data_bytes(); + if ((r->garbage() > garbage_threshold)) { + while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) { + unaffiliated_young_regions--; + regions_transferred_to_old++; + max_old_cset += region_size_bytes / ShenandoahOldEvacWaste; + } + } + if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) { + add_region = true; + old_cur_cset = new_cset; + } + } else { + assert(r->is_young() && (r->age() < tenuring_threshold), "DeMorgan's law (assuming r->is_affiliated)"); + size_t new_cset = young_cur_cset + r->get_live_data_bytes(); + size_t region_garbage = r->garbage(); + size_t new_garbage = cur_young_garbage + region_garbage; + bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); + + if (add_regardless || (r->garbage() > garbage_threshold)) { + while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) { + unaffiliated_young_regions--; + max_young_cset += region_size_bytes / ShenandoahEvacWaste; + } + } + if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) { + add_region = true; + young_cur_cset = new_cset; + cur_young_garbage = new_garbage; + } + } + if (add_region) { + cset->add_region(r); + } + } + + if (regions_transferred_to_old > 0) { + heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); + heap->set_young_evac_reserve(heap->get_young_evac_reserve() - regions_transferred_to_old * region_size_bytes); + heap->set_old_evac_reserve(heap->get_old_evac_reserve() + regions_transferred_to_old * region_size_bytes); + } +} diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp new file mode 100644 index 
00000000000..1f95f75c521 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP + + +#include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp" + +class ShenandoahGlobalGeneration; + +/* + * This is a specialization of the generational heuristics which is aware + * of old and young regions and respects the configured evacuation parameters + * for such regions during a global collection of a generational heap. + */ +class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics { +public: + ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation); + + void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) override; + +private: + void choose_global_collection_set(ShenandoahCollectionSet* cset, + const ShenandoahHeuristics::RegionData* data, + size_t size, size_t actual_free, + size_t cur_young_garbage) const; +}; + + +#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHGLOBALHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeapStats.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeapStats.hpp new file mode 100644 index 00000000000..c59659ff5ad --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeapStats.hpp @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEAPSTATS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEAPSTATS_HPP + +#include "utilities/globalDefinitions.hpp" + +class ShenandoahHeapStats { +public: + virtual const char* name() const = 0; + virtual size_t soft_max_capacity() const = 0; + virtual size_t max_capacity() const = 0; + virtual size_t used() const = 0; + virtual size_t available() const = 0; + virtual size_t soft_available() const = 0; + virtual size_t bytes_allocated_since_gc_start() const = 0; +}; + +#endif //SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEAPSTATS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp index ad924f87b67..a776ab7f430 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,33 +25,33 @@ #include "precompiled.hpp" #include "gc/shared/gcCause.hpp" -#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" -#include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" #include "runtime/globals_extension.hpp" +#include "utilities/quickSort.hpp" +// sort by decreasing garbage (so most garbage comes first) int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) { - if (a._garbage > b._garbage) + if (a._u._garbage > b._u._garbage) return -1; - else if (a._garbage < b._garbage) + else if (a._u._garbage < b._u._garbage) return 1; else return 0; } -ShenandoahHeuristics::ShenandoahHeuristics() : +ShenandoahHeuristics::ShenandoahHeuristics(ShenandoahSpaceInfo* space_info) : + _space_info(space_info), _region_data(nullptr), - _degenerated_cycles_in_a_row(0), - _successful_cycles_in_a_row(0), + _guaranteed_gc_interval(0), _cycle_start(os::elapsedTime()), _last_cycle_end(0), _gc_times_learned(0), _gc_time_penalties(0), - _gc_time_history(new TruncatedSeq(10, ShenandoahAdaptiveDecayFactor)), + _gc_cycle_time_history(new TruncatedSeq(Moving_Average_Samples, ShenandoahAdaptiveDecayFactor)), _metaspace_oom() { size_t num_regions = ShenandoahHeap::heap()->num_regions(); @@ -64,7 +65,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() { } void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { - assert(collection_set->count() == 0, "Must be empty"); + assert(collection_set->is_empty(), "Must be empty"); ShenandoahHeap* heap = ShenandoahHeap::heap(); @@ -107,7 +108,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec } else { // This is our candidate for later consideration.
candidates[cand_idx]._region = region; - candidates[cand_idx]._garbage = garbage; + candidates[cand_idx]._u._garbage = garbage; cand_idx++; } } else if (region->is_humongous_start()) { @@ -148,13 +149,12 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec } size_t cset_percent = (total_garbage == 0) ? 0 : (collection_set->garbage() * 100 / total_garbage); - size_t collectable_garbage = collection_set->garbage() + immediate_garbage; size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage); log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " - "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " - "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%)", + "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions, " + "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions", byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), @@ -163,10 +163,12 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), immediate_percent, + immediate_regions, byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()), - cset_percent); + cset_percent, + collection_set->count()); } void ShenandoahHeuristics::record_cycle_start() { @@ -185,11 +187,11 @@ bool ShenandoahHeuristics::should_start_gc() { return true; } - if (ShenandoahGuaranteedGCInterval > 0) { + if (_guaranteed_gc_interval > 0) { double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; - if (last_time_ms > ShenandoahGuaranteedGCInterval) { - log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", - last_time_ms, ShenandoahGuaranteedGCInterval); + if (last_time_ms > _guaranteed_gc_interval) { + log_info(gc)("Trigger (%s): Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", + _space_info->name(), last_time_ms, _guaranteed_gc_interval); return true; } } @@ -198,12 +200,12 @@ bool ShenandoahHeuristics::should_start_gc() { } bool ShenandoahHeuristics::should_degenerate_cycle() { - return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold; + return ShenandoahHeap::heap()->shenandoah_policy()->consecutive_degenerated_gc_count() <= ShenandoahFullGCThreshold; } void ShenandoahHeuristics::adjust_penalty(intx step) { assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100, - "In range before adjustment: " INTX_FORMAT, _gc_time_penalties); + "In range before adjustment: " INTX_FORMAT, _gc_time_penalties); intx new_val = _gc_time_penalties + step; if (new_val < 0) { @@ -215,30 +217,24 @@ void ShenandoahHeuristics::adjust_penalty(intx step) { _gc_time_penalties = new_val; assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100, - "In range after adjustment: " INTX_FORMAT, _gc_time_penalties); + "In range after adjustment: " INTX_FORMAT, _gc_time_penalties); } -void ShenandoahHeuristics::record_success_concurrent() { - _degenerated_cycles_in_a_row = 0; - _successful_cycles_in_a_row++; - - _gc_time_history->add(time_since_last_gc()); +void ShenandoahHeuristics::record_success_concurrent(bool abbreviated) { _gc_times_learned++; adjust_penalty(Concurrent_Adjust); + + if (_gc_times_learned <= ShenandoahLearningSteps || !(abbreviated && ShenandoahAdaptiveIgnoreShortCycles)) { + 
_gc_cycle_time_history->add(elapsed_cycle_time()); + } } void ShenandoahHeuristics::record_success_degenerated() { - _degenerated_cycles_in_a_row++; - _successful_cycles_in_a_row = 0; - adjust_penalty(Degenerated_Penalty); } void ShenandoahHeuristics::record_success_full() { - _degenerated_cycles_in_a_row = 0; - _successful_cycles_in_a_row++; - adjust_penalty(Full_Penalty); } @@ -253,8 +249,7 @@ void ShenandoahHeuristics::record_requested_gc() { } bool ShenandoahHeuristics::can_unload_classes() { - if (!ClassUnloading) return false; - return true; + return ClassUnloading; } bool ShenandoahHeuristics::should_unload_classes() { @@ -267,6 +262,6 @@ void ShenandoahHeuristics::initialize() { // Nothing to do by default. } -double ShenandoahHeuristics::time_since_last_gc() const { +double ShenandoahHeuristics::elapsed_cycle_time() const { return os::elapsedTime() - _cycle_start; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp index 8efe321692e..c78ef018f4f 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +26,7 @@ #ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP #define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP -#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahSharedVariables.hpp" #include "memory/allocation.hpp" @@ -58,34 +59,62 @@ class ShenandoahCollectionSet; class ShenandoahHeapRegion; +/* + * Shenandoah heuristics are primarily responsible for deciding when to start + * a collection cycle and choosing which regions will be evacuated during the + * cycle. + */ class ShenandoahHeuristics : public CHeapObj { static const intx Concurrent_Adjust = -1; // recover from penalties static const intx Degenerated_Penalty = 10; // how much to penalize average GC duration history on Degenerated GC static const intx Full_Penalty = 20; // how much to penalize average GC duration history on Full GC protected: + static const uint Moving_Average_Samples = 10; // Number of samples to store in moving averages + typedef struct { ShenandoahHeapRegion* _region; - size_t _garbage; + union { + size_t _garbage; // Not used by old-gen heuristics. + size_t _live_data; // Only used for old-gen heuristics, which prioritizes retention of _live_data over garbage reclaim + } _u; } RegionData; + // Source of information about the memory space managed by this heuristic + ShenandoahSpaceInfo* _space_info; + + // Depending on generation mode, region data represents the results of the relevant + // most recently completed marking pass: + // - in GLOBAL mode, global marking pass + // - in OLD mode, old-gen marking pass + // - in YOUNG mode, young-gen marking pass + // + // Note that there is some redundancy represented in region data because + // each instance is an array large enough to hold all regions. However, + // any region in young-gen is not in old-gen. 
And any time we are + // making use of the GLOBAL data, there is no need to maintain the + // YOUNG or OLD data. Consider this redundancy of data structure to + // have negligible cost unless proven otherwise. RegionData* _region_data; - uint _degenerated_cycles_in_a_row; - uint _successful_cycles_in_a_row; + size_t _guaranteed_gc_interval; double _cycle_start; double _last_cycle_end; size_t _gc_times_learned; intx _gc_time_penalties; - TruncatedSeq* _gc_time_history; + TruncatedSeq* _gc_cycle_time_history; // There may be many threads that contend to set this flag ShenandoahSharedFlag _metaspace_oom; static int compare_by_garbage(RegionData a, RegionData b); + // TODO: We need to enhance this API to give visibility to accompanying old-gen evacuation effort. + // In the case that the old-gen evacuation effort is small or zero, the young-gen heuristics + // should feel free to dedicate increased efforts to young-gen evacuation. + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) = 0; @@ -93,13 +122,17 @@ class ShenandoahHeuristics : public CHeapObj { void adjust_penalty(intx step); public: - ShenandoahHeuristics(); + ShenandoahHeuristics(ShenandoahSpaceInfo* space_info); virtual ~ShenandoahHeuristics(); void record_metaspace_oom() { _metaspace_oom.set(); } void clear_metaspace_oom() { _metaspace_oom.unset(); } bool has_metaspace_oom() const { return _metaspace_oom.is_set(); } + void set_guaranteed_gc_interval(size_t guaranteed_gc_interval) { + _guaranteed_gc_interval = guaranteed_gc_interval; + } + virtual void record_cycle_start(); virtual void record_cycle_end(); @@ -108,7 +141,7 @@ class ShenandoahHeuristics : public CHeapObj { virtual bool should_degenerate_cycle(); - virtual void record_success_concurrent(); + virtual void record_success_concurrent(bool abbreviated); virtual void record_success_degenerated(); @@ -128,7 +161,7 @@ class ShenandoahHeuristics : public CHeapObj { virtual bool is_experimental() = 0; virtual void initialize(); - double time_since_last_gc() const; + double elapsed_cycle_time() const; }; #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp new file mode 100644 index 00000000000..f7cfdedb4f3 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -0,0 +1,661 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "utilities/quickSort.hpp" + +#define BYTES_FORMAT SIZE_FORMAT "%s" +#define FORMAT_BYTES(b) byte_size_in_proper_unit(b), proper_unit_for_byte_size(b) + +uint ShenandoahOldHeuristics::NOT_FOUND = -1U; + +// sort by increasing live (so least live comes first) +int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) { + if (a._u._live_data < b._u._live_data) + return -1; + else if (a._u._live_data > b._u._live_data) + return 1; + else return 0; +} + +// sort by increasing index +int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) { + if (a._region->index() < b._region->index()) { + return -1; + } else if (a._region->index() > b._region->index()) { + return 1; + } else { + // quicksort may compare to self during search for pivot + return 0; + } +} + +ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation) : + ShenandoahHeuristics(generation), + _first_pinned_candidate(NOT_FOUND), + _last_old_collection_candidate(0), + _next_old_collection_candidate(0), + _last_old_region(0), + _live_bytes_in_unprocessed_candidates(0), + _old_generation(generation), + _cannot_expand_trigger(false), + _fragmentation_trigger(false), + _growth_trigger(false) { +} + +bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (unprocessed_old_collection_candidates() == 0) { + return false; + } + + _first_pinned_candidate = NOT_FOUND; + + uint included_old_regions = 0; + size_t evacuated_old_bytes = 0; + size_t collected_old_bytes = 0; + + // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer + // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount + // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount + // of live memory in that region and by the amount of unallocated memory in that region if the evacuation + // budget is constrained by availability of free memory. 
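+ // As an illustration (assumed values, not defaults): with a 30 MB old evacuation reserve and ShenandoahOldEvacWaste + // of 1.2, the old evacuation budget is 25 MB. If old-gen holds three unaffiliated 4 MB regions (12 MB of + // unfragmented free memory) and 40 MB of available memory overall, then 13 MB of the budget is drawn from + // fragmented free memory and the 15 MB of availability beyond the budget is set aside as excess_fragmented_available.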
+ size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahOldEvacWaste); + size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); + size_t fragmented_available; + size_t excess_fragmented_available; + + if (unfragmented_available > old_evacuation_budget) { + unfragmented_available = old_evacuation_budget; + fragmented_available = 0; + excess_fragmented_available = 0; + } else { + assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available"); + fragmented_available = _old_generation->available() - unfragmented_available; + assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up"); + if (fragmented_available + unfragmented_available > old_evacuation_budget) { + excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget; + fragmented_available -= excess_fragmented_available; + } + } + + size_t remaining_old_evacuation_budget = old_evacuation_budget; + log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", + byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), + unprocessed_old_collection_candidates()); + + size_t lost_evacuation_capacity = 0; + + // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen + // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates(). + // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to + // evacuate region N, then there is no need to even consider evacuating region N+1. + while (unprocessed_old_collection_candidates() > 0) { + // Old collection candidates are sorted in order of increasing live data (and hence roughly decreasing garbage). + ShenandoahHeapRegion* r = next_old_collection_candidate(); + if (r == nullptr) { + break; + } + assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates"); + + // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need + // to decrease the capacity of the fragmented memory by the scaled loss. + + size_t live_data_for_evacuation = r->get_live_data_bytes(); + size_t lost_available = r->free(); + + if ((lost_available > 0) && (excess_fragmented_available > 0)) { + if (lost_available < excess_fragmented_available) { + excess_fragmented_available -= lost_available; + lost_evacuation_capacity -= lost_available; + lost_available = 0; + } else { + lost_available -= excess_fragmented_available; + lost_evacuation_capacity -= excess_fragmented_available; + excess_fragmented_available = 0; + } + } + size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste); + if ((lost_available > 0) && (fragmented_available > 0)) { + if (scaled_loss + live_data_for_evacuation < fragmented_available) { + fragmented_available -= scaled_loss; + scaled_loss = 0; + } else { + // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother + // to decrement scaled_loss + } + } + if (scaled_loss > 0) { + // We were not able to account for the lost free memory within fragmented memory, so we need to take this + // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
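+ // (Worked example with assumed numbers, for illustration only: with ShenandoahOldEvacWaste = 1.4, a candidate region holding 1 MB of free space yields a scaled_loss of roughly 730 KB; if only 500 KB of fragmented budget remains, the loss cannot be absorbed there, so scaled_loss stays non-zero and this region's live data is charged entirely against the unfragmented budget below.)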
+ if (live_data_for_evacuation > unfragmented_available) { + // There is no room to evacuate this region or any that come after it within the candidates array. + break; + } else { + unfragmented_available -= live_data_for_evacuation; + } + } else { + // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either + // fragmented or unfragmented available memory. Use up the fragmented memory budget first. + size_t evacuation_need = live_data_for_evacuation; + + if (evacuation_need > fragmented_available) { + evacuation_need -= fragmented_available; + fragmented_available = 0; + } else { + fragmented_available -= evacuation_need; + evacuation_need = 0; + } + if (evacuation_need > unfragmented_available) { + // There is no room to evacuate this region or any that come after it within the candidates array. + break; + } else { + unfragmented_available -= evacuation_need; + // dead code: evacuation_need == 0; + } + } + collection_set->add_region(r); + included_old_regions++; + evacuated_old_bytes += live_data_for_evacuation; + collected_old_bytes += r->garbage(); + consume_old_collection_candidate(); + } + + if (_first_pinned_candidate != NOT_FOUND) { + // Need to deal with pinned regions + slide_pinned_regions_to_front(); + } + decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes); + if (included_old_regions > 0) { + log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", + included_old_regions, + byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes), + byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes)); + } + + if (unprocessed_old_collection_candidates() == 0) { + // We have added the last of our collection candidates to a mixed collection. + // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate. + clear_triggers(); + if (has_coalesce_and_fill_candidates()) { + _old_generation->transition_to(ShenandoahOldGeneration::FILLING); + } else { + _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + } + } else if (included_old_regions == 0) { + // We have candidates, but none were included for evacuation. Are they all pinned, + // or did we just not have enough room for any of them in this collection set? + // We don't want a region with a stuck pin to prevent subsequent old collections, so + // if they are all pinned we transition to a state that will allow us to make these uncollected + // (pinned) regions parsable. + if (all_candidates_are_pinned()) { + log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates()); + _old_generation->transition_to(ShenandoahOldGeneration::FILLING); + } else { + log_info(gc)("No regions selected for mixed collection.
" + "Old evacuation budget: " BYTES_FORMAT ", Remaining evacuation budget: " BYTES_FORMAT + ", Lost capacity: " BYTES_FORMAT + ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, + FORMAT_BYTES(heap->get_old_evac_reserve()), + FORMAT_BYTES(remaining_old_evacuation_budget), + FORMAT_BYTES(lost_evacuation_capacity), + _next_old_collection_candidate, _last_old_collection_candidate); + } + } + + return (included_old_regions > 0); +} + +bool ShenandoahOldHeuristics::all_candidates_are_pinned() { +#ifdef ASSERT + if (uint(os::random()) % 100 < ShenandoahCoalesceChance) { + return true; + } +#endif + + for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) { + ShenandoahHeapRegion* region = _region_data[i]._region; + if (!region->is_pinned()) { + return false; + } + } + return true; +} + +void ShenandoahOldHeuristics::slide_pinned_regions_to_front() { + // Find the first unpinned region to the left of the next region that + // will be added to the collection set. These regions will have been + // added to the cset, so we can use them to hold pointers to regions + // that were pinned when the cset was chosen. + // [ r p r p p p r r ] + // ^ ^ ^ + // | | | pointer to next region to add to a mixed collection is here. + // | | first r to the left should be in the collection set now. + // | first pinned region, we don't need to look past this + uint write_index = NOT_FOUND; + for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) { + ShenandoahHeapRegion* region = _region_data[search]._region; + if (!region->is_pinned()) { + write_index = search; + assert(region->is_cset(), "Expected unpinned region to be added to the collection set."); + break; + } + } + + // If we could not find an unpinned region, it means there are no slots available + // to move up the pinned regions. In this case, we just reset our next index in the + // hopes that some of these regions will become unpinned before the next mixed + // collection. We may want to bail out here instead, as it should be quite + // rare to have so many pinned regions and may indicate something is wrong. + if (write_index == NOT_FOUND) { + assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions."); + _next_old_collection_candidate = _first_pinned_candidate; + return; + } + + // Find pinned regions to the left and move their pointer into a slot + // that was pointing at a region that has been added to the cset (or was pointing + // to a pinned region that we've already moved up). We are done when the leftmost + // pinned region has been slid up. + // [ r p r x p p p r ] + // ^ ^ + // | | next region for mixed collections + // | Write pointer is here. We know this region is already in the cset + // | so we can clobber it with the next pinned region we find. + for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) { + RegionData& skipped = _region_data[search]; + if (skipped._region->is_pinned()) { + RegionData& available_slot = _region_data[write_index]; + available_slot._region = skipped._region; + available_slot._u._live_data = skipped._u._live_data; + --write_index; + } + } + + // Update to read from the leftmost pinned region. Plus one here because we decremented + // the write index to hold the next found pinned region. We are just moving it back now + // to point to the first pinned region.
+ _next_old_collection_candidate = write_index + 1; +} + +void ShenandoahOldHeuristics::prepare_for_old_collections() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + size_t cand_idx = 0; + size_t total_garbage = 0; + size_t num_regions = heap->num_regions(); + size_t immediate_garbage = 0; + size_t immediate_regions = 0; + size_t live_data = 0; + + RegionData* candidates = _region_data; + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = heap->get_region(i); + if (!_old_generation->contains(region)) { + continue; + } + + size_t garbage = region->garbage(); + size_t live_bytes = region->get_live_data_bytes(); + total_garbage += garbage; + live_data += live_bytes; + + // Only place regular regions into the candidate set + if (region->is_regular()) { + if (!region->has_live()) { + assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); + region->make_trash_immediate(); + immediate_regions++; + immediate_garbage += garbage; + } else { + region->begin_preemptible_coalesce_and_fill(); + candidates[cand_idx]._region = region; + candidates[cand_idx]._u._live_data = live_bytes; + cand_idx++; + } + } else if (region->is_humongous_start()) { + if (!region->has_live()) { + assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); + // The humongous object is dead, we can just return this region and the continuations + // immediately to the freeset - no evacuations are necessary here. The continuations + // will be made into trash by this method, so they'll be skipped by the 'is_regular' + // check above, but we still need to count the start region. + immediate_regions++; + immediate_garbage += garbage; + size_t region_count = heap->trash_humongous_region_at(region); + log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count); + } + } else if (region->is_trash()) { + // Count humongous objects made into trash here. + immediate_regions++; + immediate_garbage += garbage; + } + } + + _old_generation->set_live_bytes_after_last_mark(live_data); + + // TODO: Consider not running mixed collections if we recovered some threshold percentage of memory from immediate garbage. + // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate + // threshold for the old generation. + + // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first. We sort by live-data. + // Some regular regions may have been promoted in place with no garbage but also with very little live data. When we "compact" + // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions + // in old-gen. + + QuickSort::sort(candidates, cand_idx, compare_by_live, false); + + // Any old-gen region that contains ShenandoahOldGarbageThreshold (default value 25) percent garbage or more is to be + // added to the list of candidates for subsequent mixed evacuations. + // + // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics. + + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + // The convention is to collect regions that have more than this amount of garbage. + const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100; + + // Enlightened interpretation: collect regions that have less than this amount of live.
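+ // (For example, with 4 MB regions and the default ShenandoahOldGarbageThreshold of 25, garbage_threshold above is 1 MB and live_threshold below is 3 MB; the region size is an assumption for illustration only.)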
+ const size_t live_threshold = region_size_bytes - garbage_threshold; + + size_t candidates_garbage = 0; + _last_old_region = (uint)cand_idx; + _last_old_collection_candidate = (uint)cand_idx; + _next_old_collection_candidate = 0; + + size_t unfragmented = 0; + + for (size_t i = 0; i < cand_idx; i++) { + size_t live = candidates[i]._u._live_data; + if (live > live_threshold) { + // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold. + _last_old_collection_candidate = (uint)i; + break; + } + size_t region_garbage = candidates[i]._region->garbage(); + size_t region_free = candidates[i]._region->free(); + candidates_garbage += region_garbage; + unfragmented += region_free; + } + + size_t defrag_count = 0; + if (cand_idx > _last_old_collection_candidate) { + // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory + // that they contain is below a particular old-garbage threshold. Regions that were not selected for the collection + // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these + // at the current time. + // + // However, if any of these regions that were rejected from the collection set reside within areas of memory that + // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time. + // Humongous allocations target the bottom of the heap. We want old-gen regions to congregate at the top of the + // heap. + // + // Sort the regions that were initially rejected from the collection set in order of index. This allows us to + // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap). + QuickSort::sort(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate, + compare_by_index, false); + + size_t first_unselected_old_region = candidates[_last_old_collection_candidate]._region->index(); + size_t last_unselected_old_region = candidates[cand_idx - 1]._region->index(); + size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region; + size_t total_uncollected_old_regions = cand_idx - _last_old_collection_candidate; + + // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates. + const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8; + size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS; + + // The heuristic old_is_fragmented trigger may be seeking to achieve up to 7/8 density. Allow ourselves to overshoot + // that target (at 15/16) so we will not have to do another defragmenting old collection right away. + while ((defrag_count < bound_on_additional_regions) && + (total_uncollected_old_regions < 15 * span_of_uncollected_regions / 16)) { + ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate]._region; + assert (r->is_regular(), "Only regular regions are in the candidate set"); + size_t region_garbage = candidates[_last_old_collection_candidate]._region->garbage(); + size_t region_free = r->free(); + candidates_garbage += region_garbage; + unfragmented += region_free; + defrag_count++; + _last_old_collection_candidate++; + + // We now have one fewer uncollected regions, and our uncollected span shrinks because we have removed its first region. 
+ total_uncollected_old_regions--; + span_of_uncollected_regions = 1 + last_unselected_old_region - candidates[_last_old_collection_candidate]._region->index(); + } + } + + // Note that we do not coalesce and fill occupied humongous regions + // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions + size_t collectable_garbage = immediate_garbage + candidates_garbage; + size_t old_candidates = _last_old_collection_candidate; + size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented); + set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live); + + log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s " + "consolidated with free: " SIZE_FORMAT "%s, over " SIZE_FORMAT " regions (humongous defragmentation: " + SIZE_FORMAT " regions), Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.", + byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), + byte_size_in_proper_unit(unfragmented), proper_unit_for_byte_size(unfragmented), + old_candidates, defrag_count, + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), immediate_regions); + + if (unprocessed_old_collection_candidates() > 0) { + _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING); + } else if (has_coalesce_and_fill_candidates()) { + _old_generation->transition_to(ShenandoahOldGeneration::FILLING); + } else { + _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + } +} + +size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const { + return _live_bytes_in_unprocessed_candidates; +} + +void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) { + _live_bytes_in_unprocessed_candidates = initial_live; +} + +void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) { + assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present"); + _live_bytes_in_unprocessed_candidates -= evacuated_live; +} + +// Used by unit test: test_shenandoahOldHeuristic.cpp +uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const { + return _last_old_collection_candidate; +} + +uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const { + return _last_old_collection_candidate - _next_old_collection_candidate; +} + +ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() { + while (_next_old_collection_candidate < _last_old_collection_candidate) { + ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region; + if (!next->is_pinned()) { + return next; + } else { + assert(next->is_pinned(), "sanity"); + if (_first_pinned_candidate == NOT_FOUND) { + _first_pinned_candidate = _next_old_collection_candidate; + } + } + + _next_old_collection_candidate++; + } + return nullptr; +} + +void ShenandoahOldHeuristics::consume_old_collection_candidate() { + _next_old_collection_candidate++; +} + +unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) { + uint end = _last_old_region; + uint index = _next_old_collection_candidate; + while (index < end) { + *buffer++ = _region_data[index++]._region; + } + return (_last_old_region - _next_old_collection_candidate); +} + +void ShenandoahOldHeuristics::abandon_collection_candidates() { + _last_old_collection_candidate 
= 0; + _next_old_collection_candidate = 0; + _last_old_region = 0; +} + +void ShenandoahOldHeuristics::record_cycle_end() { + this->ShenandoahHeuristics::record_cycle_end(); + clear_triggers(); +} + +void ShenandoahOldHeuristics::clear_triggers() { + // Clear any triggers that were set during mixed evacuations. Conditions may be different now that this phase has finished. + _cannot_expand_trigger = false; + _fragmentation_trigger = false; + _growth_trigger = false; + } + +bool ShenandoahOldHeuristics::should_start_gc() { + // Cannot start a new old-gen GC until previous one has finished. + // + // Future refinement: under certain circumstances, we might be more sophisticated about this choice. + // For example, we could choose to abandon the previous old collection before it has completed evacuations. + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) { + return false; + } + + if (_cannot_expand_trigger) { + size_t old_gen_capacity = _old_generation->max_capacity(); + size_t heap_capacity = heap->capacity(); + double percent = percent_of(old_gen_capacity, heap_capacity); + log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size", + byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent); + return true; + } + + if (_fragmentation_trigger) { + size_t used = _old_generation->used(); + size_t used_regions_size = _old_generation->used_regions_size(); + + // used_regions includes humongous regions + size_t used_regions = _old_generation->used_regions(); + assert(used_regions_size > used_regions, "Cannot have more used than used regions"); + + size_t first_old_region, last_old_region; + double density; + get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region); + size_t span_of_old_regions = (last_old_region >= first_old_region)? last_old_region + 1 - first_old_region: 0; + size_t fragmented_free = used_regions_size - used; + + log_info(gc)("Trigger (OLD): Old has become fragmented: " + SIZE_FORMAT "%s available bytes spread between range spanned from " + SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT "), density: %.1f%%", + byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free), + first_old_region, last_old_region, span_of_old_regions, density * 100); + return true; + } + + if (_growth_trigger) { + // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been + // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied. 
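
The guarded re-check that follows is easier to see as a single predicate. A compact standalone sketch with the same shape; the parameter names echo the flags used below, but every concrete value a caller would pass is invented:

#include <cstddef>

// True if the growth trigger should still fire; false if it should be cleared.
bool growth_trigger_still_valid(size_t current_usage, size_t trigger_threshold,
                                size_t heap_size, size_t consecutive_young_cycles,
                                size_t ignore_below_pct,      // cf. ShenandoahIgnoreOldGrowthBelowPercentage
                                size_t do_not_ignore_after) { // cf. ShenandoahDoNotIgnoreGrowthAfterYoungCycles
  size_t ignore_threshold = ignore_below_pct * heap_size / 100;
  if (current_usage < ignore_threshold && consecutive_young_cycles < do_not_ignore_after) {
    return false;  // usage still too small to matter; clear the trigger
  }
  return current_usage > trigger_threshold;  // has old-gen really overgrown?
}
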
+ size_t current_usage = _old_generation->used(); + size_t trigger_threshold = _old_generation->usage_trigger_threshold(); + size_t heap_size = heap->capacity(); + size_t consecutive_young_cycles; + size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100; + if ((current_usage < ignore_threshold) && + ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count()) + < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) { + log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold (" + SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs", + byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), + byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold), + consecutive_young_cycles); + _growth_trigger = false; + } else if (current_usage > trigger_threshold) { + size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark(); + double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old); + log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: " + SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%", + byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old), + byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth); + return true; + } else { + _growth_trigger = false; + } + } + + // Otherwise, defer to inherited heuristic for gc trigger. + return this->ShenandoahHeuristics::should_start_gc(); +} + +void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) { + // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger. + clear_triggers(); + this->ShenandoahHeuristics::record_success_concurrent(abbreviated); +} + +void ShenandoahOldHeuristics::record_success_degenerated() { + // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger. + clear_triggers(); + this->ShenandoahHeuristics::record_success_degenerated(); +} + +void ShenandoahOldHeuristics::record_success_full() { + // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger. + clear_triggers(); + this->ShenandoahHeuristics::record_success_full(); +} + +const char* ShenandoahOldHeuristics::name() { + return "Old"; +} + +bool ShenandoahOldHeuristics::is_diagnostic() { + return false; +} + +bool ShenandoahOldHeuristics::is_experimental() { + return true; +} + +void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + ShenandoahHeuristics::RegionData* data, + size_t data_size, size_t free) { + ShouldNotReachHere(); +} + + +#undef BYTES_FORMAT +#undef FORMAT_BYTES diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp new file mode 100644 index 00000000000..158f959ac17 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -0,0 +1,201 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHOLDHEURISTICS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHOLDHEURISTICS_HPP + + +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" + +class ShenandoahCollectionSet; +class ShenandoahHeapRegion; +class ShenandoahOldGeneration; + +/* + * This heuristic is responsible for choosing a set of candidates for inclusion + * in mixed collections. These candidates are chosen when marking of the old + * generation is complete. Note that this list of candidates may live through + * several mixed collections. + * + * This heuristic is also responsible for triggering old collections. It has its + * own collection of triggers to decide whether to start an old collection. It does + * _not_ use any of the functionality from the adaptive heuristics for triggers. + * It also does not use any of the functionality from the heuristics base classes + * to choose the collection set. For these reasons, it does not extend from + * ShenandoahGenerationalHeuristics. + */ +class ShenandoahOldHeuristics : public ShenandoahHeuristics { + +private: + + static uint NOT_FOUND; + + // After final marking of the old generation, this heuristic will select + // a set of candidate regions to be included in subsequent mixed collections. + // The regions are sorted into a `_region_data` array (declared in base + // class) in decreasing order of garbage. The heuristic will give priority + // to regions containing more garbage. + + // The following members are used to keep track of which candidate regions + // have yet to be added to a mixed collection. There is also some special + // handling for pinned regions, described further below. + + // Pinned regions may not be included in the collection set. Any old regions + // which were pinned at the time when old regions were added to the mixed + // collection will have been skipped. These regions still contain garbage, + // so we want to include them at the start of the list of candidates for the + // _next_ mixed collection cycle. This variable is the index of the _first_ + // old region which is pinned when the mixed collection set is formed. + uint _first_pinned_candidate; + + // This is the index of the last region which is above the garbage threshold. + // No regions after this will be considered for inclusion in a mixed collection + // set. + uint _last_old_collection_candidate; + + // This index points to the first candidate in line to be added to the mixed + // collection set. It is updated as regions are added to the collection set.
+ uint _next_old_collection_candidate; + + // This is the last index in the array of old regions which were active at + // the end of old final mark. + uint _last_old_region; + + // How much live data must be evacuated from within the unprocessed mixed evacuation candidates? + size_t _live_bytes_in_unprocessed_candidates; + + // Keep a pointer to our generation that we can use without down casting a protected member from the base class. + ShenandoahOldGeneration* _old_generation; + + // Flags are set when promotion failure is detected (by gc thread), and cleared when + // old generation collection begins (by control thread). Flags are set and cleared at safepoints. + bool _cannot_expand_trigger; + bool _fragmentation_trigger; + bool _growth_trigger; + + // Motivation for a fragmentation_trigger + double _fragmentation_density; + size_t _fragmentation_first_old_region; + size_t _fragmentation_last_old_region; + + // Compare by live is used to prioritize compaction of old-gen regions. With old-gen compaction, the goal is + // to tightly pack long-lived objects into available regions. In most cases, there has not been an accumulation + // of garbage within old-gen regions. The more likely opportunity will be to combine multiple sparsely populated + // old-gen regions which may have been promoted in place into a smaller number of densely packed old-gen regions. + // This improves subsequent allocation efficiency and reduces the likelihood of allocation failure (including + // humongous allocation failure) due to fragmentation of the available old-gen allocation pool + static int compare_by_live(RegionData a, RegionData b); + + static int compare_by_index(RegionData a, RegionData b); + + protected: + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, + size_t free) override; + +public: + ShenandoahOldHeuristics(ShenandoahOldGeneration* generation); + + // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass. + void prepare_for_old_collections(); + + // Return true iff the collection set is primed with at least one old-gen region. + bool prime_collection_set(ShenandoahCollectionSet* set); + + // How many old-collection candidates have not yet been processed? + uint unprocessed_old_collection_candidates() const; + + // How much live memory must be evacuated from within old-collection candidates that have not yet been processed? + size_t unprocessed_old_collection_candidates_live_memory() const; + + void set_unprocessed_old_collection_candidates_live_memory(size_t initial_live); + + void decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live); + + // How many old or hidden collection candidates have not yet been processed? + uint last_old_collection_candidate_index() const; + + // Return the next old-collection candidate in order of decreasing amounts of garbage. (We process most-garbage regions + // first.) This does not consume the candidate. If the candidate is selected for inclusion in a collection set, then + // the candidate is consumed by invoking consume_old_collection_candidate(). + ShenandoahHeapRegion* next_old_collection_candidate(); + + // Adjust internal state to reflect that one fewer old-collection candidate remains to be processed. 
+ void consume_old_collection_candidate(); + + // Fill in buffer with all the old-collection regions that were identified at the end of the most recent old-gen + // mark to require their unmarked objects to be coalesced and filled. The buffer array must have at least + // last_old_region_index() entries, or memory may be corrupted when this function overwrites the + // end of the array. + unsigned int get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer); + + // True if there are old regions that need to be filled. + bool has_coalesce_and_fill_candidates() const { return coalesce_and_fill_candidates_count() > 0; } + + // Return the number of old regions that need to be filled. + size_t coalesce_and_fill_candidates_count() const { return _last_old_region - _next_old_collection_candidate; } + + // If a GLOBAL gc occurs, it will collect the entire heap which invalidates any collection candidates being + // held by this heuristic for supplying mixed collections. + void abandon_collection_candidates(); + + void trigger_cannot_expand() { _cannot_expand_trigger = true; }; + + inline void trigger_old_is_fragmented(double density, size_t first_old_index, size_t last_old_index) { + _fragmentation_trigger = true; + _fragmentation_density = density; + _fragmentation_first_old_region = first_old_index; + _fragmentation_last_old_region = last_old_index; + } + void trigger_old_has_grown() { _growth_trigger = true; } + + inline void get_fragmentation_trigger_reason_for_log_message(double &density, size_t &first_index, size_t &last_index) { + density = _fragmentation_density; + first_index = _fragmentation_first_old_region; + last_index = _fragmentation_last_old_region; + } + + void clear_triggers(); + + void record_cycle_end() override; + + bool should_start_gc() override; + + void record_success_concurrent(bool abbreviated) override; + + void record_success_degenerated() override; + + void record_success_full() override; + + const char* name() override; + + bool is_diagnostic() override; + + bool is_experimental() override; + + private: + void slide_pinned_regions_to_front(); + bool all_candidates_are_pinned(); +}; + +#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHOLDHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp index 2e7da1f1dd2..2e6b3d46ebe 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp @@ -31,6 +31,9 @@ #include "logging/log.hpp" #include "logging/logTag.hpp" +ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics(ShenandoahSpaceInfo* space_info) : + ShenandoahHeuristics(space_info) {} + bool ShenandoahPassiveHeuristics::should_start_gc() { // Never do concurrent GCs. return false; @@ -53,7 +56,7 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando // Do not select too large CSet that would overflow the available free space. // Take at least the entire evacuation reserve, and be free to overflow to free space. 
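
As a worked instance of the sizing described in that comment (all values assumed for illustration): with a 1 GB max_capacity, ShenandoahEvacReserve of 5 and ShenandoahEvacWaste of 1.2, the reserve slice is about 51 MB; a larger actual_free of 200 MB wins, and max_cset comes out near 166 MB of live data. A tiny standalone sketch, with std::max standing in for HotSpot's MAX2:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t max_capacity = 1024 * M;   // 1 GB (assumed)
  size_t actual_free  = 200 * M;    // (assumed)
  size_t reserve_pct  = 5;          // stands in for ShenandoahEvacReserve
  size_t available = std::max(max_capacity / 100 * reserve_pct, actual_free); // 200 MB beats ~51 MB
  size_t max_cset  = (size_t)(available / 1.2);  // deflated by a ShenandoahEvacWaste-style factor
  printf("available=%zuM max_cset=%zuM\n", available / M, max_cset / M);
}
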
- size_t max_capacity = ShenandoahHeap::heap()->max_capacity(); + size_t max_capacity = _space_info->max_capacity(); size_t available = MAX2(max_capacity / 100 * ShenandoahEvacReserve, actual_free); size_t max_cset = (size_t)(available / ShenandoahEvacWaste); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp index 86ea5651b61..be4e91b1800 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp @@ -27,8 +27,19 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +/* + * The passive heuristic is for use only with the passive mode. In + * the passive mode, Shenandoah only performs STW (i.e., degenerated) + * collections. All the barriers are disabled and there are no concurrent + * activities. Therefore, this heuristic _never_ triggers a cycle. It + * will select regions for evacuation based on ShenandoahEvacReserve, + * ShenandoahEvacWaste and ShenandoahGarbageThreshold. Note that it does + * not attempt to evacuate regions with more garbage. + */ class ShenandoahPassiveHeuristics : public ShenandoahHeuristics { public: + ShenandoahPassiveHeuristics(ShenandoahSpaceInfo* space_info); + virtual bool should_start_gc(); virtual bool should_unload_classes(); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp new file mode 100644 index 00000000000..29b94e2f68f --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSPACEINFO_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSPACEINFO_HPP + +#include "utilities/globalDefinitions.hpp" + +/* + * The purpose of this interface is to decouple the heuristics from a + * direct dependency on the ShenandoahHeap singleton instance. This is + * done to facilitate future unit testing of the heuristics and to support + * future operational modes of Shenandoah in which the heap may be split + * into generations. 
+ */ +class ShenandoahSpaceInfo { +public: + virtual const char* name() const = 0; + virtual size_t soft_max_capacity() const = 0; + virtual size_t max_capacity() const = 0; + virtual size_t soft_available() const = 0; + virtual size_t available() const = 0; + virtual size_t used() const = 0; + virtual size_t bytes_allocated_since_gc_start() const = 0; +}; + +#endif //SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSPACEINFO_HPP diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp index db8740d9ae1..7c7e88600d3 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +33,8 @@ #include "logging/log.hpp" #include "logging/logTag.hpp" -ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics() { +ShenandoahStaticHeuristics::ShenandoahStaticHeuristics(ShenandoahSpaceInfo* space_info) : + ShenandoahHeuristics(space_info) { SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); } @@ -40,11 +42,9 @@ ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics( ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {} bool ShenandoahStaticHeuristics::should_start_gc() { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - - size_t max_capacity = heap->max_capacity(); - size_t capacity = heap->soft_max_capacity(); - size_t available = heap->free_set()->available(); + size_t max_capacity = _space_info->max_capacity(); + size_t capacity = _space_info->soft_max_capacity(); + size_t available = _space_info->available(); // Make sure the code below treats available without the soft tail. size_t soft_tail = max_capacity - capacity; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp index 5ecd1848d85..24cb5547921 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp @@ -27,9 +27,14 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +/* + * The static heuristic will trigger cycles if the available memory falls + * below the ShenandoahMinFreeThreshold percentage of total capacity. This + * heuristic will attempt to evacuate any region with any garbage. + */ class ShenandoahStaticHeuristics : public ShenandoahHeuristics { public: - ShenandoahStaticHeuristics(); + ShenandoahStaticHeuristics(ShenandoahSpaceInfo* space_info); virtual ~ShenandoahStaticHeuristics(); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp new file mode 100644 index 00000000000..c9d7d03adc1 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -0,0 +1,246 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" + +#include "utilities/quickSort.hpp" + +ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation) + : ShenandoahGenerationalHeuristics(generation) { +} + + +void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) { + // The logic for cset selection in adaptive is as follows: + // + // 1. We cannot make the cset larger than the available free space. Otherwise we guarantee OOME + // during evacuation, and thus guarantee a full GC. In practice, we also want to let + // the application allocate something. This is why we limit the CSet to some fraction of + // available space. In a non-overloaded heap, max_cset would contain all plausible candidates + // over the garbage threshold. + // + // 2. We should not make the cset too small, or the free threshold would not be met right + // after the cycle. Otherwise we get back-to-back cycles for no reason if the heap is + // too fragmented. In a non-overloaded, non-fragmented heap, min_garbage would be around zero. + // + // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates + // before we meet min_garbage. Then we add all candidates that fit with a garbage threshold before + // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme, + // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit. + + // In generational mode, the sort order within the data array is not strictly in descending order of garbage. In + // particular, regions that have reached tenure age will be sorted into this array before younger regions that contain + // more garbage. This represents one of the reasons why we keep looking at regions even after we decide, for example, + // to exclude one of the regions because it might require evacuation of too much live data.
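
The admission rule spelled out above reduces to a small predicate. A standalone sketch of the same decision (names and thresholds are illustrative stand-ins; the real loop below also skips preselected and over-age regions):

#include <cstddef>

// Decide whether one more region may join the cset. 'new_cset' is the cset's
// live bytes if this region is added; 'new_garbage' the accumulated garbage.
bool admit_region(size_t new_cset, size_t max_cset,
                  size_t region_garbage, size_t garbage_threshold,
                  size_t ignore_threshold,
                  size_t new_garbage, size_t min_garbage) {
  if (new_cset > max_cset) {
    return false;  // never overflow the evacuation budget
  }
  // Unconditional add: region is worth collecting at all, and we still owe min_garbage.
  bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
  return add_regardless || (region_garbage > garbage_threshold);
}
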
+ + // Better select garbage-first regions + QuickSort::sort(data, (int) size, compare_by_garbage, false); + + size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size); + + choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage); + + log_cset_composition(cset); +} + +void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset, + const RegionData* data, + size_t size, size_t actual_free, + size_t cur_young_garbage) const { + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + size_t capacity = heap->young_generation()->max_capacity(); + size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; + size_t ignore_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahIgnoreGarbageThreshold / 100; + const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + + // This is a young-gen collection or a mixed evacuation. + // If this is a mixed evacuation, the old-gen candidate regions have already been added. + size_t max_cset = (size_t) (heap->get_young_evac_reserve() / ShenandoahEvacWaste); + size_t cur_cset = 0; + size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; + size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; + + + log_info(gc, ergo)( + "Adaptive CSet Selection for YOUNG. Max Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + + for (size_t idx = 0; idx < size; idx++) { + ShenandoahHeapRegion* r = data[idx]._region; + if (cset->is_preselected(r->index())) { + continue; + } + if (r->age() < tenuring_threshold) { + size_t new_cset = cur_cset + r->get_live_data_bytes(); + size_t region_garbage = r->garbage(); + size_t new_garbage = cur_young_garbage + region_garbage; + bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); + assert(r->is_young(), "Only young candidates expected in the data array"); + if ((new_cset <= max_cset) && (add_regardless || (region_garbage > garbage_threshold))) { + cur_cset = new_cset; + cur_young_garbage = new_garbage; + cset->add_region(r); + } + } + // Note that we do not add aged regions if they were not pre-selected. The reason they were not preselected + // is that there is not sufficient room in old-gen to hold their to-be-promoted live objects, or because + // they are to be promoted in place. + } +} + + +bool ShenandoahYoungHeuristics::should_start_gc() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics(); + + // Checks that an old cycle has run for at least ShenandoahMinimumOldMarkTimeMs before allowing a young cycle. + if (ShenandoahMinimumOldMarkTimeMs > 0 && ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) { + size_t old_mark_elapsed = size_t(old_heuristics->elapsed_cycle_time() * 1000); + if (old_mark_elapsed < ShenandoahMinimumOldMarkTimeMs) { + return false; + } + } + + // If the inherited triggers decide to start a cycle, no further evaluation is required. + if (ShenandoahAdaptiveHeuristics::should_start_gc()) { + return true; + } + + // Get through promotions and mixed evacuations as quickly as possible. These cycles sometimes require significantly + // more time than traditional young-generation cycles, so start them up as soon as possible.
This is a "mitigation" + // for the reality that old-gen and young-gen activities are not truly "concurrent". If there is old-gen work to + // be done, we start up the young-gen GC threads so they can do some of this old-gen work. As implemented, promotion + // gets priority over old-gen marking. + size_t promo_expedite_threshold = percent_of(heap->young_generation()->max_capacity(), ShenandoahExpeditePromotionsThreshold); + size_t promo_potential = heap->get_promotion_potential(); + if (promo_potential > promo_expedite_threshold) { + // Detect unsigned arithmetic underflow + assert(promo_potential < heap->capacity(), "Sanity"); + log_info(gc)("Trigger (%s): expedite promotion of " SIZE_FORMAT "%s", + _space_info->name(), + byte_size_in_proper_unit(promo_potential), + proper_unit_for_byte_size(promo_potential)); + return true; + } + + size_t mixed_candidates = old_heuristics->unprocessed_old_collection_candidates(); + if (mixed_candidates > ShenandoahExpediteMixedThreshold && !heap->is_concurrent_weak_root_in_progress()) { + // We need to run young GC in order to open up some free heap regions so we can finish mixed evacuations. + // If concurrent weak root processing is in progress, it means the old cycle has chosen mixed collection + // candidates, but has not completed. There is no point in trying to start the young cycle before the old + // cycle completes. + log_info(gc)("Trigger (%s): expedite mixed evacuation of " SIZE_FORMAT " regions", + _space_info->name(), mixed_candidates); + return true; + } + + return false; +} + +// Return a conservative estimate of how much memory can be allocated before we need to start GC. The estimate is based +// on memory that is currently available within young generation plus all of the memory that will be added to the young +// generation at the end of the current cycle (as represented by young_regions_to_be_reclaimed) and on the anticipated +// amount of time required to perform a GC. +size_t ShenandoahYoungHeuristics::bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed) { + size_t capacity = _space_info->soft_max_capacity(); + size_t usage = _space_info->used(); + size_t available = (capacity > usage)? capacity - usage: 0; + size_t allocated = _space_info->bytes_allocated_since_gc_start(); + + size_t available_young_collected = ShenandoahHeap::heap()->collection_set()->get_young_available_bytes_collected(); + size_t anticipated_available = + available + young_regions_to_be_reclaimed * ShenandoahHeapRegion::region_size_bytes() - available_young_collected; + size_t spike_headroom = capacity * ShenandoahAllocSpikeFactor / 100; + size_t penalties = capacity * _gc_time_penalties / 100; + + double rate = _allocation_rate.sample(allocated); + + // At what value of available, would avg and spike triggers occur? 
+ // if allocation_headroom < avg_cycle_time * avg_alloc_rate, then we experience avg trigger + // if allocation_headroom < avg_cycle_time * rate, then we experience spike trigger if is_spiking + // + // allocation_headroom = + // 0, if penalties > available or if penalties + spike_headroom > available + // available - penalties - spike_headroom, otherwise + // + // so we trigger if available - penalties - spike_headroom < avg_cycle_time * avg_alloc_rate, which is to say + // available < avg_cycle_time * avg_alloc_rate + penalties + spike_headroom + // or if available < penalties + spike_headroom + // + // since avg_cycle_time * avg_alloc_rate > 0, the first test is sufficient to test both conditions + // + // thus, evac_slack_avg is MAX2(0, available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom)) + // + // similarly, evac_slack_spiking is MAX2(0, available - (avg_cycle_time * rate + penalties + spike_headroom)) + // but evac_slack_spiking is only relevant if is_spiking, as defined below. + + double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd()); + + // TODO: Consider making conservative adjustments to avg_cycle_time, such as: (avg_cycle_time *= 2) in cases where + // we expect a longer-than-normal GC duration. This includes mixed evacuations, evacuations that perform promotion + // including promotion in place, and OLD GC bootstrap cycles. It has been observed that these cycles sometimes + // require twice or more the duration of "normal" GC cycles. We have experimented with this approach. While it + // does appear to reduce the frequency of degenerated cycles due to late triggers, it also has the effect of reducing + // evacuation slack so that there is less memory available to be transferred to OLD. The result is that we + // throttle promotion and it takes too long to move old objects out of the young generation. + + double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd); + size_t evac_slack_avg; + if (anticipated_available > avg_cycle_time * avg_alloc_rate + penalties + spike_headroom) { + evac_slack_avg = anticipated_available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom); + } else { + // we have no slack because it's already time to trigger + evac_slack_avg = 0; + } + + bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); + size_t evac_slack_spiking; + if (is_spiking) { + if (anticipated_available > avg_cycle_time * rate + penalties + spike_headroom) { + evac_slack_spiking = anticipated_available - (avg_cycle_time * rate + penalties + spike_headroom); + } else { + // we have no slack because it's already time to trigger + evac_slack_spiking = 0; + } + } else { + evac_slack_spiking = evac_slack_avg; + } + + size_t threshold = min_free_threshold(); + size_t evac_min_threshold = (anticipated_available > threshold)? anticipated_available - threshold: 0; + return MIN3(evac_slack_spiking, evac_slack_avg, evac_min_threshold); +} + diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp new file mode 100644 index 00000000000..b9d64059680 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHYOUNGHEURISTICS_HPP +#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHYOUNGHEURISTICS_HPP + +#include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp" + +class ShenandoahYoungGeneration; + +/* + * This is a specialization of the generational heuristic which chooses + * young regions for evacuation. This heuristic also has additional triggers + * designed to expedite mixed collections and promotions. + */ +class ShenandoahYoungHeuristics : public ShenandoahGenerationalHeuristics { +public: + explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation); + + + void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, + RegionData* data, size_t size, + size_t actual_free) override; + + bool should_start_gc() override; + + size_t bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed); + +private: + void choose_young_collection_set(ShenandoahCollectionSet* cset, + const RegionData* data, + size_t size, size_t actual_free, + size_t cur_young_garbage) const; + +}; + +#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHYOUNGHEURISTICS_HPP diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.cpp new file mode 100644 index 00000000000..3d66c5bce4d --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.cpp @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+#include "runtime/globals_extension.hpp"
+
+void ShenandoahGenerationalMode::initialize_flags() const {
+
+#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64)
+  vm_exit_during_initialization("Shenandoah Generational GC is not supported on this platform.");
+#endif
+
+  // Exit if the user has explicitly disabled ShenandoahCardBarrier, which generational mode requires
+  if (!FLAG_IS_DEFAULT(ShenandoahCardBarrier)) {
+    SHENANDOAH_CHECK_FLAG_SET(ShenandoahCardBarrier);
+  }
+
+  // Enable the card-marking post-write barrier for tracking old-to-young pointers
+  FLAG_SET_DEFAULT(ShenandoahCardBarrier, true);
+
+  if (ClassUnloading) {
+    FLAG_SET_DEFAULT(VerifyBeforeExit, false);
+  }
+
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(GCTimeRatio, 70);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
+
+  // This helps most multi-core hardware hosts, enable by default
+  SHENANDOAH_ERGO_ENABLE_FLAG(UseCondCardMark);
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
+  SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahIUBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCardBarrier);
+}
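Since is_experimental() reports true for this mode (see the header below), picking it up at runtime requires unlocking experimental options. A typical invocation, with MyApp standing in for any application:

    java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
         -XX:ShenandoahGCMode=generational -Xlog:gc MyApp

diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.hpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.hpp
new file mode 100644
index 00000000000..0946858169a
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.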
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHGENERATIONALMODE_HPP +#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHGENERATIONALMODE_HPP + +#include "gc/shenandoah/mode/shenandoahMode.hpp" + +class ShenandoahGenerationalMode : public ShenandoahMode { +public: + virtual void initialize_flags() const; + virtual const char* name() { return "Generational"; } + virtual bool is_diagnostic() { return false; } + virtual bool is_experimental() { return true; } + virtual bool is_generational() { return true; } +}; + +#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHGENERATIONALMODE_HPP diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp index d94ade25977..b31ded8e5e5 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp @@ -23,10 +23,7 @@ */ #include "precompiled.hpp" -#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "gc/shenandoah/mode/shenandoahIUMode.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" @@ -60,21 +57,5 @@ void ShenandoahIUMode::initialize_flags() const { SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahStackWatermarkBarrier); -} - -ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const { - if (ShenandoahGCHeuristics == nullptr) { - vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)"); - } - if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { - return new ShenandoahAggressiveHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { - return new ShenandoahStaticHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { - return new ShenandoahAdaptiveHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { - return new ShenandoahCompactHeuristics(); - } - vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); - return nullptr; + SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahCardBarrier); } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.hpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.hpp index c20c483c77d..455eb4543a8 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.hpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.hpp @@ -32,8 +32,6 @@ class ShenandoahHeuristics; class ShenandoahIUMode : public ShenandoahMode { public: virtual void initialize_flags() const; - virtual ShenandoahHeuristics* initialize_heuristics() const; - virtual const char* name() { return "Incremental-Update (IU)"; } virtual bool is_diagnostic() { return false; } virtual bool is_experimental() { return true; } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp new file mode 100644 index 00000000000..126062ab993 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" +#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" + +ShenandoahHeuristics* ShenandoahMode::initialize_heuristics(ShenandoahSpaceInfo* space_info) const { + if (ShenandoahGCHeuristics == nullptr) { + vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)"); + } + + if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { + return new ShenandoahAggressiveHeuristics(space_info); + } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { + return new ShenandoahStaticHeuristics(space_info); + } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { + return new ShenandoahAdaptiveHeuristics(space_info); + } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { + return new ShenandoahCompactHeuristics(space_info); + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); + } + + ShouldNotReachHere(); + return nullptr; +} + diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.hpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.hpp index 5af6fa826d5..f3e98d92b2e 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.hpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahMode.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,8 +26,12 @@ #ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP #define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP +#include "gc/shared/gc_globals.hpp" #include "memory/allocation.hpp" +#include "runtime/java.hpp" +#include "utilities/formatBuffer.hpp" +class ShenandoahSpaceInfo; class ShenandoahHeuristics; #define SHENANDOAH_CHECK_FLAG_SET(name) \ @@ -48,10 +53,11 @@ class ShenandoahHeuristics; class ShenandoahMode : public CHeapObj { public: virtual void initialize_flags() const = 0; - virtual ShenandoahHeuristics* initialize_heuristics() const = 0; + virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahSpaceInfo* space_info) const; virtual const char* name() = 0; virtual bool is_diagnostic() = 0; virtual bool is_experimental() = 0; + virtual bool is_generational() { return false; } }; #endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp index c22c88217e9..a2f26d3fa2f 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp @@ -23,11 +23,13 @@ */ #include "precompiled.hpp" +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp" #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" -#include "runtime/globals_extension.hpp" #include "runtime/java.hpp" void ShenandoahPassiveMode::initialize_flags() const { @@ -50,13 +52,15 @@ void ShenandoahPassiveMode::initialize_flags() const { SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStackWatermarkBarrier); + SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCardBarrier); // Final configuration checks // No barriers are required to run. 
} -ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics() const { + +ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics(ShenandoahSpaceInfo* space_info) const { if (ShenandoahGCHeuristics == nullptr) { vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)"); } - return new ShenandoahPassiveHeuristics(); + return new ShenandoahPassiveHeuristics(space_info); } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.hpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.hpp index c0e778174b3..032092a70ec 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.hpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.hpp @@ -30,8 +30,7 @@ class ShenandoahPassiveMode : public ShenandoahMode { public: virtual void initialize_flags() const; - virtual ShenandoahHeuristics* initialize_heuristics() const; - + virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahSpaceInfo* space_info) const; virtual const char* name() { return "Passive"; } virtual bool is_diagnostic() { return true; } virtual bool is_experimental() { return false; } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp index ff1ff5c2ed3..8f163ec45a8 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp @@ -23,10 +23,7 @@ */ #include "precompiled.hpp" -#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" -#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "gc/shenandoah/mode/shenandoahSATBMode.hpp" #include "logging/log.hpp" #include "logging/logTag.hpp" @@ -48,21 +45,6 @@ void ShenandoahSATBMode::initialize_flags() const { SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); SHENANDOAH_CHECK_FLAG_SET(ShenandoahStackWatermarkBarrier); -} - -ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const { - if (ShenandoahGCHeuristics == nullptr) { - vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)"); - } - if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { - return new ShenandoahAggressiveHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { - return new ShenandoahStaticHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { - return new ShenandoahAdaptiveHeuristics(); - } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { - return new ShenandoahCompactHeuristics(); - } - vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); - return nullptr; + assert(strcmp(ShenandoahGCMode, "generational") != 0, "Error"); + SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahCardBarrier); } diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.hpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.hpp index f246f9b20c7..3c2ae5bde93 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.hpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.hpp @@ -32,7 +32,6 @@ class ShenandoahHeuristics; class ShenandoahSATBMode : public ShenandoahMode { public: virtual void initialize_flags() const; - virtual ShenandoahHeuristics* 
initialize_heuristics() const; virtual const char* name() { return "Snapshot-At-The-Beginning (SATB)"; } virtual bool is_diagnostic() { return false; } virtual bool is_experimental() { return false; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAffiliation.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAffiliation.hpp new file mode 100644 index 00000000000..cacc62b78bf --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahAffiliation.hpp @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHAFFILIATION_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHAFFILIATION_HPP + +enum ShenandoahAffiliation { + FREE, + YOUNG_GENERATION, + OLD_GENERATION, +}; + +inline const char* shenandoah_affiliation_code(ShenandoahAffiliation type) { + switch(type) { + case FREE: + return "F"; + case YOUNG_GENERATION: + return "Y"; + case OLD_GENERATION: + return "O"; + default: + ShouldNotReachHere(); + return "?"; + } +} + +inline const char* shenandoah_affiliation_name(ShenandoahAffiliation type) { + switch (type) { + case FREE: + return "FREE"; + case YOUNG_GENERATION: + return "YOUNG"; + case OLD_GENERATION: + return "OLD"; + default: + ShouldNotReachHere(); + return "?"; + } +} + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHAFFILIATION_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp new file mode 100644 index 00000000000..5889882d933 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp @@ -0,0 +1,358 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp" +#include "gc/shenandoah/shenandoahAgeCensus.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" + +ShenandoahAgeCensus::ShenandoahAgeCensus() { + assert(ShenandoahHeap::heap()->mode()->is_generational(), "Only in generational mode"); + if (ShenandoahGenerationalMinTenuringAge > ShenandoahGenerationalMaxTenuringAge) { + vm_exit_during_initialization( + err_msg("ShenandoahGenerationalMinTenuringAge=" SIZE_FORMAT + " should be no more than ShenandoahGenerationalMaxTenuringAge=" SIZE_FORMAT, + ShenandoahGenerationalMinTenuringAge, ShenandoahGenerationalMaxTenuringAge)); + } + + _global_age_table = NEW_C_HEAP_ARRAY(AgeTable*, MAX_SNAPSHOTS, mtGC); + CENSUS_NOISE(_global_noise = NEW_C_HEAP_ARRAY(ShenandoahNoiseStats, MAX_SNAPSHOTS, mtGC);) + _tenuring_threshold = NEW_C_HEAP_ARRAY(uint, MAX_SNAPSHOTS, mtGC); + + for (int i = 0; i < MAX_SNAPSHOTS; i++) { + // Note that we don't now get perfdata from age_table + _global_age_table[i] = new AgeTable(false); + CENSUS_NOISE(_global_noise[i].clear();) + // Sentinel value + _tenuring_threshold[i] = MAX_COHORTS; + } + if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) { + size_t max_workers = ShenandoahHeap::heap()->max_workers(); + _local_age_table = NEW_C_HEAP_ARRAY(AgeTable*, max_workers, mtGC); + CENSUS_NOISE(_local_noise = NEW_C_HEAP_ARRAY(ShenandoahNoiseStats, max_workers, mtGC);) + for (uint i = 0; i < max_workers; i++) { + _local_age_table[i] = new AgeTable(false); + CENSUS_NOISE(_local_noise[i].clear();) + } + } else { + _local_age_table = nullptr; + } + _epoch = MAX_SNAPSHOTS - 1; // see update_epoch() +} + +CENSUS_NOISE(void ShenandoahAgeCensus::add(uint obj_age, uint region_age, uint region_youth, size_t size, uint worker_id) {) +NO_CENSUS_NOISE(void ShenandoahAgeCensus::add(uint obj_age, uint region_age, size_t size, uint worker_id) {) + if (obj_age <= markWord::max_age) { + assert(obj_age < MAX_COHORTS && region_age < MAX_COHORTS, "Should have been tenured"); +#ifdef SHENANDOAH_CENSUS_NOISE + // Region ageing is stochastic and non-monotonic; this vitiates mortality + // demographics in ways that might defeat our algorithms. Marking may be a + // time when we might be able to correct this, but we currently do not do + // this. Like skipped statistics further below, we want to track the + // impact of this noise to see if this may be worthwhile. JDK-. 
+    uint age = obj_age;
+    if (region_age > 0) {
+      add_aged(size, worker_id);   // this tracking is coarse for now
+      age += region_age;
+      if (age >= MAX_COHORTS) {
+        age = (uint)(MAX_COHORTS - 1);  // clamp
+        add_clamped(size, worker_id);
+      }
+    }
+    if (region_youth > 0) {   // track object volume with retrograde age
+      add_young(size, worker_id);
+    }
+#else   // SHENANDOAH_CENSUS_NOISE
+    uint age = MIN2(obj_age + region_age, (uint)(MAX_COHORTS - 1));  // clamp
+#endif  // SHENANDOAH_CENSUS_NOISE
+    get_local_age_table(worker_id)->add(age, size);
+  } else {
+    // update skipped statistics
+    CENSUS_NOISE(add_skipped(size, worker_id);)
+  }
+}
+
+#ifdef SHENANDOAH_CENSUS_NOISE
+void ShenandoahAgeCensus::add_skipped(size_t size, uint worker_id) {
+  _local_noise[worker_id].skipped += size;
+}
+
+void ShenandoahAgeCensus::add_aged(size_t size, uint worker_id) {
+  _local_noise[worker_id].aged += size;
+}
+
+void ShenandoahAgeCensus::add_clamped(size_t size, uint worker_id) {
+  _local_noise[worker_id].clamped += size;
+}
+
+void ShenandoahAgeCensus::add_young(size_t size, uint worker_id) {
+  _local_noise[worker_id].young += size;
+}
+#endif // SHENANDOAH_CENSUS_NOISE
+
+// Prepare for a new census update, by clearing appropriate global slots.
+void ShenandoahAgeCensus::prepare_for_census_update() {
+  assert(_epoch < MAX_SNAPSHOTS, "Out of bounds");
+  if (++_epoch >= MAX_SNAPSHOTS) {
+    _epoch = 0;
+  }
+  _global_age_table[_epoch]->clear();
+  CENSUS_NOISE(_global_noise[_epoch].clear();)
+}
+
+// Update the census data from appropriate sources,
+// and compute the new tenuring threshold.
+void ShenandoahAgeCensus::update_census(size_t age0_pop, AgeTable* pv1, AgeTable* pv2) {
+  // Check that we won't overwrite existing data: caller is
+  // responsible for explicitly clearing the slot via calling
+  // prepare_for_census_update().
+  assert(_global_age_table[_epoch]->is_clear(), "Dirty decks");
+  CENSUS_NOISE(assert(_global_noise[_epoch].is_clear(), "Dirty decks");)
+  if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
+    assert(pv1 == nullptr && pv2 == nullptr, "Error, check caller");
+    // Seed cohort 0 with population that may have been missed during
+    // regular census.
+    _global_age_table[_epoch]->add((uint)0, age0_pop);
+
+    size_t max_workers = ShenandoahHeap::heap()->max_workers();
+    // Merge data from local age tables into the global age table for the epoch,
+    // clearing the local tables.
+    for (uint i = 0; i < max_workers; i++) {
+      // age stats
+      _global_age_table[_epoch]->merge(_local_age_table[i]);
+      _local_age_table[i]->clear();  // clear for next census
+      // Merge noise stats
+      CENSUS_NOISE(_global_noise[_epoch].merge(_local_noise[i]);)
+      CENSUS_NOISE(_local_noise[i].clear();)
+    }
+  } else {
+    // census during evac
+    assert(pv1 != nullptr && pv2 != nullptr, "Error, check caller");
+    _global_age_table[_epoch]->merge(pv1);
+    _global_age_table[_epoch]->merge(pv2);
+  }
+
+  update_tenuring_threshold();
+}
+
+
+// Reset the epoch for the global age tables,
+// clearing all history.
+void ShenandoahAgeCensus::reset_global() {
+  assert(_epoch < MAX_SNAPSHOTS, "Out of bounds");
+  for (uint i = 0; i < MAX_SNAPSHOTS; i++) {
+    _global_age_table[i]->clear();
+    CENSUS_NOISE(_global_noise[i].clear();)
+  }
+  _epoch = MAX_SNAPSHOTS - 1;  // sentinel, as in the constructor: the next prepare_for_census_update() wraps to slot 0
+  assert(_epoch < MAX_SNAPSHOTS, "Error");
+}
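The _epoch index behaves as a ring over MAX_SNAPSHOTS slots. The sketch below isolates just the wrap-around arithmetic that prepare_for_census_update() and the previous-epoch lookup rely on; it is standalone, illustrative C++, not the VM types.

    // epoch_ring_sketch.cpp -- the wrap-around arithmetic used by the census code.
    #include <cstdio>

    constexpr unsigned MAX_SNAPSHOTS = 16;   // matches the ring size in this patch

    struct EpochRing {
      unsigned epoch = MAX_SNAPSHOTS - 1;    // sentinel used by the constructor and reset_global()

      void advance() {                       // mirrors prepare_for_census_update()
        if (++epoch >= MAX_SNAPSHOTS) {
          epoch = 0;
        }
      }

      unsigned previous() const {            // mirrors previous_tenuring_threshold() indexing
        return (epoch == 0) ? MAX_SNAPSHOTS - 1 : epoch - 1;
      }
    };

    int main() {
      EpochRing ring;
      ring.advance();                        // the first census lands in slot 0
      std::printf("epoch=%u previous=%u\n", ring.epoch, ring.previous());  // epoch=0 previous=15
      return 0;
    }

+
+// Reset the local age tables, clearing any partial census.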
+void ShenandoahAgeCensus::reset_local() { + if (!ShenandoahGenerationalAdaptiveTenuring || ShenandoahGenerationalCensusAtEvac) { + assert(_local_age_table == nullptr, "Error"); + return; + } + size_t max_workers = ShenandoahHeap::heap()->max_workers(); + for (uint i = 0; i < max_workers; i++) { + _local_age_table[i]->clear(); + CENSUS_NOISE(_local_noise[i].clear();) + } +} + +// Is global census information clear? +bool ShenandoahAgeCensus::is_clear_global() { + assert(_epoch < MAX_SNAPSHOTS, "Out of bounds"); + for (uint i = 0; i < MAX_SNAPSHOTS; i++) { + bool clear = _global_age_table[i]->is_clear(); + CENSUS_NOISE(clear |= _global_noise[i].is_clear();) + if (!clear) { + return false; + } + } + return true; +} + +// Is local census information clear? +bool ShenandoahAgeCensus::is_clear_local() { + if (!ShenandoahGenerationalAdaptiveTenuring || ShenandoahGenerationalCensusAtEvac) { + assert(_local_age_table == nullptr, "Error"); + return true; + } + size_t max_workers = ShenandoahHeap::heap()->max_workers(); + for (uint i = 0; i < max_workers; i++) { + bool clear = _local_age_table[i]->is_clear(); + CENSUS_NOISE(clear |= _local_noise[i].is_clear();) + if (!clear) { + return false; + } + } + return true; +} + +void ShenandoahAgeCensus::update_tenuring_threshold() { + if (!ShenandoahGenerationalAdaptiveTenuring) { + _tenuring_threshold[_epoch] = InitialTenuringThreshold; + } else { + uint tt = compute_tenuring_threshold(); + assert(tt <= MAX_COHORTS, "Out of bounds"); + _tenuring_threshold[_epoch] = tt; + } + print(); + log_trace(gc, age)("New tenuring threshold " UINTX_FORMAT " (min " UINTX_FORMAT ", max " UINTX_FORMAT")", + (uintx) _tenuring_threshold[_epoch], ShenandoahGenerationalMinTenuringAge, ShenandoahGenerationalMaxTenuringAge); +} + +// Currently Shenandoah{Min,Max}TenuringAge have a floor of 1 because we +// aren't set up to promote age 0 objects. +uint ShenandoahAgeCensus::compute_tenuring_threshold() { + // Dispose of the extremal cases early so the loop below + // is less fragile. + if (ShenandoahGenerationalMaxTenuringAge == ShenandoahGenerationalMinTenuringAge) { + return ShenandoahGenerationalMaxTenuringAge; // Any value in [1,16] + } + assert(ShenandoahGenerationalMinTenuringAge < ShenandoahGenerationalMaxTenuringAge, "Error"); + + // Starting with the oldest cohort with a non-trivial population + // (as specified by ShenandoahGenerationalTenuringCohortPopulationThreshold) in the + // previous epoch, and working down the cohorts by age, find the + // oldest age that has a significant mortality rate (as specified by + // ShenandoahGenerationalTenuringMortalityRateThreshold). We use this as + // tenuring age to be used for the evacuation cycle to follow. + // Results are clamped between user-specified min & max guardrails, + // so we ignore any cohorts outside ShenandoahGenerational[Min,Max]Age. + + // Current and previous epoch in ring + const uint cur_epoch = _epoch; + const uint prev_epoch = cur_epoch > 0 ? cur_epoch - 1 : markWord::max_age; + + // Current and previous population vectors in ring + const AgeTable* cur_pv = _global_age_table[cur_epoch]; + const AgeTable* prev_pv = _global_age_table[prev_epoch]; + uint upper_bound = ShenandoahGenerationalMaxTenuringAge; + const uint prev_tt = previous_tenuring_threshold(); + if (ShenandoahGenerationalCensusIgnoreOlderCohorts && prev_tt > 0) { + // We stay below the computed tenuring threshold for the last cycle plus 1, + // ignoring the mortality rates of any older cohorts. 
+ upper_bound = MIN2(upper_bound, prev_tt + 1); + } + upper_bound = MIN2(upper_bound, markWord::max_age); + + const uint lower_bound = MAX2((uint)ShenandoahGenerationalMinTenuringAge, (uint)1); + + uint tenuring_threshold = upper_bound; + for (uint i = upper_bound; i >= lower_bound; i--) { + assert(i > 0, "Index (i-1) would underflow/wrap"); + assert(i <= markWord::max_age, "Index i would overflow"); + // Cohort of current age i + const size_t cur_pop = cur_pv->sizes[i]; + const size_t prev_pop = prev_pv->sizes[i-1]; + const double mr = mortality_rate(prev_pop, cur_pop); + if (prev_pop > ShenandoahGenerationalTenuringCohortPopulationThreshold && + mr > ShenandoahGenerationalTenuringMortalityRateThreshold) { + // This is the oldest cohort that has high mortality. + // We ignore any cohorts that had a very low population count, or + // that have a lower mortality rate than we care to age in young; these + // cohorts are considered eligible for tenuring when all older + // cohorts are. We return the next higher age as the tenuring threshold + // so that we do not prematurely promote objects of this age. + assert(tenuring_threshold == i+1 || tenuring_threshold == upper_bound, "Error"); + assert(tenuring_threshold >= lower_bound && tenuring_threshold <= upper_bound, "Error"); + return tenuring_threshold; + } + // Remember that we passed over this cohort, looking for younger cohorts + // showing high mortality. We want to tenure cohorts of this age. + tenuring_threshold = i; + } + assert(tenuring_threshold >= lower_bound && tenuring_threshold <= upper_bound, "Error"); + return tenuring_threshold; +} + +// Mortality rate of a cohort, given its previous and current population +double ShenandoahAgeCensus::mortality_rate(size_t prev_pop, size_t cur_pop) { + // The following also covers the case where both entries are 0 + if (prev_pop <= cur_pop) { + // adjust for inaccurate censuses by finessing the + // reappearance of dark matter as normal matter; + // mortality rate is 0 if population remained the same + // or increased. + if (cur_pop > prev_pop) { + log_trace(gc, age) + (" (dark matter) Cohort population " SIZE_FORMAT_W(10) " to " SIZE_FORMAT_W(10), + prev_pop*oopSize, cur_pop*oopSize); + } + return 0.0; + } + assert(prev_pop > 0 && prev_pop > cur_pop, "Error"); + return 1.0 - (((double)cur_pop)/((double)prev_pop)); +} + +void ShenandoahAgeCensus::print() { + // Print the population vector for the current epoch, and + // for the previous epoch, as well as the computed mortality + // ratio for each extant cohort. + const uint cur_epoch = _epoch; + const uint prev_epoch = cur_epoch > 0 ? 
cur_epoch - 1: markWord::max_age; + + const AgeTable* cur_pv = _global_age_table[cur_epoch]; + const AgeTable* prev_pv = _global_age_table[prev_epoch]; + + const uint tt = tenuring_threshold(); + + size_t total= 0; + for (uint i = 1; i < MAX_COHORTS; i++) { + const size_t prev_pop = prev_pv->sizes[i-1]; // (i-1) OK because i >= 1 + const size_t cur_pop = cur_pv->sizes[i]; + double mr = mortality_rate(prev_pop, cur_pop); + // Suppress printing when everything is zero + if (prev_pop + cur_pop > 0) { + log_info(gc, age) + (" - age %3u: prev " SIZE_FORMAT_W(10) " bytes, curr " SIZE_FORMAT_W(10) " bytes, mortality %.2f ", + i, prev_pop*oopSize, cur_pop*oopSize, mr); + } + total += cur_pop; + if (i == tt) { + // Underline the cohort for tenuring threshold (if < MAX_COHORTS) + log_info(gc, age)("----------------------------------------------------------------------------"); + } + } + CENSUS_NOISE(_global_noise[cur_epoch].print(total);) +} + +#ifdef SHENANDOAH_CENSUS_NOISE +void ShenandoahNoiseStats::print(size_t total) { + if (total > 0) { + float f_skipped = (float)skipped/(float)total; + float f_aged = (float)aged/(float)total; + float f_clamped = (float)clamped/(float)total; + float f_young = (float)young/(float)total; + log_info(gc, age)("Skipped: " SIZE_FORMAT_W(10) " (%.2f), R-Aged: " SIZE_FORMAT_W(10) " (%.2f), " + "Clamped: " SIZE_FORMAT_W(10) " (%.2f), R-Young: " SIZE_FORMAT_W(10) " (%.2f)", + skipped*oopSize, f_skipped, aged*oopSize, f_aged, + clamped*oopSize, f_clamped, young*oopSize, f_young); + } +} +#endif // SHENANDOAH_CENSUS_NOISE diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp new file mode 100644 index 00000000000..419a79d8344 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.hpp @@ -0,0 +1,189 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHAGECENSUS_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHAGECENSUS_HPP
+
+#include "gc/shared/ageTable.hpp"
+
+#ifndef PRODUCT
+// Enable noise instrumentation
+#define SHENANDOAH_CENSUS_NOISE 1
+#endif // PRODUCT
+
+#ifdef SHENANDOAH_CENSUS_NOISE
+
+#define CENSUS_NOISE(x) x
+#define NO_CENSUS_NOISE(x)
+
+struct ShenandoahNoiseStats {
+  size_t skipped;   // Volume of objects skipped
+  size_t aged;      // Volume of objects from aged regions
+  size_t clamped;   // Volume of objects whose ages were clamped
+  size_t young;     // Volume of (rejuvenated) objects of retrograde age
+
+  ShenandoahNoiseStats() {
+    clear();
+  }
+
+  void clear() {
+    skipped = 0;
+    aged = 0;
+    clamped = 0;
+    young = 0;
+  }
+
+#ifndef PRODUCT
+  bool is_clear() {
+    return (skipped + aged + clamped + young) == 0;
+  }
+#endif // !PRODUCT
+
+  void merge(ShenandoahNoiseStats& other) {
+    skipped += other.skipped;
+    aged += other.aged;
+    clamped += other.clamped;
+    young += other.young;
+  }
+
+  void print(size_t total);
+};
+#else  // SHENANDOAH_CENSUS_NOISE
+#define CENSUS_NOISE(x)
+#define NO_CENSUS_NOISE(x) x
+#endif // SHENANDOAH_CENSUS_NOISE
+
+// A class for tracking a sequence of cohort population vectors (or,
+// interchangeably, age tables) for up to C=MAX_COHORTS age cohorts, where a cohort
+// represents the set of objects allocated during a specific inter-GC epoch.
+// Epochs are demarcated by GC cycles, with objects that survive a cycle aging by
+// one epoch. The census tracks the historical variation of cohort demographics
+// across N=MAX_SNAPSHOTS recent epochs. Since there are at most C age cohorts in
+// the population, tracking N=C epochal snapshots suffices to capture the complete
+// longitudinal demographics of every cohort in the young generation.
+// The _global_age_table is thus a C x N (row-major) matrix; currently C = N = 16.
+// In theory, we might track longer demographic histories (a larger N=MAX_SNAPSHOTS),
+// but we do not today. In particular, the current tenuring threshold algorithm uses
+// only the 2 most recent snapshots, with the remaining MAX_SNAPSHOTS-2=14 reserved
+// for research purposes.
+//
+// In addition, this class maintains per-worker population vectors into which the
+// census for the current minor GC is accumulated (during marking or, optionally,
+// during evacuation). These are cleared after each marking (respectively, evacuation)
+// cycle, once the per-worker data has been consolidated into the appropriate global
+// population vector for that minor collection. The _local_age_table is thus a
+// C x W matrix, for W GC workers.
+class ShenandoahAgeCensus: public CHeapObj<mtGC> {
+  AgeTable** _global_age_table;   // Global age table used for adapting tenuring threshold, one per snapshot
+  AgeTable** _local_age_table;    // Local scratch age tables to track object ages, one per worker
+
+#ifdef SHENANDOAH_CENSUS_NOISE
+  ShenandoahNoiseStats* _global_noise;  // Noise stats, one per snapshot
+  ShenandoahNoiseStats* _local_noise;   // Local scratch table for noise stats, one per worker
+#endif // SHENANDOAH_CENSUS_NOISE
+
+  uint _epoch;                    // Current epoch (modulo max age)
+  uint *_tenuring_threshold;      // An array of the last N tenuring threshold values we
+                                  // computed.
+ + // Mortality rate of a cohort, given its population in + // previous and current epochs + double mortality_rate(size_t prev_pop, size_t cur_pop); + + // Update the tenuring threshold, calling + // compute_tenuring_threshold to calculate the new + // value + void update_tenuring_threshold(); + + // This uses the data in the ShenandoahAgeCensus object's _global_age_table and the + // current _epoch to compute a new tenuring threshold, which will be remembered + // until the next invocation of compute_tenuring_threshold. + uint compute_tenuring_threshold(); + + public: + enum { + MAX_COHORTS = AgeTable::table_size, // = markWord::max_age + 1 + MAX_SNAPSHOTS = MAX_COHORTS // May change in the future + }; + + ShenandoahAgeCensus(); + + // Return the local age table (population vector) for worker_id. + // Only used in the case of (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) + AgeTable* get_local_age_table(uint worker_id) { + return (AgeTable*) _local_age_table[worker_id]; + } + + // Update the local age table for worker_id by size for + // given obj_age, region_age, and region_youth + CENSUS_NOISE(void add(uint obj_age, uint region_age, uint region_youth, size_t size, uint worker_id);) + NO_CENSUS_NOISE(void add(uint obj_age, uint region_age, size_t size, uint worker_id);) + +#ifdef SHENANDOAH_CENSUS_NOISE + // Update the local skip table for worker_id by size + void add_skipped(size_t size, uint worker_id); + // Update the local aged region volume table for worker_id by size + void add_aged(size_t size, uint worker_id); + // Update the local clamped object volume table for worker_id by size + void add_clamped(size_t size, uint worker_id); + // Update the local (rejuvenated) object volume (retrograde age) for worker_id by size + void add_young(size_t size, uint worker_id); +#endif // SHENANDOAH_CENSUS_NOISE + + // Update to a new epoch, creating a slot for new census. + void prepare_for_census_update(); + + // Update the census data, and compute the new tenuring threshold. + // age0_pop is the population of Cohort 0 that may have been missed in + // the regular census. + void update_census(size_t age0_pop, AgeTable* pv1 = nullptr, AgeTable* pv2 = nullptr); + + // Return the most recently computed tenuring threshold + uint tenuring_threshold() const { return _tenuring_threshold[_epoch]; } + + // Return the tenuring threshold computed for the previous epoch + uint previous_tenuring_threshold() const { + assert(_epoch < MAX_SNAPSHOTS, "Error"); + uint prev = _epoch - 1; + if (prev >= MAX_SNAPSHOTS) { + // _epoch is 0 + prev = MAX_SNAPSHOTS - 1; + } + return _tenuring_threshold[prev]; + } + + // Reset the epoch, clearing accumulated census history + void reset_global(); + // Reset any partial census information + void reset_local(); + + // Check whether census information is clear + bool is_clear_global(); + bool is_clear_local(); + + // Print the age census information + void print(); +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHAGECENSUS_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp index 59b4615d326..bd0fbe53dc0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,15 +26,17 @@
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
 
+#include "gc/shenandoah/shenandoahAffiliation.hpp"
 #include "memory/allocation.hpp"
 
 class ShenandoahAllocRequest : StackObj {
 public:
   enum Type {
     _alloc_shared,      // Allocate common, outside of TLAB
-    _alloc_shared_gc,   // Allocate common, outside of GCLAB
+    _alloc_shared_gc,   // Allocate common, outside of GCLAB/PLAB
     _alloc_tlab,        // Allocate TLAB
     _alloc_gclab,       // Allocate GCLAB
+    _alloc_plab,        // Allocate PLAB
     _ALLOC_LIMIT
   };
 
@@ -47,6 +50,8 @@ class ShenandoahAllocRequest : StackObj {
         return "TLAB";
       case _alloc_gclab:
         return "GCLAB";
+      case _alloc_plab:
+        return "PLAB";
       default:
         ShouldNotReachHere();
         return "";
@@ -54,17 +59,35 @@ class ShenandoahAllocRequest : StackObj {
   }
 
 private:
+  // When ShenandoahElasticTLAB is enabled, the request cannot be made smaller than _min_size.
   size_t _min_size;
+
+  // The size of the request in words.
   size_t _requested_size;
+
+  // The allocation may be increased for padding or decreased to fit in the remaining space of a region.
   size_t _actual_size;
+
+  // For a humongous object, the _waste is the amount of free memory in the last region.
+  // For other requests, the _waste will be non-zero if the request encountered one or more regions
+  // with less memory than _min_size. This waste does not contribute to the used memory for
+  // the heap, but it does contribute to the allocation rate for heuristics.
+  size_t _waste;
+
+  // This is the type of the request.
   Type _alloc_type;
+
+  // This is the generation that the request is targeting.
+  ShenandoahAffiliation const _affiliation;
+
 #ifdef ASSERT
+  // Check that this is set before being read.
bool _actual_size_set; #endif - ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type) : + ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type, ShenandoahAffiliation affiliation) : _min_size(_min_size), _requested_size(_requested_size), - _actual_size(0), _alloc_type(_alloc_type) + _actual_size(0), _waste(0), _alloc_type(_alloc_type), _affiliation(affiliation) #ifdef ASSERT , _actual_size_set(false) #endif @@ -72,39 +95,43 @@ class ShenandoahAllocRequest : StackObj { public: static inline ShenandoahAllocRequest for_tlab(size_t min_size, size_t requested_size) { - return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab); + return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab, ShenandoahAffiliation::YOUNG_GENERATION); } static inline ShenandoahAllocRequest for_gclab(size_t min_size, size_t requested_size) { - return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab); + return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab, ShenandoahAffiliation::YOUNG_GENERATION); + } + + static inline ShenandoahAllocRequest for_plab(size_t min_size, size_t requested_size) { + return ShenandoahAllocRequest(min_size, requested_size, _alloc_plab, ShenandoahAffiliation::OLD_GENERATION); } - static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size) { - return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc); + static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size, ShenandoahAffiliation affiliation) { + return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc, affiliation); } static inline ShenandoahAllocRequest for_shared(size_t requested_size) { - return ShenandoahAllocRequest(0, requested_size, _alloc_shared); + return ShenandoahAllocRequest(0, requested_size, _alloc_shared, ShenandoahAffiliation::YOUNG_GENERATION); } - inline size_t size() { + inline size_t size() const { return _requested_size; } - inline Type type() { + inline Type type() const { return _alloc_type; } - inline const char* type_string() { + inline const char* type_string() const { return alloc_type_to_string(_alloc_type); } - inline size_t min_size() { + inline size_t min_size() const { assert (is_lab_alloc(), "Only access for LAB allocs"); return _min_size; } - inline size_t actual_size() { + inline size_t actual_size() const { assert (_actual_size_set, "Should be set"); return _actual_size; } @@ -117,12 +144,21 @@ class ShenandoahAllocRequest : StackObj { _actual_size = v; } - inline bool is_mutator_alloc() { + inline size_t waste() const { + return _waste; + } + + inline void set_waste(size_t v) { + _waste = v; + } + + inline bool is_mutator_alloc() const { switch (_alloc_type) { case _alloc_tlab: case _alloc_shared: return true; case _alloc_gclab: + case _alloc_plab: case _alloc_shared_gc: return false; default: @@ -131,12 +167,13 @@ class ShenandoahAllocRequest : StackObj { } } - inline bool is_gc_alloc() { + inline bool is_gc_alloc() const { switch (_alloc_type) { case _alloc_tlab: case _alloc_shared: return false; case _alloc_gclab: + case _alloc_plab: case _alloc_shared_gc: return true; default: @@ -145,10 +182,11 @@ class ShenandoahAllocRequest : StackObj { } } - inline bool is_lab_alloc() { + inline bool is_lab_alloc() const { switch (_alloc_type) { case _alloc_tlab: case _alloc_gclab: + case _alloc_plab: return true; case _alloc_shared: case _alloc_shared_gc: @@ -158,6 +196,22 @@ class ShenandoahAllocRequest : StackObj { return false; } } + + bool is_old() const { + 
return _affiliation == OLD_GENERATION; + } + + bool is_young() const { + return _affiliation == YOUNG_GENERATION; + } + + ShenandoahAffiliation affiliation() const { + return _affiliation; + } + + const char* affiliation_name() const { + return shenandoah_affiliation_name(_affiliation); + } }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp index 7d31ff02e1a..b08377223e1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +29,7 @@ #include "gc/shared/workerPolicy.hpp" #include "gc/shenandoah/shenandoahArguments.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "runtime/globals_extension.hpp" @@ -50,6 +52,7 @@ void ShenandoahArguments::initialize() { FLAG_SET_DEFAULT(ShenandoahLoadRefBarrier, false); FLAG_SET_DEFAULT(ShenandoahIUBarrier, false); FLAG_SET_DEFAULT(ShenandoahCASBarrier, false); + FLAG_SET_DEFAULT(ShenandoahCardBarrier, false); FLAG_SET_DEFAULT(ShenandoahCloneBarrier, false); FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false); @@ -69,6 +72,13 @@ void ShenandoahArguments::initialize() { FLAG_SET_DEFAULT(UseNUMA, true); } + // We use this as the time period for tracking minimum mutator utilization (MMU). + // In generational mode, the MMU is used as a signal to adjust the size of the + // young generation. + if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { + FLAG_SET_DEFAULT(GCPauseIntervalMillis, 5000); + } + // Set up default number of concurrent threads. We want to have cycles complete fast // enough, but we also do not want to steal too much CPU from the concurrently running // application. Using 1/4 of available threads for concurrent GC seems a good @@ -144,6 +154,10 @@ void ShenandoahArguments::initialize() { #endif // ASSERT #endif // COMPILER2 + if (ShenandoahIUBarrier) { + assert(strcmp(ShenandoahGCMode, "generational"), "Generational mode does not support IU barrier"); + } + // Record more information about previous cycles for improved debugging pleasure if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) { FLAG_SET_DEFAULT(LogEventsBufferEntries, 250); @@ -178,6 +192,8 @@ size_t ShenandoahArguments::conservative_max_heap_alignment() { } void ShenandoahArguments::initialize_alignments() { + CardTable::initialize_card_size(); + // Need to setup sizes early to get correct alignments. 
MaxHeapSize = ShenandoahHeapRegion::setup_sizes(MaxHeapSize); @@ -191,5 +207,8 @@ void ShenandoahArguments::initialize_alignments() { } CollectedHeap* ShenandoahArguments::create_heap() { + if (strcmp(ShenandoahGCMode, "generational") == 0) { + return new ShenandoahGenerationalHeap(new ShenandoahCollectorPolicy()); + } return new ShenandoahHeap(new ShenandoahCollectorPolicy()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index beb4a1d2892..918f79d6bd2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -71,6 +71,9 @@ void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) { msg.append(" %3s marked strong\n", ctx->is_marked_strong(obj) ? "" : "not"); msg.append(" %3s marked weak\n", ctx->is_marked_weak(obj) ? "" : "not"); msg.append(" %3s in collection set\n", heap->in_collection_set(obj) ? "" : "not"); + if (heap->mode()->is_generational() && !obj->is_forwarded()) { + msg.append(" age: %d\n", obj->age()); + } msg.append(" mark:%s\n", mw_ss.freeze()); msg.append(" region: %s", ss.freeze()); } @@ -385,7 +388,7 @@ void ShenandoahAsserts::assert_locked_or_shenandoah_safepoint(Mutex* lock, const return; } - ShenandoahMessageBuffer msg("Must ba at a Shenandoah safepoint or held %s lock", lock->name()); + ShenandoahMessageBuffer msg("Must be at a Shenandoah safepoint or held %s lock", lock->name()); report_vm_error(file, line, msg.buffer()); } @@ -425,3 +428,20 @@ void ShenandoahAsserts::assert_heaplocked_or_safepoint(const char* file, int lin ShenandoahMessageBuffer msg("Heap lock must be owned by current thread, or be at safepoint"); report_vm_error(file, line, msg.buffer()); } + +// Unlike assert_heaplocked_or_safepoint(), this does not require current thread in safepoint to be a VM thread +// TODO: This should be more aptly named. Nothing in this method checks we are actually in Full GC. +void ShenandoahAsserts::assert_heaplocked_or_fullgc_safepoint(const char* file, int line) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + if (heap->lock()->owned_by_self()) { + return; + } + + if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { + return; + } + + ShenandoahMessageBuffer msg("Heap lock must be owned by current thread, or be at safepoint"); + report_vm_error(file, line, msg.buffer()); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp index c730eafb89d..af10169396e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -72,6 +73,7 @@ class ShenandoahAsserts { static void assert_heaplocked(const char* file, int line); static void assert_not_heaplocked(const char* file, int line); static void assert_heaplocked_or_safepoint(const char* file, int line); + static void assert_heaplocked_or_fullgc_safepoint(const char* file, int line); #ifdef ASSERT #define shenandoah_assert_in_heap(interior_loc, obj) \ @@ -163,6 +165,14 @@ class ShenandoahAsserts { #define shenandoah_assert_heaplocked_or_safepoint() \ ShenandoahAsserts::assert_heaplocked_or_safepoint(__FILE__, __LINE__) + +#define shenandoah_assert_heaplocked_or_fullgc_safepoint() \ + ShenandoahAsserts::assert_heaplocked_or_fullgc_safepoint(__FILE__, __LINE__) +#define shenandoah_assert_control_or_vm_thread() \ + assert(Thread::current()->is_VM_thread() || Thread::current() == ShenandoahHeap::heap()->control_thread(), "Expected control thread or vm thread") + +#define shenandoah_assert_generational() \ + assert(ShenandoahHeap::heap()->mode()->is_generational(), "Must be in generational mode here.") #else #define shenandoah_assert_in_heap(interior_loc, obj) #define shenandoah_assert_in_heap_or_null(interior_loc, obj) @@ -213,6 +223,9 @@ class ShenandoahAsserts { #define shenandoah_assert_heaplocked() #define shenandoah_assert_not_heaplocked() #define shenandoah_assert_heaplocked_or_safepoint() +#define shenandoah_assert_heaplocked_or_fullgc_safepoint() +#define shenandoah_assert_control_or_vm_thread() +#define shenandoah_assert_generational() #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp index d2857daccf6..2589648bae4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +42,7 @@ class ShenandoahBarrierSetC1; class ShenandoahBarrierSetC2; -ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) : +ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) : BarrierSet(make_barrier_set_assembler(), make_barrier_set_c1(), make_barrier_set_c2(), @@ -52,6 +53,10 @@ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) : _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize), _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator) { + if (ShenandoahCardBarrier) { + _card_table = new ShenandoahCardTable(heap_region); + _card_table->initialize(); + } } ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() { @@ -124,6 +129,15 @@ void ShenandoahBarrierSet::on_thread_detach(Thread *thread) { gclab->retire(); } + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock. + // This is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each + // PLAB is aligned with the start of each card's memory range. + // TODO: Assert this in retire_plab? 
+ if (plab != nullptr) { + _heap->retire_plab(plab); + } + // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC if (ShenandoahStackWatermarkBarrier) { if (_heap->is_concurrent_mark_in_progress()) { @@ -142,3 +156,22 @@ void ShenandoahBarrierSet::clone_barrier_runtime(oop src) { clone_barrier(src); } } + +void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + + HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize)); + // In the case of compressed oops, start and end may potentially be misaligned; + // so we need to conservatively align the first downward (this is not + // strictly necessary for current uses, but a case of good hygiene and, + // if you will, aesthetics) and the second upward (this is essential for + // current uses) to a HeapWord boundary, so we mark all cards overlapping + // this write. + HeapWord* aligned_start = align_down(start, HeapWordSize); + HeapWord* aligned_end = align_up (end, HeapWordSize); + // If compressed oops were not being used, these should already be aligned + assert(UseCompressedOops || (aligned_start == start && aligned_end == end), + "Expected heap word alignment of start and end"); + _heap->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start)); +} + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp index 1d4dd7b9e3e..4927b599711 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +27,7 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP #include "gc/shared/barrierSet.hpp" +#include "gc/shenandoah/shenandoahCardTable.hpp" #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp" class ShenandoahHeap; @@ -34,11 +36,12 @@ class ShenandoahBarrierSetAssembler; class ShenandoahBarrierSet: public BarrierSet { private: ShenandoahHeap* const _heap; + ShenandoahCardTable* _card_table; BufferNode::Allocator _satb_mark_queue_buffer_allocator; ShenandoahSATBMarkQueueSet _satb_mark_queue_set; public: - ShenandoahBarrierSet(ShenandoahHeap* heap); + ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region); static ShenandoahBarrierSetAssembler* assembler(); @@ -46,6 +49,10 @@ class ShenandoahBarrierSet: public BarrierSet { return barrier_set_cast(BarrierSet::barrier_set()); } + inline ShenandoahCardTable* card_table() { + return _card_table; + } + static ShenandoahSATBMarkQueueSet& satb_mark_queue_set() { return barrier_set()->_satb_mark_queue_set; } @@ -111,9 +118,14 @@ class ShenandoahBarrierSet: public BarrierSet { template inline oop oop_xchg(DecoratorSet decorators, T* addr, oop new_value); + template + void write_ref_field_post(T* field); + + void write_ref_array(HeapWord* start, size_t count); + private: template - inline void arraycopy_marking(T* src, T* dst, size_t count); + inline void arraycopy_marking(T* src, T* dst, size_t count, bool is_old_marking); template inline void arraycopy_evacuation(T* src, size_t count); template diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp index b8da50dd6e1..7d8846a293b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +29,7 @@ #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shared/accessBarrierSupport.inline.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp" #include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp" @@ -36,6 +38,8 @@ #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" +#include "memory/iterator.inline.hpp" #include "oops/oop.inline.hpp" inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) { @@ -103,6 +107,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators, // Prevent resurrection of unreachable phantom (i.e. weak-native) references. if ((decorators & ON_PHANTOM_OOP_REF) != 0 && _heap->is_concurrent_weak_root_in_progress() && + _heap->is_in_active_generation(obj) && !_heap->marking_context()->is_marked(obj)) { return nullptr; } @@ -110,6 +115,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators, // Prevent resurrection of unreachable weak references. 
if ((decorators & ON_WEAK_OOP_REF) != 0 && _heap->is_concurrent_weak_root_in_progress() && + _heap->is_in_active_generation(obj) && !_heap->marking_context()->is_marked_strong(obj)) { return nullptr; } @@ -179,6 +185,13 @@ inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oo } } +template +inline void ShenandoahBarrierSet::write_ref_field_post(T* field) { + assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); + volatile CardTable::CardValue* byte = card_table()->byte_for(field); + *byte = CardTable::dirty_card_val(); +} + template inline oop ShenandoahBarrierSet::oop_load(DecoratorSet decorators, T* addr) { oop value = RawAccess<>::oop_load(addr); @@ -242,7 +255,10 @@ inline oop ShenandoahBarrierSet::AccessBarrier::oop_loa template template inline void ShenandoahBarrierSet::AccessBarrier::oop_store_common(T* addr, oop value) { - shenandoah_assert_marked_if(nullptr, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress()); + shenandoah_assert_marked_if(nullptr, value, + !CompressedOops::is_null(value) && + ShenandoahHeap::heap()->is_evacuation_in_progress() && + !(ShenandoahHeap::heap()->is_gc_generation_young() && ShenandoahHeap::heap()->heap_region_containing(value)->is_old())); shenandoah_assert_not_in_cset_if(addr, value, value != nullptr && !ShenandoahHeap::heap()->cancelled_gc()); ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set(); bs->iu_barrier(value); @@ -263,6 +279,10 @@ inline void ShenandoahBarrierSet::AccessBarrier::oop_st shenandoah_assert_not_forwarded_except (addr, value, value == nullptr || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress()); oop_store_common(addr, value); + if (ShenandoahCardBarrier) { + ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); + bs->write_ref_field_post(addr); + } } template @@ -283,7 +303,11 @@ template inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { assert((decorators & (AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF)) == 0, "must be absent"); ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); - return bs->oop_cmpxchg(decorators, addr, compare_value, new_value); + oop result = bs->oop_cmpxchg(decorators, addr, compare_value, new_value); + if (ShenandoahCardBarrier) { + bs->write_ref_field_post(addr); + } + return result; } template @@ -291,7 +315,12 @@ inline oop ShenandoahBarrierSet::AccessBarrier::oop_ato assert((decorators & AS_NO_KEEPALIVE) == 0, "must be absent"); ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); DecoratorSet resolved_decorators = AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset); - return bs->oop_cmpxchg(resolved_decorators, AccessInternal::oop_field_addr(base, offset), compare_value, new_value); + auto addr = AccessInternal::oop_field_addr(base, offset); + oop result = bs->oop_cmpxchg(resolved_decorators, addr, compare_value, new_value); + if (ShenandoahCardBarrier) { + bs->write_ref_field_post(addr); + } + return result; } template @@ -307,7 +336,11 @@ template inline oop ShenandoahBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap(T* addr, oop new_value) { assert((decorators & (AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF)) == 0, "must be absent"); ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); - return bs->oop_xchg(decorators, addr, new_value); + oop result = bs->oop_xchg(decorators, addr, new_value); + if 
(ShenandoahCardBarrier) { + bs->write_ref_field_post(addr); + } + return result; } template @@ -315,7 +348,12 @@ inline oop ShenandoahBarrierSet::AccessBarrier::oop_ato assert((decorators & AS_NO_KEEPALIVE) == 0, "must be absent"); ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); DecoratorSet resolved_decorators = AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset); - return bs->oop_xchg(resolved_decorators, AccessInternal::oop_field_addr(base, offset), new_value); + auto addr = AccessInternal::oop_field_addr(base, offset); + oop result = bs->oop_xchg(resolved_decorators, addr, new_value); + if (ShenandoahCardBarrier) { + bs->write_ref_field_post(addr); + } + return result; } // Clone barrier support @@ -332,16 +370,23 @@ template bool ShenandoahBarrierSet::AccessBarrier::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) { + T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); + T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); + ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); - bs->arraycopy_barrier(arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw), - arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw), - length); - return Raw::oop_arraycopy_in_heap(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length); + bs->arraycopy_barrier(src, dst, length); + bool result = Raw::oop_arraycopy_in_heap(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length); + if (ShenandoahCardBarrier) { + bs->write_ref_array((HeapWord*) dst, length); + } + return result; } template void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) { - assert(HAS_FWD == _heap->has_forwarded_objects(), "Forwarded object status is sane"); + // We allow forwarding in young generation and marking in old generation + // to happen simultaneously. + assert(_heap->mode()->is_generational() || HAS_FWD == _heap->has_forwarded_objects(), "Forwarded object status is sane"); Thread* thread = Thread::current(); SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); @@ -361,7 +406,7 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) { ShenandoahHeap::atomic_update_oop(fwd, elem_ptr, o); obj = fwd; } - if (ENQUEUE && !ctx->is_marked_strong(obj)) { + if (ENQUEUE && !ctx->is_marked_strong_or_old(obj)) { _satb_mark_queue_set.enqueue_known_active(queue, obj); } } @@ -374,21 +419,80 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) { return; } int gc_state = _heap->gc_state(); - if ((gc_state & ShenandoahHeap::MARKING) != 0) { - arraycopy_marking(src, dst, count); - } else if ((gc_state & ShenandoahHeap::EVACUATION) != 0) { + if ((gc_state & ShenandoahHeap::YOUNG_MARKING) != 0) { + arraycopy_marking(src, dst, count, false); + return; + } + + if ((gc_state & ShenandoahHeap::EVACUATION) != 0) { arraycopy_evacuation(src, count); } else if ((gc_state & ShenandoahHeap::UPDATEREFS) != 0) { arraycopy_update(src, count); } + + if (_heap->mode()->is_generational()) { + assert(ShenandoahSATBBarrier, "Generational mode assumes SATB mode"); + // TODO: Could we optimize here by checking that dst is in an old region? 
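For readers tracing the ShenandoahCardBarrier additions through these accessors: every successful oop store, cmpxchg, or xchg is followed by write_ref_field_post, which dirties exactly one card. A minimal sketch of that lookup follows (illustrative only: the card geometry is an assumption mirroring the usual 512-byte HotSpot card, and addresses are modeled as plain integers rather than real heap pointers).

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint8_t CardValue;
    static const int       CARD_SHIFT = 9;  // assumption: 512-byte cards
    static const CardValue DIRTY      = 0;  // CardTable::dirty_card_val() is 0
    static const CardValue CLEAN      = 1;  // assumption: clean_card_val() is 1

    struct MiniCardTable {
      CardValue* byte_map_base;  // biased base: byte_for() is one shift plus one add

      CardValue* byte_for(uintptr_t field_addr) const {
        return &byte_map_base[field_addr >> CARD_SHIFT];
      }
      void write_ref_field_post(uintptr_t field_addr) {
        *byte_for(field_addr) = DIRTY;  // unconditionally dirty the covering card
      }
    };

    int main() {
      static CardValue map[1 << 11];          // covers a 1 MiB toy heap based at 0
      for (size_t i = 0; i < sizeof(map); i++) map[i] = CLEAN;
      MiniCardTable ct = { map };
      ct.write_ref_field_post(0x4200);        // field write at toy address 0x4200
      assert(map[0x4200 >> CARD_SHIFT] == DIRTY);
      return 0;
    }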
+ if ((gc_state & ShenandoahHeap::OLD_MARKING) != 0) { + // Note that we can't do the arraycopy marking using the 'src' array when + // SATB mode is enabled (so we can't do this as part of the iteration for + // evacuation or update references). + arraycopy_marking(src, dst, count, true); + } + } } template -void ShenandoahBarrierSet::arraycopy_marking(T* src, T* dst, size_t count) { +void ShenandoahBarrierSet::arraycopy_marking(T* src, T* dst, size_t count, bool is_old_marking) { assert(_heap->is_concurrent_mark_in_progress(), "only during marking"); - T* array = ShenandoahSATBBarrier ? dst : src; - if (!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast(array))) { - arraycopy_work(array, count); + /* + * Note that an old-gen object is considered live if it is live at the start of OLD marking or if it is promoted + * following the start of OLD marking. + * + * 1. Every object promoted following the start of OLD marking will be above TAMS within its old-gen region + * 2. Every object live at the start of OLD marking will be referenced from a "root" or it will be referenced from + * another live OLD-gen object. With regards to old-gen, roots include stack locations and all of live young-gen. + * All root references to old-gen are identified during a bootstrap young collection. All references from other + * old-gen objects will be marked during the traversal of all old objects, or will be marked by the SATB barrier. + * + * During old-gen marking (which is interleaved with young-gen collections), call arraycopy_work() if: + * + * 1. The overwritten array resides in old-gen and it is below TAMS within its old-gen region + * 2. Do not call arraycopy_work for any array residing in young-gen because young-gen collection is idle at this time + * + * During young-gen marking, call arraycopy_work() if: + * + * 1. The overwritten array resides in young-gen and is below TAMS within its young-gen region + * 2. 
Additionally, call arraycopy_work() if the array resides in old-gen, regardless of its relationship to TAMS, because an old-gen array + * may hold references to young-gen + */ + if (ShenandoahSATBBarrier) { + T* array = dst; + HeapWord* array_addr = reinterpret_cast<HeapWord*>(array); + ShenandoahHeapRegion* r = _heap->heap_region_containing(array_addr); + if (is_old_marking) { + // Generational, old marking + assert(_heap->mode()->is_generational(), "Invariant"); + if (r->is_old() && (array_addr < _heap->marking_context()->top_at_mark_start(r))) { + arraycopy_work(array, count); + } + } else if (_heap->mode()->is_generational()) { + // Generational, young marking + if (r->is_old() || (array_addr < _heap->marking_context()->top_at_mark_start(r))) { + arraycopy_work(array, count); + } + } else if (array_addr < _heap->marking_context()->top_at_mark_start(r)) { + // Non-generational, marking + arraycopy_work(array, count); + } + } else { + // Incremental Update mode, marking + T* array = src; + HeapWord* array_addr = reinterpret_cast<HeapWord*>(array); + ShenandoahHeapRegion* r = _heap->heap_region_containing(array_addr); + if (array_addr < _heap->marking_context()->top_at_mark_start(r)) { + arraycopy_work(array, count); + } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp index b548073be33..0ca2c6da539 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp @@ -104,8 +104,12 @@ void ShenandoahBarrierSet::clone_barrier(oop obj) { assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled"); shenandoah_assert_correct(nullptr, obj); + // We only need to handle YOUNG_MARKING here because the clone barrier + // is only invoked during marking if Shenandoah is in incremental update + // mode. OLD_MARKING should only happen when Shenandoah is in generational + // mode, which uses the SATB write barrier. int gc_state = _heap->gc_state(); - if ((gc_state & ShenandoahHeap::MARKING) != 0) { + if ((gc_state & ShenandoahHeap::YOUNG_MARKING) != 0) { clone_marking(obj); } else if ((gc_state & ShenandoahHeap::EVACUATION) != 0) { clone_evacuation(obj); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp new file mode 100644 index 00000000000..ef2d6e134b2 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
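The marking rules spelled out in the long arraycopy_marking comment above reduce to a small decision table. The sketch below (hypothetical helper names, not patch code) restates those branches as a pure function, which may be easier to check against the numbered cases.

    #include <cassert>

    enum class Gen { Young, Old };

    // Pure-function restatement of arraycopy_marking's dispatch:
    //  - SATB mode inspects the destination array; Incremental Update inspects the source.
    //  - old marking: only old arrays below TAMS need arraycopy_work.
    //  - generational young marking: any old array, or a young array below TAMS.
    //  - single-generation marking: any array below TAMS.
    static bool needs_arraycopy_work(bool satb, bool generational, bool is_old_marking,
                                     Gen region_gen, bool below_tams) {
      if (!satb) {
        return below_tams;                         // Incremental Update mode
      }
      if (is_old_marking) {
        return region_gen == Gen::Old && below_tams;
      }
      if (generational) {
        return region_gen == Gen::Old || below_tams;
      }
      return below_tams;
    }

    int main() {
      // Young marking must visit old arrays even above TAMS: they may point at young.
      assert(needs_arraycopy_work(true, true, false, Gen::Old, false));
      // Old marking ignores young arrays entirely.
      assert(!needs_arraycopy_work(true, true, true, Gen::Young, true));
      return 0;
    }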
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahCardStats.hpp" +#include "logging/log.hpp" + +#ifndef PRODUCT +void ShenandoahCardStats::log() const { + if (ShenandoahEnableCardStats) { + log_info(gc,remset)("Card stats: dirty " SIZE_FORMAT " (max run: " SIZE_FORMAT ")," + " clean " SIZE_FORMAT " (max run: " SIZE_FORMAT ")," + " dirty scans/objs " SIZE_FORMAT, + _dirty_card_cnt, _max_dirty_run, + _clean_card_cnt, _max_clean_run, + _dirty_scan_obj_cnt); + } +} +#endif // !PRODUCT + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCardStats.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.hpp new file mode 100644 index 00000000000..de21e226acb --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.hpp @@ -0,0 +1,132 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCARDSTATS_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHCARDSTATS_HPP + +#include "gc/shared/gc_globals.hpp" +#include "gc/shenandoah/shenandoahNumberSeq.hpp" + +enum CardStatType { + DIRTY_RUN = 0, + CLEAN_RUN = 1, + DIRTY_CARDS = 2, + CLEAN_CARDS = 3, + MAX_DIRTY_RUN = 4, + MAX_CLEAN_RUN = 5, + DIRTY_SCAN_OBJS = 6, + ALTERNATIONS = 7, + MAX_CARD_STAT_TYPE = 8 +}; + +enum CardStatLogType { + CARD_STAT_SCAN_RS = 0, + CARD_STAT_UPDATE_REFS = 1, + MAX_CARD_STAT_LOG_TYPE = 2 +}; + +class ShenandoahCardStats: public CHeapObj { +private: + size_t _cards_in_cluster; + HdrSeq* _local_card_stats; + + size_t _dirty_card_cnt; + size_t _clean_card_cnt; + + size_t _max_dirty_run; + size_t _max_clean_run; + + size_t _dirty_scan_obj_cnt; + + size_t _alternation_cnt; + +public: + ShenandoahCardStats(size_t cards_in_cluster, HdrSeq* card_stats) : + _cards_in_cluster(cards_in_cluster), + _local_card_stats(card_stats), + _dirty_card_cnt(0), + _clean_card_cnt(0), + _max_dirty_run(0), + _max_clean_run(0), + _dirty_scan_obj_cnt(0), + _alternation_cnt(0) + { } + + ~ShenandoahCardStats() { + record(); + } + + void record() { + if (ShenandoahEnableCardStats) { + // Update global stats for distribution of dirty/clean cards as a percentage of chunk + _local_card_stats[DIRTY_CARDS].add(percent_of(_dirty_card_cnt, _cards_in_cluster)); + _local_card_stats[CLEAN_CARDS].add(percent_of(_clean_card_cnt, _cards_in_cluster)); + + // Update global stats for max dirty/clean run distribution as a percentage of chunk + _local_card_stats[MAX_DIRTY_RUN].add(percent_of(_max_dirty_run, _cards_in_cluster)); + _local_card_stats[MAX_CLEAN_RUN].add(percent_of(_max_clean_run, _cards_in_cluster)); + + // Update global stats for dirty obj scan counts + _local_card_stats[DIRTY_SCAN_OBJS].add(_dirty_scan_obj_cnt); + + // Update global stats for alternation counts + _local_card_stats[ALTERNATIONS].add(_alternation_cnt); + } + } + +public: + inline void record_dirty_run(size_t len) { + if (ShenandoahEnableCardStats) { + _alternation_cnt++; + if (len > _max_dirty_run) { + _max_dirty_run = len; + } + _dirty_card_cnt += len; + assert(len <= _cards_in_cluster, "Error"); + _local_card_stats[DIRTY_RUN].add(percent_of(len, _cards_in_cluster)); + } + } + + inline void record_clean_run(size_t len) { + if (ShenandoahEnableCardStats) { + _alternation_cnt++; + if (len > _max_clean_run) { + _max_clean_run = len; + } + _clean_card_cnt += len; + assert(len <= _cards_in_cluster, "Error"); + _local_card_stats[CLEAN_RUN].add(percent_of(len, _cards_in_cluster)); + } + } + + inline void record_scan_obj_cnt(size_t i) { + if (ShenandoahEnableCardStats) { + _dirty_scan_obj_cnt += i; + } + } + + void log() const PRODUCT_RETURN; +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCARDSTATS_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCardTable.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCardTable.cpp new file mode 100644 index 00000000000..1c76847ea68 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahCardTable.cpp @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
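Since ShenandoahCardStats normalizes everything to a percentage of the cluster, a quick worked example may help. This toy program (plain accumulators standing in for HdrSeq, and ShenandoahEnableCardStats assumed on) feeds in one alternating card pattern and prints the derived figures.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t cards_in_cluster = 64;  // assumption: cluster width in cards
      size_t dirty = 0, clean = 0, max_dirty = 0, max_clean = 0, alternations = 0;

      // Pattern as record_dirty_run/record_clean_run would see it:
      // 8 dirty, 24 clean, 32 dirty. {is_dirty, length}
      size_t runs[3][2] = {{1, 8}, {0, 24}, {1, 32}};
      for (auto& r : runs) {
        alternations++;
        if (r[0]) { dirty += r[1]; if (r[1] > max_dirty) max_dirty = r[1]; }
        else      { clean += r[1]; if (r[1] > max_clean) max_clean = r[1]; }
      }
      printf("dirty %.1f%% (max run %.1f%%), clean %.1f%%, alternations %zu\n",
             100.0 * dirty / cards_in_cluster, 100.0 * max_dirty / cards_in_cluster,
             100.0 * clean / cards_in_cluster, alternations);
      return 0;  // prints: dirty 62.5% (max run 50.0%), clean 37.5%, alternations 3
    }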
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahCardTable.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "runtime/init.hpp" +#include "services/memTracker.hpp" + +void ShenandoahCardTable::initialize() { + size_t num_cards = cards_required(_whole_heap.word_size()); + + // each card takes 1 byte; + 1 for the guard card + size_t num_bytes = num_cards + 1; + const size_t granularity = os::vm_allocation_granularity(); + _byte_map_size = align_up(num_bytes, MAX2(_page_size, granularity)); + + HeapWord* low_bound = _whole_heap.start(); + HeapWord* high_bound = _whole_heap.end(); + + // TODO: Why is rs_align 0 when _page_size == os::vm_page_size()? + // The ReservedSpace constructor would assert rs_align >= os::vm_page_size(). + const size_t rs_align = _page_size == os::vm_page_size() ? 0 : MAX2(_page_size, granularity); + + ReservedSpace write_space(_byte_map_size, rs_align, _page_size); + initialize(write_space); + + // The assembler store_check code will do an unsigned shift of the oop, + // then add it to _byte_map_base, i.e. 
+ // + // _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift) + _byte_map = (CardValue*) write_space.base(); + _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift); + assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); + assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map"); + + CardValue* guard_card = &_byte_map[num_cards]; + assert(is_aligned(guard_card, _page_size), "must be on its own OS page"); + _guard_region = MemRegion((HeapWord*)guard_card, _page_size); + + _write_byte_map = _byte_map; + _write_byte_map_base = _byte_map_base; + + ReservedSpace read_space(_byte_map_size, rs_align, _page_size); + initialize(read_space); + + _read_byte_map = (CardValue*) read_space.base(); + _read_byte_map_base = _read_byte_map - (uintptr_t(low_bound) >> card_shift()); + assert(read_byte_for(low_bound) == &_read_byte_map[0], "Checking start of map"); + assert(read_byte_for(high_bound-1) <= &_read_byte_map[last_valid_index()], "Checking end of map"); + + _covered[0] = _whole_heap; + + log_trace(gc, barrier)("ShenandoahCardTable::ShenandoahCardTable:"); + log_trace(gc, barrier)(" &_write_byte_map[0]: " INTPTR_FORMAT " &_write_byte_map[_last_valid_index]: " INTPTR_FORMAT, + p2i(&_write_byte_map[0]), p2i(&_write_byte_map[last_valid_index()])); + log_trace(gc, barrier)(" _write_byte_map_base: " INTPTR_FORMAT, p2i(_write_byte_map_base)); + log_trace(gc, barrier)(" &_read_byte_map[0]: " INTPTR_FORMAT " &_read_byte_map[_last_valid_index]: " INTPTR_FORMAT, + p2i(&_read_byte_map[0]), p2i(&_read_byte_map[last_valid_index()])); + log_trace(gc, barrier)(" _read_byte_map_base: " INTPTR_FORMAT, p2i(_read_byte_map_base)); + + // TODO: As currently implemented, we do not swap pointers between _read_byte_map and _write_byte_map + // because the mutator write barrier hard codes the address of the _write_byte_map_base. Instead, + // the current implementation simply copies contents of _write_byte_map onto _read_byte_map and cleans + // the entirety of _write_byte_map at the init_mark safepoint. + // + // If we choose to modify the mutator write barrier so that we can swap _read_byte_map_base and + // _write_byte_map_base pointers, we may also have to figure out certain details about how the + // _guard_region is implemented so that we can replicate the read and write versions of this region. + // + // Alternatively, we may switch to a SATB-based write barrier and replace the direct card-marking + // remembered set with something entirely different. 
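The "unsigned shift, then add" comment above is the whole trick: _byte_map_base is pre-biased by the heap base so the barrier needs no subtraction at store time. A compact sketch follows (toy addresses; the out-of-range intermediate pointer mirrors what HotSpot itself computes).

    #include <cassert>
    #include <cstdint>

    int main() {
      const int       CARD_SHIFT = 9;          // assumption: 512-byte cards
      const uintptr_t LOW_BOUND  = 0x100000;   // toy heap base
      static uint8_t byte_map[4096];           // one byte per card: 4096 cards = 2 MiB

      // _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift)
      uint8_t* byte_map_base = byte_map - (LOW_BOUND >> CARD_SHIFT);

      // byte_for(p) is then a single shift-and-add, which is exactly what the
      // assembler store_check emits.
      uintptr_t p = LOW_BOUND;                               // first heap address
      assert(&byte_map_base[p >> CARD_SHIFT] == &byte_map[0]);
      p = LOW_BOUND + 511;                                   // still the first card
      assert(&byte_map_base[p >> CARD_SHIFT] == &byte_map[0]);
      return 0;
    }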
+} + +void ShenandoahCardTable::initialize(const ReservedSpace& card_table) { + MemTracker::record_virtual_memory_type((address)card_table.base(), mtGC); + + os::trace_page_sizes("Card Table", _byte_map_size, _byte_map_size, + _page_size, card_table.base(), card_table.size()); + if (!card_table.is_reserved()) { + vm_exit_during_initialization("Could not reserve enough space for the card marking array"); + } + os::commit_memory_or_exit(card_table.base(), _byte_map_size, card_table.alignment(), false, + "Cannot commit memory for card table"); +} + +bool ShenandoahCardTable::is_in_young(const void* obj) const { + return ShenandoahHeap::heap()->is_in_young(obj); +} + +CardValue* ShenandoahCardTable::read_byte_for(const void* p) { + CardValue* result = &_read_byte_map_base[uintptr_t(p) >> _card_shift]; + assert(result >= _read_byte_map && result < _read_byte_map + _byte_map_size, + "out of bounds accessor for card marking array"); + return result; +} + +size_t ShenandoahCardTable::last_valid_index() { + return CardTable::last_valid_index(); +} + +// TODO: This service is not currently used because we are not able to swap _read_byte_map_base and +// _write_byte_map_base pointers. If we were able to do so, we would invoke clear_read_table "immediately" +// following the end of concurrent remembered set scanning so that this read card table would be ready +// to serve as the new write card table at the time these pointer values were next swapped. +// +// In the current implementation, the write-table is cleared immediately after its contents are copied to +// the read table, obviating the need for this service. +void ShenandoahCardTable::clear_read_table() { + for (size_t i = 0; i < _byte_map_size; i++) { + _read_byte_map[i] = clean_card; + } +} + +// TODO: This service is not currently used because the mutator write barrier implementation hard codes the +// location of the _write_byte_map_base. If we change the mutator's write barrier implementation, then we +// may use this service to exchange the roles of the read-card-table and write-card-table. +void ShenandoahCardTable::swap_card_tables() { + shenandoah_assert_safepoint(); + + CardValue* save_value = _read_byte_map; + _read_byte_map = _write_byte_map; + _write_byte_map = save_value; + + save_value = _read_byte_map_base; + _read_byte_map_base = _write_byte_map_base; + _write_byte_map_base = save_value; + + // update the superclass instance variables + _byte_map = _write_byte_map; + _byte_map_base = _write_byte_map_base; +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCardTable.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCardTable.hpp new file mode 100644 index 00000000000..dc6b95ea26a --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahCardTable.hpp @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCARDTABLE_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCARDTABLE_HPP + +#include "gc/g1/g1RegionToSpaceMapper.hpp" +#include "gc/shared/cardTable.hpp" +#include "oops/oopsHierarchy.hpp" +#include "utilities/macros.hpp" + +class ShenandoahCardTable: public CardTable { + friend class VMStructs; + +protected: + // We maintain two copies of the card table to facilitate concurrent remembered set scanning + // and concurrent clearing of stale remembered set information. During the init_mark safepoint, + // we copy the contents of _write_byte_map to _read_byte_map and clear _write_byte_map. + // + // Concurrent remembered set scanning reads from _read_byte_map while concurrent mutator write + // barriers are overwriting cards of the _write_byte_map with DIRTY codes. Concurrent remembered + // set scanning also overwrites cards of the _write_byte_map with DIRTY codes whenever it discovers + // interesting pointers. + // + // During a concurrent update-references phase, we scan the _write_byte_map concurrently to find + // all old-gen references that may need to be updated. + // + // In a future implementation, we may swap the values of _read_byte_map and _write_byte_map during + // the init-mark safepoint to avoid the need for bulk STW copying and initialization. Doing so + // requires a change to the implementation of mutator write barriers as the address of the card + // table is currently in-lined and hard-coded. + CardValue* _read_byte_map; + CardValue* _write_byte_map; + CardValue* _read_byte_map_base; + CardValue* _write_byte_map_base; + +public: + ShenandoahCardTable(MemRegion whole_heap) : CardTable(whole_heap) { } + + virtual void initialize(); + + virtual bool is_in_young(const void* obj) const; + + CardValue* read_byte_for(const void* p); + + size_t last_valid_index(); + + void clear_read_table(); + + // Exchange the roles of the read and write card tables. + void swap_card_tables(); + + CardValue* read_byte_map() { + return _read_byte_map; + } + + CardValue* write_byte_map() { + return _write_byte_map; + } + + CardValue* write_byte_map_base() { + return _write_byte_map_base; + } + +private: + void initialize(const ReservedSpace& card_table); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCARDTABLE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp index 6674c40f768..45efab8a975 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
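To summarize the read/write table protocol the class comment above describes: the write table is the mutators' target, and at the init-mark safepoint its contents are published to the read table and then cleaned, giving concurrent remembered-set scanning a stable snapshot. A simplified sketch (std::vector stand-ins; the real code copies raw byte maps, and would prefer a pointer swap if the hard-coded barrier allowed it):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef uint8_t CardValue;
    static const CardValue DIRTY = 0;  // dirty_card_val() is 0
    static const CardValue CLEAN = 1;  // assumption: clean value

    struct TwoCardTables {
      std::vector<CardValue> read_map, write_map;
      explicit TwoCardTables(size_t cards) : read_map(cards, CLEAN), write_map(cards, CLEAN) {}

      // At the init-mark safepoint: publish the mutator table to scanners, then clean it.
      void snapshot_and_clean() {
        std::copy(write_map.begin(), write_map.end(), read_map.begin());
        std::fill(write_map.begin(), write_map.end(), CLEAN);
      }
    };

    int main() {
      TwoCardTables t(16);
      t.write_map[3] = DIRTY;           // mutator dirtied a card before the safepoint
      t.snapshot_and_clean();
      assert(t.read_map[3] == DIRTY);   // scanners see the snapshot...
      assert(t.write_map[3] == CLEAN);  // ...while mutators start from a clean slate
      return 0;
    }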
* * This code is free software; you can redistribute it and/or modify it @@ -48,7 +49,7 @@ bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) { } obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); shenandoah_assert_not_forwarded_if(nullptr, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress()); - return _mark_context->is_marked(obj); + return _mark_context->is_marked_or_old(obj); } ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() : @@ -60,7 +61,7 @@ bool ShenandoahIsAliveClosure::do_object_b(oop obj) { return false; } shenandoah_assert_not_forwarded(nullptr, obj); - return _mark_context->is_marked(obj); + return _mark_context->is_marked_or_old(obj); } BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() { @@ -88,7 +89,7 @@ void ShenandoahKeepAliveClosure::do_oop(narrowOop* p) { template void ShenandoahKeepAliveClosure::do_oop_work(T* p) { assert(ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Only for concurrent marking phase"); - assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected"); + assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress() || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected"); T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index cc5e58aee25..ba876856d63 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,9 +41,13 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)), _biased_cset_map(_map_space.base()), _heap(heap), + _has_old_regions(false), _garbage(0), _used(0), + _live(0), _region_count(0), + _old_garbage(0), + _preselected_regions(nullptr), _current_index(0) { // The collection set map is reserved to cover the entire heap *and* zero addresses. @@ -83,17 +88,35 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); assert(Thread::current()->is_VM_thread(), "Must be VMThread"); assert(!is_in(r), "Already in collection set"); + assert(!r->is_humongous(), "Only add regular regions to the collection set"); + _cset_map[r->index()] = 1; + size_t live = r->get_live_data_bytes(); + size_t garbage = r->garbage(); + size_t free = r->free(); + if (r->is_young()) { + _young_bytes_to_evacuate += live; + _young_available_bytes_collected += free; + if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahHeap::heap()->age_census()->tenuring_threshold()) { + _young_bytes_to_promote += live; + } + } else if (r->is_old()) { + _old_bytes_to_evacuate += live; + _old_garbage += garbage; + } + _region_count++; - _garbage += r->garbage(); + _has_old_regions |= r->is_old(); + _garbage += garbage; _used += r->used(); - + _live += live; // Update the region status too. State transition would be checked internally. 
r->make_cset(); } void ShenandoahCollectionSet::clear() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + Copy::zero_to_bytes(_cset_map, _map_size); #ifdef ASSERT @@ -103,10 +126,20 @@ void ShenandoahCollectionSet::clear() { #endif _garbage = 0; + _old_garbage = 0; _used = 0; + _live = 0; _region_count = 0; _current_index = 0; + + _young_bytes_to_evacuate = 0; + _young_bytes_to_promote = 0; + _old_bytes_to_evacuate = 0; + + _young_available_bytes_collected = 0; + + _has_old_regions = false; } ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { @@ -150,7 +183,11 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::next() { } void ShenandoahCollectionSet::print_on(outputStream* out) const { - out->print_cr("Collection Set : " SIZE_FORMAT "", count()); + out->print_cr("Collection Set: Regions: " + SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(), + byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()), + byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()), + byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); debug_only(size_t regions = 0;) for (size_t index = 0; index < _heap->num_regions(); index ++) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp index 8ac2d9fb2ea..ae1971f30d6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +34,14 @@ class ShenandoahCollectionSet : public CHeapObj { friend class ShenandoahHeap; + friend class ShenandoahCollectionSetPreselector; + + void establish_preselected(bool *preselected) { + assert(_preselected_regions == nullptr, "Over-writing"); + _preselected_regions = preselected; + } + void abandon_preselected() { _preselected_regions = nullptr; } + private: size_t const _map_size; size_t const _region_size_bytes_shift; @@ -43,10 +52,28 @@ class ShenandoahCollectionSet : public CHeapObj { ShenandoahHeap* const _heap; + bool _has_old_regions; size_t _garbage; size_t _used; + size_t _live; size_t _region_count; + size_t _young_bytes_to_evacuate; + size_t _young_bytes_to_promote; + size_t _old_bytes_to_evacuate; + + // How many bytes of old garbage are present in a mixed collection set? + size_t _old_garbage; + + // Points to array identifying which tenure-age regions have been preselected + // for inclusion in collection set. This field is only valid during brief + // spans of time while collection set is being constructed. + bool* _preselected_regions; + + // When a region having memory available to be allocated is added to the collection set, the region's available memory + // should be subtracted from what's available. + size_t _young_available_bytes_collected; + shenandoah_padding(0); volatile size_t _current_index; shenandoah_padding(1); @@ -77,8 +104,31 @@ class ShenandoahCollectionSet : public CHeapObj { void print_on(outputStream* out) const; - size_t used() const { return _used; } - size_t garbage() const { return _garbage; } + // It is not known how many of these bytes will be promoted. 
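The add_region accounting above splits each region's live bytes three ways. The sketch below (hypothetical stand-in types, not patch code) condenses that bookkeeping; note how the young evacuation reserve later subtracts the promotion share, matching the inline getter definitions that follow.

    #include <cstddef>

    struct RegionInfo { bool young; size_t live, garbage, free, age; };

    struct CsetTotals {
      size_t young_evac = 0, young_promote = 0, old_evac = 0, old_garbage = 0, young_avail = 0;

      void add(const RegionInfo& r, size_t tenuring_threshold, bool generational) {
        if (r.young) {
          young_evac  += r.live;
          young_avail += r.free;   // free space in cset regions comes back to the free set
          if (generational && r.age >= tenuring_threshold) {
            young_promote += r.live;  // promoted to old rather than re-evacuated young
          }
        } else {
          old_evac    += r.live;
          old_garbage += r.garbage;   // garbage reclaimed by a mixed collection
        }
      }
      // Mirrors get_young_bytes_reserved_for_evacuation(): promotion bytes excluded.
      size_t young_bytes_reserved() const { return young_evac - young_promote; }
    };

    int main() {
      CsetTotals t;
      t.add({true, 1000, 200, 300, 7}, /*tenuring_threshold*/ 7, /*generational*/ true);
      return t.young_bytes_reserved() == 0 ? 0 : 1;  // all 1000 live bytes head to old
    }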
+ inline size_t get_young_bytes_reserved_for_evacuation(); + inline size_t get_old_bytes_reserved_for_evacuation(); + + inline size_t get_young_bytes_to_be_promoted(); + + size_t get_young_available_bytes_collected() { return _young_available_bytes_collected; } + + inline size_t get_old_garbage(); + + bool is_preselected(size_t region_idx) { + assert(_preselected_regions != nullptr, "Missing establish after abandon"); + return _preselected_regions[region_idx]; + } + + bool* preselected_regions() { + assert(_preselected_regions != nullptr, "Null ptr"); + return _preselected_regions; + } + + bool has_old_regions() const { return _has_old_regions; } + size_t used() const { return _used; } + size_t live() const { return _live; } + size_t garbage() const { return _garbage; } + void clear(); private: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp index 6eb026561e4..3779e268ace 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,4 +54,20 @@ bool ShenandoahCollectionSet::is_in_loc(void* p) const { return _biased_cset_map[index] == 1; } +size_t ShenandoahCollectionSet::get_old_bytes_reserved_for_evacuation() { + return _old_bytes_to_evacuate; +} + +size_t ShenandoahCollectionSet::get_young_bytes_reserved_for_evacuation() { + return _young_bytes_to_evacuate - _young_bytes_to_promote; +} + +size_t ShenandoahCollectionSet::get_young_bytes_to_be_promoted() { + return _young_bytes_to_promote; +} + +size_t ShenandoahCollectionSet::get_old_garbage() { + return _old_garbage; +} + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp new file mode 100644 index 00000000000..d038695a39e --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSetPreselector.hpp @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP + +#include "gc/shenandoah/shenandoahCollectionSet.hpp" + +class ShenandoahCollectionSetPreselector : public StackObj { + ShenandoahCollectionSet* _cset; + bool* _pset; +public: + ShenandoahCollectionSetPreselector(ShenandoahCollectionSet* cset, size_t num_regions): + _cset(cset) { + _pset = NEW_RESOURCE_ARRAY(bool, num_regions); + for (unsigned int i = 0; i < num_regions; i++) { + _pset[i] = false; + } + _cset->establish_preselected(_pset); + } + + ~ShenandoahCollectionSetPreselector() { + _cset->abandon_preselected(); + } +}; + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSETPRESELECTOR_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp index 7a034e70936..194f45511b5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,8 +32,15 @@ ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : _success_concurrent_gcs(0), + _mixed_gcs(0), + _abbreviated_concurrent_gcs(0), + _abbreviated_degenerated_gcs(0), + _success_old_gcs(0), + _interrupted_old_gcs(0), _success_degenerated_gcs(0), _success_full_gcs(0), + _consecutive_young_gcs(0), + _consecutive_degenerated_gcs(0), _alloc_failure_degenerated(0), _alloc_failure_degenerated_upgrade_to_full(0), _alloc_failure_full(0), @@ -75,18 +83,55 @@ void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahGC } void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() { + _consecutive_degenerated_gcs = 0; _alloc_failure_degenerated_upgrade_to_full++; } -void ShenandoahCollectorPolicy::record_success_concurrent() { +void ShenandoahCollectorPolicy::record_success_concurrent(bool is_young, bool is_abbreviated) { + update_young(is_young); + + _consecutive_degenerated_gcs = 0; _success_concurrent_gcs++; + if (is_abbreviated) { + _abbreviated_concurrent_gcs++; + } +} + +void ShenandoahCollectorPolicy::record_mixed_cycle() { + _mixed_gcs++; +} + +void ShenandoahCollectorPolicy::record_success_old() { + _consecutive_young_gcs = 0; + _success_old_gcs++; +} + +void ShenandoahCollectorPolicy::record_interrupted_old() { + _consecutive_young_gcs = 0; + _interrupted_old_gcs++; } -void ShenandoahCollectorPolicy::record_success_degenerated() { +void ShenandoahCollectorPolicy::record_success_degenerated(bool is_young, bool is_abbreviated) { + update_young(is_young); + _success_degenerated_gcs++; + _consecutive_degenerated_gcs++; + if (is_abbreviated) { + _abbreviated_degenerated_gcs++; + } +} + +void ShenandoahCollectorPolicy::update_young(bool is_young) { + if (is_young) { + _consecutive_young_gcs++; + } else { + _consecutive_young_gcs = 0; + } } void ShenandoahCollectorPolicy::record_success_full() { + _consecutive_degenerated_gcs = 0; + _consecutive_young_gcs = 0; _success_full_gcs++; } @@ -110,28 +155,41 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle"); out->print_cr("under stop-the-world pause 
or result in stop-the-world Full GC. Increase heap size,"); out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate"); - out->print_cr("to avoid Degenerated and Full GC cycles."); + out->print_cr("to avoid Degenerated and Full GC cycles. Abbreviated cycles are those which found"); + out->print_cr("enough regions with no live objects to skip evacuation."); out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_concurrent); + size_t completed_gcs = _success_full_gcs + _success_degenerated_gcs + _success_concurrent_gcs + _success_old_gcs; + out->print_cr(SIZE_FORMAT_W(5) " Completed GCs", completed_gcs); + out->print_cr(SIZE_FORMAT_W(5) " Successful Concurrent GCs (%.2f%%)", _success_concurrent_gcs, percent_of(_success_concurrent_gcs, completed_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", _explicit_concurrent, percent_of(_explicit_concurrent, _success_concurrent_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", _implicit_concurrent, percent_of(_implicit_concurrent, _success_concurrent_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_concurrent_gcs, percent_of(_abbreviated_concurrent_gcs, _success_concurrent_gcs)); out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs", _success_degenerated_gcs); - out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated); + if (ShenandoahHeap::heap()->mode()->is_generational()) { + out->print_cr(SIZE_FORMAT_W(5) " Completed Old GCs (%.2f%%)", _success_old_gcs, percent_of(_success_old_gcs, completed_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " mixed", _mixed_gcs); + out->print_cr(" " SIZE_FORMAT_W(5) " interruptions", _interrupted_old_gcs); + out->cr(); + } + + size_t degenerated_gcs = _alloc_failure_degenerated_upgrade_to_full + _success_degenerated_gcs; + out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs (%.2f%%)", degenerated_gcs, percent_of(degenerated_gcs, completed_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, degenerated_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_degenerated, percent_of(_alloc_failure_degenerated, degenerated_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_degenerated_gcs, percent_of(_abbreviated_degenerated_gcs, degenerated_gcs)); for (int c = 0; c < ShenandoahGC::_DEGENERATED_LIMIT; c++) { if (_degen_points[c] > 0) { const char* desc = ShenandoahGC::degen_point_to_string((ShenandoahGC::ShenandoahDegenPoint)c); out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_points[c], desc); } } - out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC", _alloc_failure_degenerated_upgrade_to_full); out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " Full GCs", _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_full); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_full); - out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full); - out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", 
_alloc_failure_degenerated_upgrade_to_full); + out->print_cr(SIZE_FORMAT_W(5) " Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", _explicit_full, percent_of(_explicit_full, _success_full_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", _implicit_full, percent_of(_implicit_full, _success_full_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_full, percent_of(_alloc_failure_full, _success_full_gcs)); + out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, _success_full_gcs)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp index a6ea6e976ae..77831e9604d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,9 +40,16 @@ class ShenandoahTracer : public GCTracer, public CHeapObj { class ShenandoahCollectorPolicy : public CHeapObj { private: size_t _success_concurrent_gcs; + size_t _mixed_gcs; + size_t _abbreviated_concurrent_gcs; + size_t _abbreviated_degenerated_gcs; + size_t _success_old_gcs; + size_t _interrupted_old_gcs; size_t _success_degenerated_gcs; // Written by control thread, read by mutators volatile size_t _success_full_gcs; + volatile size_t _consecutive_young_gcs; + uint _consecutive_degenerated_gcs; size_t _alloc_failure_degenerated; size_t _alloc_failure_degenerated_upgrade_to_full; size_t _alloc_failure_full; @@ -49,13 +57,12 @@ class ShenandoahCollectorPolicy : public CHeapObj { size_t _explicit_full; size_t _implicit_concurrent; size_t _implicit_full; + size_t _cycle_counter; size_t _degen_points[ShenandoahGC::_DEGENERATED_LIMIT]; ShenandoahSharedFlag _in_shutdown; - ShenandoahTracer* _tracer; - size_t _cycle_counter; public: ShenandoahCollectorPolicy(); @@ -64,8 +71,12 @@ class ShenandoahCollectorPolicy : public CHeapObj { // These two encompass the entire cycle. 
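The new counters above encode two streaks: consecutive young cycles (broken by any old or full cycle) and consecutive degenerated cycles (broken by a successful concurrent or full cycle, or by an upgrade to full). A condensed restatement with illustrative method names, matching the record_* bodies earlier in this patch:

    #include <cstddef>

    struct PolicyCounters {
      size_t consecutive_young = 0;
      size_t consecutive_degen = 0;

      void on_success_concurrent(bool is_young)  { bump_young(is_young); consecutive_degen = 0; }
      void on_success_degenerated(bool is_young) { bump_young(is_young); consecutive_degen++; }
      void on_degenerated_upgrade_to_full()      { consecutive_degen = 0; }
      void on_success_old()                      { consecutive_young = 0; }
      void on_success_full()                     { consecutive_young = 0; consecutive_degen = 0; }

    private:
      // A young cycle extends the streak; a global/old cycle resets it.
      void bump_young(bool is_young) { consecutive_young = is_young ? consecutive_young + 1 : 0; }
    };

    int main() {
      PolicyCounters pc;
      pc.on_success_concurrent(true);   // young streak: 1
      pc.on_success_degenerated(true);  // young streak: 2, degen streak: 1
      pc.on_success_old();              // an old cycle breaks the young streak
      return (pc.consecutive_young == 0 && pc.consecutive_degen == 1) ? 0 : 1;
    }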
void record_cycle_start(); - void record_success_concurrent(); - void record_success_degenerated(); + void record_mixed_cycle(); + + void record_success_concurrent(bool is_young, bool is_abbreviated); + void record_success_old(); + void record_interrupted_old(); + void record_success_degenerated(bool is_young, bool is_abbreviated); void record_success_full(); void record_alloc_failure_to_degenerated(ShenandoahGC::ShenandoahDegenPoint point); void record_alloc_failure_to_full(); @@ -87,6 +98,17 @@ class ShenandoahCollectorPolicy : public CHeapObj { size_t full_gc_count() const { return _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full; } + + inline size_t consecutive_young_gc_count() const { + return _consecutive_young_gcs; + } + + inline size_t consecutive_degenerated_gc_count() const { + return _consecutive_degenerated_gcs; + } + +private: + void update_young(bool is_young); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 7564af5f6b7..5047b3a058b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +32,9 @@ #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "gc/shenandoah/shenandoahLock.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" @@ -85,21 +89,21 @@ class ShenandoahBreakpointMarkScope : public StackObj { } }; -ShenandoahConcurrentGC::ShenandoahConcurrentGC() : - _mark(), - _degen_point(ShenandoahDegenPoint::_degenerated_unset) { +ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) : + _mark(generation), + _degen_point(ShenandoahDegenPoint::_degenerated_unset), + _abbreviated(false), + _do_old_gc_bootstrap(do_old_gc_bootstrap), + _generation(generation) { } ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const { return _degen_point; } -void ShenandoahConcurrentGC::cancel() { - ShenandoahConcurrentMark::cancel(); -} - bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { ShenandoahHeap* const heap = ShenandoahHeap::heap(); + ShenandoahBreakpointGCScope breakpoint_gc_scope(cause); // Reset for upcoming marking @@ -110,18 +114,45 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { { ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause); + + // Reset task queue stats here, rather than in mark_concurrent_roots, + // because remembered set scan will `push` oops into the queues and + // resetting after this happens will lose those counts. + TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats()); + + // Concurrent remembered set scanning + entry_scan_remembered_set(); + // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here. 
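A note on the control flow being assembled in collect() below: each concurrent phase is bracketed by a cancellation check that records a named degeneration point before bailing out, so a degenerated cycle knows where to resume. Schematically (illustrative types only, not patch code):

    enum ShenandoahDegenPoint { _degenerated_unset, _degenerated_roots,
                                _degenerated_mark, _degenerated_evac };

    struct MiniCycle {
      ShenandoahDegenPoint degen_point = _degenerated_unset;
      bool cancelled = false;  // set asynchronously (e.g. allocation failure) in the real heap

      bool check_cancellation_and_abort(ShenandoahDegenPoint point) {
        if (cancelled) {
          degen_point = point;  // remember where the degenerated cycle should resume
          return true;
        }
        return false;
      }

      bool collect() {
        // entry_mark_roots();
        if (check_cancellation_and_abort(_degenerated_roots)) return false;
        // entry_mark();
        if (check_cancellation_and_abort(_degenerated_mark)) return false;
        // evacuate and update refs ...
        if (check_cancellation_and_abort(_degenerated_evac)) return false;
        return true;
      }
    };

    int main() {
      MiniCycle c;
      c.cancelled = true;  // cancellation arrives before marking finishes
      return c.collect() ? 1 : (c.degen_point == _degenerated_roots ? 0 : 1);
    }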
+ // Concurrent mark roots entry_mark_roots(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false; + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) { + return false; + } // Continue concurrent mark entry_mark(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false; + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) { + return false; + } } // Complete marking under STW, and start evacuation vmop_entry_final_mark(); + // If GC was cancelled before final mark, then the safepoint operation will do nothing + // and the concurrent mark will still be in progress. In this case it is safe to resume + // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled + // after final mark (but before this check), then the final mark safepoint operation + // will have finished the mark (setting concurrent mark in progress to false). Final mark + // will also have set up state (in concurrent stack processing) that will not be safe to + // resume from the marking phase in the degenerated cycle. That is, if the cancellation + // occurred after final mark, we must resume the degenerated cycle after the marking phase. + if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) { + assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress"); + return false; + } + // Concurrent stack processing if (heap->is_evacuation_in_progress()) { entry_thread_roots(); @@ -134,10 +165,13 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { } // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim - // the space. This would be the last action if there is nothing to evacuate. + // the space. This would be the last action if there is nothing to evacuate. Note that + // we will not age young-gen objects in the case that we skip evacuation. entry_cleanup_early(); { + // TODO: Not sure there is value in logging free-set status right here. Note that whenever the free set is rebuilt, + // it logs the newly rebuilt status. ShenandoahHeapLocker locker(heap->lock()); heap->free_set()->log_status(); } @@ -161,25 +195,83 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { if (heap->is_evacuation_in_progress()) { // Concurrently evacuate entry_evacuate(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false; + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) { + return false; + } + } + if (heap->has_forwarded_objects()) { // Perform update-refs phase. vmop_entry_init_updaterefs(); entry_updaterefs(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false; + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { + return false; + } // Concurrent update thread roots entry_update_thread_roots(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false; + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { + return false; + } vmop_entry_final_updaterefs(); // Update references freed up collection set, kick the cleanup to reclaim the space. entry_cleanup_complete(); } else { + // We chose not to evacuate because we found sufficient immediate garbage. 
Note that we + // do not check for cancellation here because, at this point, the cycle is effectively + // complete. If the cycle has been cancelled here, the control thread will detect it + // on its next iteration and run a degenerated young cycle. vmop_entry_final_roots(); + _abbreviated = true; } + // We defer generation resizing actions until after cset regions have been recycled. We do this even following an + // abbreviated cycle. + if (heap->mode()->is_generational()) { + bool success; + size_t region_xfer; + const char* region_destination; + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahGeneration* old_gen = heap->old_generation(); + { + ShenandoahHeapLocker locker(heap->lock()); + + size_t old_region_surplus = heap->get_old_region_surplus(); + size_t old_region_deficit = heap->get_old_region_deficit(); + if (old_region_surplus) { + success = heap->generation_sizer()->transfer_to_young(old_region_surplus); + region_destination = "young"; + region_xfer = old_region_surplus; + } else if (old_region_deficit) { + success = heap->generation_sizer()->transfer_to_old(old_region_deficit); + region_destination = "old"; + region_xfer = old_region_deficit; + if (!success) { + ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand(); + } + } else { + region_destination = "none"; + region_xfer = 0; + success = true; + } + heap->set_old_region_surplus(0); + heap->set_old_region_deficit(0); + heap->set_young_evac_reserve(0); + heap->set_old_evac_reserve(0); + heap->set_promoted_reserve(0); + } + + // Report outside the heap lock + size_t young_available = young_gen->available(); + size_t old_available = old_gen->available(); + log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: " + SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s", + success? 
"successfully transferred": "failed to transfer", region_xfer, region_destination, + byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available), + byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available)); + } return true; } @@ -289,17 +381,47 @@ void ShenandoahConcurrentGC::entry_final_roots() { void ShenandoahConcurrentGC::entry_reset() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); + heap->try_inject_alloc_failure(); + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); - static const char* msg = "Concurrent reset"; - ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset); - EventMark em("%s", msg); + { + static const char* msg = "Concurrent reset"; + ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(heap->workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), + msg); + op_reset(); + } - ShenandoahWorkerScope scope(heap->workers(), - ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), - "concurrent reset"); + if (_do_old_gc_bootstrap) { + static const char* msg = "Concurrent reset (OLD)"; + ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old); + ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), + msg); + EventMark em("%s", msg); - heap->try_inject_alloc_failure(); - op_reset(); + heap->old_generation()->prepare_gc(); + } +} + +void ShenandoahConcurrentGC::entry_scan_remembered_set() { + if (_generation->is_young()) { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + const char* msg = "Concurrent remembered set scanning"; + ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(heap->workers(), + ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(), + msg); + + heap->try_inject_alloc_failure(); + _generation->scan_remembered_set(true /* is_concurrent */); + } } void ShenandoahConcurrentGC::entry_mark_roots() { @@ -480,8 +602,7 @@ void ShenandoahConcurrentGC::op_reset() { if (ShenandoahPacing) { heap->pacer()->setup_for_reset(); } - - heap->prepare_gc(); + _generation->prepare_gc(); } class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { @@ -494,7 +615,8 @@ class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionCl assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); if (r->is_active()) { // Check if region needs updating its TAMS. We have updated it already during concurrent - // reset, so it is very likely we don't need to do another write here. + // reset, so it is very likely we don't need to do another write here. Since most regions + // are not "active", this path is relatively rare. 
if (_ctx->top_at_mark_start(r) != r->top()) { _ctx->capture_top_at_mark_start(r); } @@ -516,10 +638,32 @@ void ShenandoahConcurrentGC::op_init_mark() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); - assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap"); - assert(!heap->marking_context()->is_complete(), "should not be complete"); + assert(_generation->is_bitmap_clear(), "need clear marking bitmap"); + assert(!_generation->is_mark_complete(), "should not be complete"); assert(!heap->has_forwarded_objects(), "No forwarded objects on this path"); + + if (heap->mode()->is_generational()) { + if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) { + // The current implementation of swap_remembered_set() copies the write-card-table + // to the read-card-table. The remembered sets are also swapped for GLOBAL collections + // so that the verifier works with the correct copy of the card table when verifying. + // TODO: This path should not really depend on ShenandoahVerify. + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset); + _generation->swap_remembered_set(); + } + + if (_generation->is_global()) { + heap->cancel_old_gc(); + } else if (heap->is_concurrent_old_mark_in_progress()) { + // Purge the SATB buffers, transferring any valid, old pointers to the + // old generation mark queue. Any pointers in a young region will be + // abandoned. + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb); + heap->transfer_old_pointers_from_satb(); + } + } + if (ShenandoahVerify) { heap->verifier()->verify_before_concmark(); } @@ -528,18 +672,29 @@ void ShenandoahConcurrentGC::op_init_mark() { Universe::verify(); } - heap->set_concurrent_mark_in_progress(true); + _generation->set_concurrent_mark_in_progress(true); start_mark(); - { + if (_do_old_gc_bootstrap) { + // Update region state for both young and old regions + // TODO: We should be able to pull this out of the safepoint for the bootstrap + // cycle. The top of an old region will only move when a GC cycle evacuates + // objects into it. When we start an old cycle, we know that nothing can touch + // the top of old regions. ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); ShenandoahInitMarkUpdateRegionStateClosure cl; heap->parallel_heap_region_iterate(&cl); + heap->old_generation()->ref_processor()->reset_thread_locals(); + } else { + // Update region state for only young regions + ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); + ShenandoahInitMarkUpdateRegionStateClosure cl; + _generation->parallel_heap_region_iterate(&cl); } // Weak reference processing - ShenandoahReferenceProcessor* rp = heap->ref_processor(); + ShenandoahReferenceProcessor* rp = _generation->ref_processor(); rp->reset_thread_locals(); rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs()); @@ -579,40 +734,128 @@ void ShenandoahConcurrentGC::op_final_mark() { // Notify JVMTI that the tagmap table will need cleaning. JvmtiTagMap::set_needs_cleaning(); - heap->prepare_regions_and_collection_set(true /*concurrent*/); + // The collection set is chosen by prepare_regions_and_collection_set(). + // + // TODO: Under severe memory overload conditions that can be checked here, we may want to limit + // the inclusion of old-gen candidates within the collection set. 
This would allow us to prioritize efforts on
+  // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
+  // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
+  // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
+  // collections are not triggering frequently enough).
+  _generation->prepare_regions_and_collection_set(true /*concurrent*/);
+
+  // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
+  // evacuation efforts that are about to begin. In particular:
+  //
+  // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
+  //   been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
+  //   of the live young-gen memory within the collection set. If there is more data ready to be promoted than
+  //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
+  //   pass.
+  //
+  // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
+  //   set aside to hold objects evacuated from the old-gen collection set.
+  //
+  // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
+  //   been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
+  //   equals the entire amount of live young-gen memory within the collection set, even though some of this memory
+  //   will likely be promoted.

   // Has to be done after cset selection
   heap->prepare_concurrent_roots();

-  if (!heap->collection_set()->is_empty()) {
-    if (ShenandoahVerify) {
-      heap->verifier()->verify_before_evacuation();
-    }
-
-    heap->set_evacuation_in_progress(true);
-    // From here on, we need to update references.
-    heap->set_has_forwarded_objects(true);
-
-    // Verify before arming for concurrent processing.
-    // Otherwise, verification can trigger stack processing.
-    if (ShenandoahVerify) {
-      heap->verifier()->verify_during_evacuation();
-    }
-
-    // Arm nmethods/stack for concurrent processing
-    ShenandoahCodeRoots::arm_nmethods_for_evac();
-    ShenandoahStackWatermark::change_epoch_id();
-
-    if (ShenandoahPacing) {
-      heap->pacer()->setup_for_evac();
+  if (heap->mode()->is_generational()) {
+    size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
+    size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
+    if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
+      // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
+      // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
+
+      LogTarget(Debug, gc, cset) lt;
+      if (lt.is_enabled()) {
+        ResourceMark rm;
+        LogStream ls(lt);
+        heap->collection_set()->print_on(&ls);
+      }
+
+      if (ShenandoahVerify) {
+        heap->verifier()->verify_before_evacuation();
+      }
+
+      heap->set_evacuation_in_progress(true);
+
+      // Verify before arming for concurrent processing.
+      // Otherwise, verification can trigger stack processing.
+      if (ShenandoahVerify) {
+        heap->verifier()->verify_during_evacuation();
+      }
+
+      // Generational mode may promote objects in place during the evacuation phase.
+ // If that is the only reason we are evacuating, we don't need to update references + // and there will be no forwarded objects on the heap. + heap->set_has_forwarded_objects(!heap->collection_set()->is_empty()); + + // Arm nmethods/stack for concurrent processing + if (!heap->collection_set()->is_empty()) { + // Iff objects will be evaluated, arm the nmethod barriers. These will be disarmed + // under the same condition (established in prepare_concurrent_roots) after strong + // root evacuation has completed (see op_strong_roots). + ShenandoahCodeRoots::arm_nmethods_for_evac(); + ShenandoahStackWatermark::change_epoch_id(); + } + + if (ShenandoahPacing) { + heap->pacer()->setup_for_evac(); + } + } else { + if (ShenandoahVerify) { + heap->verifier()->verify_after_concmark(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } } } else { - if (ShenandoahVerify) { - heap->verifier()->verify_after_concmark(); - } - - if (VerifyAfterGC) { - Universe::verify(); + // Not is_generational() + if (!heap->collection_set()->is_empty()) { + LogTarget(Info, gc, ergo) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(lt); + heap->collection_set()->print_on(&ls); + } + + if (ShenandoahVerify) { + heap->verifier()->verify_before_evacuation(); + } + + heap->set_evacuation_in_progress(true); + + // Verify before arming for concurrent processing. + // Otherwise, verification can trigger stack processing. + if (ShenandoahVerify) { + heap->verifier()->verify_during_evacuation(); + } + + // From here on, we need to update references. + heap->set_has_forwarded_objects(true); + + // Arm nmethods/stack for concurrent processing + ShenandoahCodeRoots::arm_nmethods_for_evac(); + ShenandoahStackWatermark::change_epoch_id(); + + if (ShenandoahPacing) { + heap->pacer()->setup_for_evac(); + } + } else { + if (ShenandoahVerify) { + heap->verifier()->verify_after_concmark(); + } + + if (VerifyAfterGC) { + Universe::verify(); + } } } } @@ -634,6 +877,7 @@ ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(Oop void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) { JavaThread* const jt = JavaThread::cast(thread); StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc); + ShenandoahThreadLocalData::enable_plab_promotions(thread); } class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask { @@ -647,6 +891,9 @@ class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask { } void work(uint worker_id) { + Thread* worker_thread = Thread::current(); + ShenandoahThreadLocalData::enable_plab_promotions(worker_thread); + // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure. 
// Otherwise, may deadlock with watermark lock ShenandoahContextEvacuateUpdateRootsClosure oops_cl; @@ -671,7 +918,7 @@ void ShenandoahConcurrentGC::op_weak_refs() { if (heap->gc_cause() == GCCause::_wb_breakpoint) { ShenandoahBreakpoint::at_after_reference_processing_started(); } - heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */); + _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */); } class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure { @@ -698,8 +945,15 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) { const oop obj = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(obj)) { if (!_mark_context->is_marked(obj)) { - shenandoah_assert_correct(p, obj); - ShenandoahHeap::atomic_clear_oop(p, obj); + if (_heap->is_in_active_generation(obj)) { + // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'. + // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for + // accessing from-space objects during class unloading. However, the from-space object may have + // been "filled". We've made no effort to prevent old generation classes being unloaded by young + // gen (and vice-versa). + shenandoah_assert_correct(p, obj); + ShenandoahHeap::atomic_clear_oop(p, obj); + } } else if (_evac_in_progress && _heap->in_collection_set(obj)) { oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (resolved == obj) { @@ -925,7 +1179,9 @@ void ShenandoahConcurrentGC::op_init_updaterefs() { heap->set_concurrent_weak_root_in_progress(false); heap->prepare_update_heap_references(true /*concurrent*/); heap->set_update_refs_in_progress(true); - + if (ShenandoahVerify) { + heap->verifier()->verify_before_updaterefs(); + } if (ShenandoahPacing) { heap->pacer()->setup_for_updaterefs(); } @@ -970,7 +1226,7 @@ void ShenandoahConcurrentGC::op_final_updaterefs() { // Clear cancelled GC, if set. On cancellation path, the block before would handle // everything. if (heap->cancelled_gc()) { - heap->clear_cancelled_gc(); + heap->clear_cancelled_gc(true /* clear oom handler */); } // Has to be done before cset is clear @@ -978,11 +1234,33 @@ void ShenandoahConcurrentGC::op_final_updaterefs() { heap->verifier()->verify_roots_in_to_space(); } + if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) { + // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to + // objects in the collection set. After those objects are evacuated, the pointers in the + // SATB are no longer safe. Once we have finished update references, we are guaranteed that + // no more writes to the collection set are possible. + // + // This will transfer any old pointers in _active_ regions from the SATB to the old gen + // mark queues. All other pointers will be discarded. This would also discard any pointers + // in old regions that were included in a mixed evacuation. We aren't using the SATB filter + // methods here because we cannot control when they execute. If the SATB filter runs _after_ + // a region has been recycled, we will not be able to detect the bad pointer. + // + // We are not concerned about skipping this step in abbreviated cycles because regions + // with no live objects cannot have been written to and so cannot have entries in the SATB + // buffers. 
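
The transfer just described is essentially a filter over SATB entries. A self-contained sketch of that idea, with toy types standing in for the real heap and queues (RegionKind and ToyHeap are illustrative, not HotSpot APIs):

#include <cstdint>
#include <deque>
#include <vector>

enum class RegionKind { Young, OldActive, OldRecycled };

// Toy heap: maps an address to the kind of region containing it.
struct ToyHeap {
  std::vector<RegionKind> kinds;
  std::uintptr_t region_size = 1024;

  RegionKind region_of(std::uintptr_t addr) const {
    return kinds.at(addr / region_size);   // caller guarantees addr is in range
  }
};

// Drain a SATB-style buffer: only pointers into still-active old regions move
// to the old-gen mark queue; pointers into young or already-recycled regions
// are discarded, since marking through them would touch stale data.
void transfer_old_pointers(const ToyHeap& heap,
                           std::vector<std::uintptr_t>& satb_buffer,
                           std::deque<std::uintptr_t>& old_mark_queue) {
  for (std::uintptr_t p : satb_buffer) {
    if (heap.region_of(p) == RegionKind::OldActive) {
      old_mark_queue.push_back(p);
    }
  }
  satb_buffer.clear();
}
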
+    heap->transfer_old_pointers_from_satb();
+  }
+
   heap->update_heap_region_states(true /*concurrent*/);

   heap->set_update_refs_in_progress(false);
   heap->set_has_forwarded_objects(false);

+  // The aging cycle is only relevant during the evacuation phase (for individual objects) and during final mark
+  // (for entire regions). Both of these operations occur before final update refs.
+  heap->set_aging_cycle(false);
+
   if (ShenandoahVerify) {
     heap->verifier()->verify_after_updaterefs();
   }
@@ -995,7 +1273,33 @@
 }

 void ShenandoahConcurrentGC::op_final_roots() {
-  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->set_concurrent_weak_root_in_progress(false);
+  heap->set_evacuation_in_progress(false);
+
+  if (heap->mode()->is_generational()) {
+    // If the cycle was shortened for having enough immediate garbage, this could be
+    // the last GC safepoint before concurrent marking of old resumes. We must be sure
+    // that old mark threads don't see any pointers to garbage in the SATB buffers.
+    if (heap->is_concurrent_old_mark_in_progress()) {
+      heap->transfer_old_pointers_from_satb();
+    }
+
+    ShenandoahMarkingContext* ctx = heap->complete_marking_context();
+    for (size_t i = 0; i < heap->num_regions(); i++) {
+      ShenandoahHeapRegion* r = heap->get_region(i);
+      if (r->is_active() && r->is_young()) {
+        HeapWord* tams = ctx->top_at_mark_start(r);
+        HeapWord* top = r->top();
+        if (top > tams) {
+          r->reset_age();
+        } else if (heap->is_aging_cycle()) {
+          r->increment_age();
+        }
+      }
+    }
+  }
 }

 void ShenandoahConcurrentGC::op_cleanup_complete() {
@@ -1014,28 +1318,31 @@ const char* ShenandoahConcurrentGC::init_mark_event_message() const {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
   if (heap->unload_classes()) {
-    return "Pause Init Mark (unload classes)";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
   } else {
-    return "Pause Init Mark";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
   }
 }

 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
+  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
+         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
+
   if (heap->unload_classes()) {
-    return "Pause Final Mark (unload classes)";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
   } else {
-    return "Pause Final Mark";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
   }
 }

 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
+  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
+         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
   if (heap->unload_classes()) {
-    return "Concurrent marking (unload classes)";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
   } else {
-    return "Concurrent marking";
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(),
"Concurrent marking", ""); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp index 1010ffe5da7..f3c83600c76 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +31,8 @@ #include "gc/shenandoah/shenandoahGC.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" +class ShenandoahGeneration; + class VM_ShenandoahInitMark; class VM_ShenandoahFinalMarkStartEvac; class VM_ShenandoahInitUpdateRefs; @@ -42,25 +45,35 @@ class ShenandoahConcurrentGC : public ShenandoahGC { friend class VM_ShenandoahFinalUpdateRefs; friend class VM_ShenandoahFinalRoots; +protected: + ShenandoahConcurrentMark _mark; + private: - ShenandoahConcurrentMark _mark; - ShenandoahDegenPoint _degen_point; + ShenandoahDegenPoint _degen_point; + bool _abbreviated; + const bool _do_old_gc_bootstrap; + +protected: + ShenandoahGeneration* const _generation; public: - ShenandoahConcurrentGC(); + ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap); bool collect(GCCause::Cause cause); ShenandoahDegenPoint degen_point() const; + bool abbreviated() const { return _abbreviated; } - // Cancel ongoing concurrent GC - static void cancel(); private: // Entry points to STW GC operations, these cause a related safepoint, that then // call the entry method below void vmop_entry_init_mark(); + +protected: void vmop_entry_final_mark(); + void vmop_entry_final_roots(); + +private: void vmop_entry_init_updaterefs(); void vmop_entry_final_updaterefs(); - void vmop_entry_final_roots(); // Entry methods to normally STW GC operations. These set up logging, monitoring // and workers for net VM operation @@ -74,6 +87,9 @@ class ShenandoahConcurrentGC : public ShenandoahGC { // for concurrent operation. 
void entry_reset(); void entry_mark_roots(); + void entry_scan_remembered_set(); + +protected: void entry_mark(); void entry_thread_roots(); void entry_weak_refs(); @@ -81,6 +97,8 @@ class ShenandoahConcurrentGC : public ShenandoahGC { void entry_class_unloading(); void entry_strong_roots(); void entry_cleanup_early(); + +private: void entry_evacuate(); void entry_update_thread_roots(); void entry_updaterefs(); @@ -91,7 +109,6 @@ class ShenandoahConcurrentGC : public ShenandoahGC { void op_init_mark(); void op_mark_roots(); void op_mark(); - void op_final_mark(); void op_thread_roots(); void op_weak_refs(); void op_weak_roots(); @@ -106,6 +123,10 @@ class ShenandoahConcurrentGC : public ShenandoahGC { void op_final_roots(); void op_cleanup_complete(); +protected: + virtual void op_final_mark(); + +private: void start_mark(); // Messages for GC trace events, they have to be immortal for @@ -114,6 +135,7 @@ class ShenandoahConcurrentGC : public ShenandoahGC { const char* final_mark_event_message() const; const char* conc_mark_event_message() const; +protected: // Check GC cancellation and abort concurrent GC bool check_cancellation_and_abort(ShenandoahDegenPoint point); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp index 956cf8cc908..e3e1c866d17 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +31,7 @@ #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" #include "gc/shenandoah/shenandoahClosures.inline.hpp" #include "gc/shenandoah/shenandoahConcurrentMark.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" @@ -39,11 +41,13 @@ #include "gc/shenandoah/shenandoahStringDedup.hpp" #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/resourceArea.hpp" #include "runtime/continuation.hpp" #include "runtime/threads.hpp" +template class ShenandoahConcurrentMarkingTask : public WorkerTask { private: ShenandoahConcurrentMark* const _cm; @@ -56,12 +60,14 @@ class ShenandoahConcurrentMarkingTask : public WorkerTask { void work(uint worker_id) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_mark, ShenandoahPhaseTimings::ParallelMark, worker_id, true); ShenandoahSuspendibleThreadSetJoiner stsj; - ShenandoahReferenceProcessor* rp = heap->ref_processor(); + ShenandoahReferenceProcessor* rp = heap->active_generation()->ref_processor(); + assert(rp != nullptr, "need reference processor"); StringDedup::Requests requests; - _cm->mark_loop(worker_id, _terminator, rp, + _cm->mark_loop(GENERATION, worker_id, _terminator, rp, true /*cancellable*/, ShenandoahStringDedup::is_enabled() ? 
ENQUEUE_DEDUP : NO_DEDUP, &requests); @@ -72,7 +78,6 @@ class ShenandoahSATBAndRemarkThreadsClosure : public ThreadClosure { private: SATBMarkQueueSet& _satb_qset; OopClosure* const _cl; - uintx _claim_token; public: ShenandoahSATBAndRemarkThreadsClosure(SATBMarkQueueSet& satb_qset, OopClosure* cl) : @@ -91,6 +96,7 @@ class ShenandoahSATBAndRemarkThreadsClosure : public ThreadClosure { } }; +template class ShenandoahFinalMarkingTask : public WorkerTask { private: ShenandoahConcurrentMark* _cm; @@ -106,24 +112,25 @@ class ShenandoahFinalMarkingTask : public WorkerTask { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahParallelWorkerSession worker_session(worker_id); - ShenandoahReferenceProcessor* rp = heap->ref_processor(); StringDedup::Requests requests; + ShenandoahReferenceProcessor* rp = heap->active_generation()->ref_processor(); // First drain remaining SATB buffers. { ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); + ShenandoahObjToScanQueue* old_q = _cm->get_old_queue(worker_id); - ShenandoahSATBBufferClosure cl(q); + ShenandoahSATBBufferClosure cl(q, old_q); SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) {} assert(!heap->has_forwarded_objects(), "Not expected"); - ShenandoahMarkRefsClosure mark_cl(q, rp); + ShenandoahMarkRefsClosure mark_cl(q, rp, old_q); ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set, ShenandoahIUBarrier ? &mark_cl : nullptr); Threads::possibly_parallel_threads_do(true /* is_par */, &tc); } - _cm->mark_loop(worker_id, _terminator, rp, + _cm->mark_loop(GENERATION, worker_id, _terminator, rp, false /*not cancellable*/, _dedup_string ? ENQUEUE_DEDUP : NO_DEDUP, &requests); @@ -131,40 +138,49 @@ class ShenandoahFinalMarkingTask : public WorkerTask { } }; -ShenandoahConcurrentMark::ShenandoahConcurrentMark() : - ShenandoahMark() {} +ShenandoahConcurrentMark::ShenandoahConcurrentMark(ShenandoahGeneration* generation) : + ShenandoahMark(generation) {} // Mark concurrent roots during concurrent phases +template class ShenandoahMarkConcurrentRootsTask : public WorkerTask { private: SuspendibleThreadSetJoiner _sts_joiner; ShenandoahConcurrentRootScanner _root_scanner; ShenandoahObjToScanQueueSet* const _queue_set; + ShenandoahObjToScanQueueSet* const _old_queue_set; ShenandoahReferenceProcessor* const _rp; public: ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs, + ShenandoahObjToScanQueueSet* old, ShenandoahReferenceProcessor* rp, ShenandoahPhaseTimings::Phase phase, uint nworkers); void work(uint worker_id); }; -ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs, - ShenandoahReferenceProcessor* rp, - ShenandoahPhaseTimings::Phase phase, - uint nworkers) : +template +ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs, + ShenandoahObjToScanQueueSet* old, + ShenandoahReferenceProcessor* rp, + ShenandoahPhaseTimings::Phase phase, + uint nworkers) : WorkerTask("Shenandoah Concurrent Mark Roots"), _root_scanner(nworkers, phase), _queue_set(qs), + _old_queue_set(old), _rp(rp) { assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected"); } -void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) { +template +void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) { ShenandoahConcurrentWorkerSession worker_session(worker_id); ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id); - ShenandoahMarkRefsClosure cl(q, _rp); + 
ShenandoahObjToScanQueue* old_q = (_old_queue_set == nullptr) ? + nullptr : _old_queue_set->queue(worker_id); + ShenandoahMarkRefsClosure cl(q, _rp, old_q); _root_scanner.roots_do(&cl, worker_id); } @@ -172,14 +188,38 @@ void ShenandoahConcurrentMark::mark_concurrent_roots() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); assert(!heap->has_forwarded_objects(), "Not expected"); - TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); - WorkerThreads* workers = heap->workers(); - ShenandoahReferenceProcessor* rp = heap->ref_processor(); - task_queues()->reserve(workers->active_workers()); - ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots, workers->active_workers()); - - workers->run_task(&task); + ShenandoahReferenceProcessor* rp = _generation->ref_processor(); + _generation->reserve_task_queues(workers->active_workers()); + switch (_generation->type()) { + case YOUNG: { + ShenandoahMarkConcurrentRootsTask task(task_queues(), old_task_queues(), rp, + ShenandoahPhaseTimings::conc_mark_roots, workers->active_workers()); + workers->run_task(&task); + break; + } + case GLOBAL_GEN: { + assert(old_task_queues() == nullptr, "Global mark should not have old gen mark queues"); + ShenandoahMarkConcurrentRootsTask task(task_queues(), nullptr, rp, + ShenandoahPhaseTimings::conc_mark_roots, workers->active_workers()); + workers->run_task(&task); + break; + } + case GLOBAL_NON_GEN: { + assert(old_task_queues() == nullptr, "Non-generational mark should not have old gen mark queues"); + ShenandoahMarkConcurrentRootsTask task(task_queues(), nullptr, rp, + ShenandoahPhaseTimings::conc_mark_roots, workers->active_workers()); + workers->run_task(&task); + break; + } + case OLD: { + // We use a YOUNG generation cycle to bootstrap concurrent old marking. + ShouldNotReachHere(); + break; + } + default: + ShouldNotReachHere(); + } } class ShenandoahFlushSATBHandshakeClosure : public HandshakeClosure { @@ -204,9 +244,40 @@ void ShenandoahConcurrentMark::concurrent_mark() { ShenandoahSATBMarkQueueSet& qset = ShenandoahBarrierSet::satb_mark_queue_set(); ShenandoahFlushSATBHandshakeClosure flush_satb(qset); for (uint flushes = 0; flushes < ShenandoahMaxSATBBufferFlushes; flushes++) { - TaskTerminator terminator(nworkers, task_queues()); - ShenandoahConcurrentMarkingTask task(this, &terminator); - workers->run_task(&task); + switch (_generation->type()) { + case YOUNG: { + // Clear any old/partial local census data before the start of marking. + heap->age_census()->reset_local(); + assert(heap->age_census()->is_clear_local(), "Error"); + TaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentMarkingTask task(this, &terminator); + workers->run_task(&task); + break; + } + case OLD: { + TaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentMarkingTask task(this, &terminator); + workers->run_task(&task); + break; + } + case GLOBAL_GEN: { + // Clear any old/partial local census data before the start of marking. 
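
All of these switches follow one pattern: convert the generation type, known only at runtime, into a compile-time template parameter once, so the hot marking loop is specialized per generation rather than branching per oop. A minimal sketch of that dispatch, with toy function bodies:

#include <cstdio>

enum ShenGenType { YOUNG_T, OLD_T, GLOBAL_GEN_T, GLOBAL_NON_GEN_T };

// Marking work specialized at compile time on the generation being collected,
// mirroring the template-on-GENERATION pattern above (the bodies are toys).
template <ShenGenType GENERATION>
void mark_loop_for() {
  if (GENERATION == YOUNG_T || GENERATION == GLOBAL_GEN_T) {
    // Generational modes reset the per-thread age census before marking.
    std::puts("reset local age census");
  }
  std::puts("run mark loop");
}

// One runtime switch converts the dynamic type to a template parameter.
void run_marking(ShenGenType t) {
  switch (t) {
    case YOUNG_T:          mark_loop_for<YOUNG_T>();          break;
    case OLD_T:            mark_loop_for<OLD_T>();            break;
    case GLOBAL_GEN_T:     mark_loop_for<GLOBAL_GEN_T>();     break;
    case GLOBAL_NON_GEN_T: mark_loop_for<GLOBAL_NON_GEN_T>(); break;
  }
}
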
+ heap->age_census()->reset_local(); + assert(heap->age_census()->is_clear_local(), "Error"); + TaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentMarkingTask task(this, &terminator); + workers->run_task(&task); + break; + } + case GLOBAL_NON_GEN: { + TaskTerminator terminator(nworkers, task_queues()); + ShenandoahConcurrentMarkingTask task(this, &terminator); + workers->run_task(&task); + break; + } + default: + ShouldNotReachHere(); + } if (heap->cancelled_gc()) { // GC is cancelled, break out. @@ -233,9 +304,8 @@ void ShenandoahConcurrentMark::finish_mark() { TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats()); TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); - ShenandoahHeap* const heap = ShenandoahHeap::heap(); - heap->set_concurrent_mark_in_progress(false); - heap->mark_complete_marking_context(); + _generation->set_concurrent_mark_in_progress(false); + _generation->set_mark_complete(); end_mark(); } @@ -255,15 +325,32 @@ void ShenandoahConcurrentMark::finish_mark_work() { StrongRootsScope scope(nworkers); TaskTerminator terminator(nworkers, task_queues()); - ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); - heap->workers()->run_task(&task); - assert(task_queues()->is_empty(), "Should be empty"); -} + switch (_generation->type()) { + case YOUNG:{ + ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); + heap->workers()->run_task(&task); + break; + } + case OLD:{ + ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); + heap->workers()->run_task(&task); + break; + } + case GLOBAL_GEN:{ + ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); + heap->workers()->run_task(&task); + break; + } + case GLOBAL_NON_GEN:{ + ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); + heap->workers()->run_task(&task); + break; + } + default: + ShouldNotReachHere(); + } -void ShenandoahConcurrentMark::cancel() { - clear(); - ShenandoahReferenceProcessor* rp = ShenandoahHeap::heap()->ref_processor(); - rp->abandon_partial_discovery(); + assert(task_queues()->is_empty(), "Should be empty"); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp index fbcd075117a..336158ca5c9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp @@ -27,24 +27,28 @@ #include "gc/shenandoah/shenandoahMark.hpp" +template class ShenandoahConcurrentMarkingTask; +template class ShenandoahFinalMarkingTask; +class ShenandoahGeneration; class ShenandoahConcurrentMark: public ShenandoahMark { - friend class ShenandoahConcurrentMarkingTask; - friend class ShenandoahFinalMarkingTask; + template friend class ShenandoahConcurrentMarkingTask; + template friend class ShenandoahFinalMarkingTask; public: - ShenandoahConcurrentMark(); + ShenandoahConcurrentMark(ShenandoahGeneration* generation); + // Concurrent mark roots void mark_concurrent_roots(); + // Concurrent mark void concurrent_mark(); + // Finish mark at a safepoint void finish_mark(); - static void cancel(); - private: void finish_mark_work(); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 3b755e4366a..131c6c16836 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ 
b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,18 +29,26 @@ #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahControlThread.hpp" #include "gc/shenandoah/shenandoahDegeneratedGC.hpp" +#include "gc/shenandoah/shenandoahEvacTracker.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahFullGC.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahGlobalGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" +#include "gc/shenandoah/shenandoahOldGC.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" +#include "logging/log.hpp" #include "memory/iterator.hpp" #include "memory/metaspaceUtils.hpp" #include "memory/metaspaceStats.hpp" @@ -48,12 +57,17 @@ ShenandoahControlThread::ShenandoahControlThread() : ConcurrentGCThread(), - _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true), - _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true), + _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true), + _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true), + _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true), + _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true), _periodic_task(this), - _requested_gc_cause(GCCause::_no_cause_specified), + _requested_gc_cause(GCCause::_no_gc), + _requested_generation(select_global_generation()), _degen_point(ShenandoahGC::_degenerated_outside_cycle), - _allocs_seen(0) { + _degen_generation(nullptr), + _allocs_seen(0), + _mode(none) { set_name("Shenandoah Control Thread"); reset_gc_id(); create_and_start(); @@ -78,40 +92,45 @@ void ShenandoahPeriodicPacerNotify::task() { } void ShenandoahControlThread::run_service() { - ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); - GCMode default_mode = concurrent_normal; - GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; - int sleep = ShenandoahControlIntervalMin; + const GCMode default_mode = concurrent_normal; + ShenandoahGenerationType generation = select_global_generation(); double last_shrink_time = os::elapsedTime(); - double last_sleep_adjust_time = os::elapsedTime(); + uint age_period = 0; // Shrink period avoids constantly polling regions for shrinking. // Having a period 10x lower than the delay would mean we hit the // shrinking with lag of less than 1/10-th of true delay. 
// ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. - double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; + const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; + + ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); - ShenandoahCollectorPolicy* policy = heap->shenandoah_policy(); - ShenandoahHeuristics* heuristics = heap->heuristics(); + // Heuristics are notified of allocation failures here and other outcomes + // of the cycle. They're also used here to control whether the Nth consecutive + // degenerated cycle should be 'promoted' to a full cycle. The decision to + // trigger a cycle or not is evaluated on the regulator thread. + ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics(); while (!in_graceful_shutdown() && !should_terminate()) { // Figure out if we have pending requests. - bool alloc_failure_pending = _alloc_failure_gc.is_set(); - bool is_gc_requested = _gc_requested.is_set(); - GCCause::Cause requested_gc_cause = _requested_gc_cause; - bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause); - bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause); + const bool alloc_failure_pending = _alloc_failure_gc.is_set(); + const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set(); + + GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc); + + const bool explicit_gc_requested = is_explicit_gc(cause); + const bool implicit_gc_requested = is_implicit_gc(cause); // This control loop iteration have seen this much allocations. - size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); + const size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); // Check if we have seen a new target for soft max heap size. - bool soft_max_changed = check_soft_max_changed(); + const bool soft_max_changed = check_soft_max_changed(); // Choose which GC mode to run in. The block below should select a single mode. - GCMode mode = none; - GCCause::Cause cause = GCCause::_last_gc_cause; + set_gc_mode(none); ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset; if (alloc_failure_pending) { @@ -124,68 +143,118 @@ void ShenandoahControlThread::run_service() { degen_point = _degen_point; _degen_point = ShenandoahGC::_degenerated_outside_cycle; - if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) { + if (degen_point == ShenandoahGC::_degenerated_outside_cycle) { + _degen_generation = heap->mode()->is_generational() ? + heap->young_generation() : heap->global_generation(); + } else { + assert(_degen_generation != nullptr, "Need to know which generation to resume"); + } + + ShenandoahHeuristics* heuristics = _degen_generation->heuristics(); + generation = _degen_generation->type(); + bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure(); + + // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed + if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && + !old_gen_evacuation_failed && !humongous_alloc_failure_pending) { heuristics->record_allocation_failure_gc(); policy->record_alloc_failure_to_degenerated(degen_point); - mode = stw_degenerated; + set_gc_mode(stw_degenerated); } else { + // TODO: if humongous_alloc_failure_pending, there might be value in trying a "compacting" degen before + // going all the way to full. 
But it's a lot of work to implement this, and it may not provide value. + // A compacting degen can move young regions around without doing full old-gen mark (relying upon the + // remembered set scan), so it might be faster than a full gc. + // + // Longer term, think about how to defragment humongous memory concurrently. + heuristics->record_allocation_failure_gc(); policy->record_alloc_failure_to_full(); - mode = stw_full; + generation = select_global_generation(); + set_gc_mode(stw_full); } - } else if (explicit_gc_requested) { - cause = requested_gc_cause; + generation = select_global_generation(); log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause)); - heuristics->record_requested_gc(); + global_heuristics->record_requested_gc(); if (ExplicitGCInvokesConcurrent) { policy->record_explicit_to_concurrent(); - mode = default_mode; + set_gc_mode(default_mode); // Unload and clean up everything - heap->set_unload_classes(heuristics->can_unload_classes()); + heap->set_unload_classes(global_heuristics->can_unload_classes()); } else { policy->record_explicit_to_full(); - mode = stw_full; + set_gc_mode(stw_full); } } else if (implicit_gc_requested) { - cause = requested_gc_cause; + generation = select_global_generation(); log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause)); - heuristics->record_requested_gc(); + global_heuristics->record_requested_gc(); if (ShenandoahImplicitGCInvokesConcurrent) { policy->record_implicit_to_concurrent(); - mode = default_mode; + set_gc_mode(default_mode); // Unload and clean up everything - heap->set_unload_classes(heuristics->can_unload_classes()); + heap->set_unload_classes(global_heuristics->can_unload_classes()); } else { policy->record_implicit_to_full(); - mode = stw_full; + set_gc_mode(stw_full); } } else { - // Potential normal cycle: ask heuristics if it wants to act - if (heuristics->should_start_gc()) { - mode = default_mode; - cause = default_cause; - } + // We should only be here if the regulator requested a cycle or if + // there is an old generation mark in progress. + if (cause == GCCause::_shenandoah_concurrent_gc) { + if (_requested_generation == OLD && heap->doing_mixed_evacuations()) { + // If a request to start an old cycle arrived while an old cycle was running, but _before_ + // it chose any regions for evacuation we don't want to start a new old cycle. Rather, we want + // the heuristic to run a young collection so that we can evacuate some old regions. + assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking"); + generation = YOUNG; + } else { + generation = _requested_generation; + } - // Ask policy if this cycle wants to process references or unload classes - heap->set_unload_classes(heuristics->should_unload_classes()); - } + // preemption was requested or this is a regular cycle + set_gc_mode(default_mode); + + // Don't start a new old marking if there is one already in progress + if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) { + set_gc_mode(servicing_old); + } - // Blow all soft references on this cycle, if handling allocation failure, - // either implicit or explicit GC request, or we are requested to do so unconditionally. 
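
The degenerate-or-full decision above can be summarized as a small predicate; this sketch uses illustrative parameter names for the flags and heuristics involved, and is not the patch's actual control flow:

enum GCModeSketch { concurrent_normal_mode, stw_degenerated_mode, stw_full_mode };

GCModeSketch on_allocation_failure(bool degen_gc_enabled,        // stands in for ShenandoahDegeneratedGC
                                   bool heuristics_allow_degen,  // stands in for should_degenerate_cycle()
                                   bool old_evac_failed,
                                   bool humongous_alloc_failed) {
  if (degen_gc_enabled && heuristics_allow_degen &&
      !old_evac_failed && !humongous_alloc_failed) {
    return stw_degenerated_mode;   // cheaper STW cycle, resumes from the degen point
  }
  // Full GC is the safe fallback; it also compacts, which can satisfy a
  // humongous allocation that a degenerated cycle could not.
  return stw_full_mode;
}
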
- if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) { - heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); + if (generation == select_global_generation()) { + heap->set_unload_classes(global_heuristics->should_unload_classes()); + } else { + heap->set_unload_classes(false); + } + } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) { + // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for + // mixed evacuation in progress, so resume working on that. + log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress", + heap->is_concurrent_old_mark_in_progress() ? "" : " NOT", + heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT"); + + cause = GCCause::_shenandoah_concurrent_gc; + generation = OLD; + set_gc_mode(servicing_old); + heap->set_unload_classes(false); + } } - bool gc_requested = (mode != none); - assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set"); + const bool gc_requested = (gc_mode() != none); + assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set"); if (gc_requested) { + // Blow away all soft references on this cycle, if handling allocation failure, + // either implicit or explicit GC request, or we are requested to do so unconditionally. + if (generation == select_global_generation() && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) { + heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); + } + // GC is starting, bump the internal ID update_gc_id(); @@ -202,17 +271,43 @@ void ShenandoahControlThread::run_service() { ShenandoahHeapLocker locker(heap->lock()); heap->free_set()->log_status(); } - - switch (mode) { - case concurrent_normal: - service_concurrent_normal_cycle(cause); + // In case this is a degenerated cycle, remember whether original cycle was aging. 
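
The aging-cycle bookkeeping referenced here works on a simple countdown: one aging cycle every N concurrent cycles. A toy model of that cadence (the period stands in for ShenandoahAgingCyclePeriod):

// One aging cycle every 'period' concurrent cycles.
struct AgingClock {
  unsigned period;         // stands in for ShenandoahAgingCyclePeriod
  unsigned countdown = 0;

  bool next_cycle_ages() {
    if (countdown == 0) {
      countdown = period - 1;
      return true;           // this cycle ages objects and regions
    }
    --countdown;
    return false;
  }
};
// With period == 3, cycles follow the pattern: age, skip, skip, age, ...
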
+ const bool was_aging_cycle = heap->is_aging_cycle(); + heap->set_aging_cycle(false); + + switch (gc_mode()) { + case concurrent_normal: { + // At this point: + // if (generation == YOUNG), this is a normal YOUNG cycle + // if (generation == OLD), this is a bootstrap OLD cycle + // if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc() + // In all three cases, we want to age old objects if this is an aging cycle + if (age_period-- == 0) { + heap->set_aging_cycle(true); + age_period = ShenandoahAgingCyclePeriod - 1; + } + service_concurrent_normal_cycle(heap, generation, cause); break; - case stw_degenerated: + } + case stw_degenerated: { + heap->set_aging_cycle(was_aging_cycle); service_stw_degenerated_cycle(cause, degen_point); break; - case stw_full: + } + case stw_full: { + if (age_period-- == 0) { + heap->set_aging_cycle(true); + age_period = ShenandoahAgingCyclePeriod - 1; + } service_stw_full_cycle(cause); break; + } + case servicing_old: { + assert(generation == OLD, "Expected old generation here"); + GCIdMark gc_id_mark; + service_concurrent_old_cycle(heap, cause); + break; + } default: ShouldNotReachHere(); } @@ -252,30 +347,10 @@ void ShenandoahControlThread::run_service() { // Clear metaspace oom flag, if current cycle unloaded classes if (heap->unload_classes()) { - heuristics->clear_metaspace_oom(); + global_heuristics->clear_metaspace_oom(); } - // Commit worker statistics to cycle data - heap->phase_timings()->flush_par_workers_to_cycle(); - if (ShenandoahPacing) { - heap->pacer()->flush_stats_to_cycle(); - } - - // Print GC stats for current cycle - { - LogTarget(Info, gc, stats) lt; - if (lt.is_enabled()) { - ResourceMark rm; - LogStream ls(lt); - heap->phase_timings()->print_cycle_on(&ls); - if (ShenandoahPacing) { - heap->pacer()->print_cycle_on(&ls); - } - } - } - - // Commit statistics to globals - heap->phase_timings()->flush_cycle_to_global(); + process_phase_timings(heap); // Print Metaspace change following GC (if logging is enabled). MetaspaceUtils::print_metaspace_change(meta_sizes); @@ -285,7 +360,7 @@ void ShenandoahControlThread::run_service() { heap->pacer()->setup_for_idle(); } } else { - // Allow allocators to know we have seen this much regions + // Allow pacer to know we have seen this many allocations if (ShenandoahPacing && (allocs_seen > 0)) { heap->pacer()->report_alloc(allocs_seen); } @@ -311,16 +386,13 @@ void ShenandoahControlThread::run_service() { last_shrink_time = current; } - // Wait before performing the next action. If allocation happened during this wait, - // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, - // back off exponentially. - if (_heap_changed.try_unset()) { - sleep = ShenandoahControlIntervalMin; - } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ - sleep = MIN2(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)); - last_sleep_adjust_time = current; + // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle. + if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) { + // The timed wait is necessary because this thread has a responsibility to send + // 'alloc_words' to the pacer when it does not perform a GC. + MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag); + lock.wait(ShenandoahControlIntervalMax); } - os::naked_short_sleep(sleep); } // Wait for the actual stop(), can't leave run_service() earlier. 
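
The replacement of the exponential back-off sleep with a timed monitor wait can be sketched with standard C++ primitives; this illustrates the pattern, not the HotSpot Monitor API:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Control-loop wait: block for at most max_interval, but wake immediately
// when a GC request arrives. The timeout keeps the loop reporting allocation
// progress to the pacer at a bounded cadence even when no requests come in.
class ControlLock {
 public:
  void wait_for_request(std::chrono::milliseconds max_interval) {
    std::unique_lock<std::mutex> lk(_mu);
    _cv.wait_for(lk, max_interval, [this] { return _request_pending; });
    _request_pending = false;
  }

  void notify_request() {
    {
      std::lock_guard<std::mutex> lk(_mu);
      _request_pending = true;
    }
    _cv.notify_one();
  }

 private:
  std::mutex _mu;
  std::condition_variable _cv;
  bool _request_pending = false;
};
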
@@ -329,6 +401,212 @@ void ShenandoahControlThread::run_service() {
   }
 }

+void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
+  // Commit worker statistics to cycle data
+  heap->phase_timings()->flush_par_workers_to_cycle();
+  if (ShenandoahPacing) {
+    heap->pacer()->flush_stats_to_cycle();
+  }
+
+  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
+  ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global();
+
+  // Print GC stats for current cycle
+  {
+    LogTarget(Info, gc, stats) lt;
+    if (lt.is_enabled()) {
+      ResourceMark rm;
+      LogStream ls(lt);
+      heap->phase_timings()->print_cycle_on(&ls);
+      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers, &evac_stats.mutators);
+      if (ShenandoahPacing) {
+        heap->pacer()->print_cycle_on(&ls);
+      }
+    }
+  }
+
+  // Commit statistics to globals
+  heap->phase_timings()->flush_cycle_to_global();
+}
+
+// Young and old concurrent cycles are initiated by the regulator. Implicit
+// and explicit GC requests are handled by the controller thread and always
+// run a global cycle (which is concurrent by default, but may be overridden
+// by command line options). Old cycles always degenerate to a global cycle.
+// Young cycles degenerate in order to complete the young collection. Young
+// and old degenerated cycles may upgrade to Full GC. Full GC may also be
+// triggered directly by a System.gc() invocation.
+//
+// +-----+ Idle +-----+-----------+---------------------+
+// | + | | |
+// | | | | |
+// | | v | |
+// | | Bootstrap Old +-- | ------------+ |
+// | | + | | |
+// | | | | | |
+// | v v v v |
+// | Resume Old <----------+ Young +--> Young Degen |
+// | + + ^ + + |
+// v | | | | | |
+// Global <-+ | +----------------------------+ | |
+// + | | |
+// | v v |
+// +---> Global Degen +--------------------> Full <----+
+//
+void ShenandoahControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
+                                                              const ShenandoahGenerationType generation,
+                                                              GCCause::Cause cause) {
+  GCIdMark gc_id_mark;
+  ShenandoahGeneration* the_generation = nullptr;
+  switch (generation) {
+    case YOUNG: {
+      // Run a young cycle. This might or might not have interrupted an ongoing
+      // concurrent mark in the old generation. We need to think about promotions
+      // in this case. Promoted objects should be above the TAMS in the old regions
+      // they end up in, but we have to be sure we don't promote into any regions
+      // that are in the cset.
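
As a toy illustration of the promotion constraints in the comment above (the names here are hypothetical, not HotSpot code):

#include <cstddef>

// Toy old region with the fields the promotion rules care about.
struct OldRegionSketch {
  bool in_collection_set;
  std::size_t tams;   // top-at-mark-start, fixed when old marking began
  std::size_t top;    // advances as promoted objects are allocated
};

// Never promote into a region that is being evacuated.
bool valid_promotion_target(const OldRegionSketch& r) {
  return !r.in_collection_set;
}

// Objects promoted while old marking runs land at addresses >= TAMS, so the
// old marking context treats them as implicitly live.
bool promoted_above_tams(const OldRegionSketch& r, std::size_t obj_addr) {
  return obj_addr >= r.tams && obj_addr < r.top;
}
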
+ log_info(gc, ergo)("Start GC cycle (YOUNG)"); + the_generation = heap->young_generation(); + service_concurrent_cycle(the_generation, cause, false); + break; + } + case OLD: { + log_info(gc, ergo)("Start GC cycle (OLD)"); + the_generation = heap->old_generation(); + service_concurrent_old_cycle(heap, cause); + break; + } + case GLOBAL_GEN: { + log_info(gc, ergo)("Start GC cycle (GLOBAL)"); + the_generation = heap->global_generation(); + service_concurrent_cycle(the_generation, cause, false); + break; + } + case GLOBAL_NON_GEN: { + log_info(gc, ergo)("Start GC cycle"); + the_generation = heap->global_generation(); + service_concurrent_cycle(the_generation, cause, false); + break; + } + default: + ShouldNotReachHere(); + } +} + +void ShenandoahControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) { + ShenandoahOldGeneration* old_generation = heap->old_generation(); + ShenandoahYoungGeneration* young_generation = heap->young_generation(); + ShenandoahOldGeneration::State original_state = old_generation->state(); + + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + + switch (original_state) { + case ShenandoahOldGeneration::FILLING: { + _allow_old_preemption.set(); + old_generation->entry_coalesce_and_fill(); + _allow_old_preemption.unset(); + + // Before bootstrapping begins, we must acknowledge any cancellation request. + // If the gc has not been cancelled, this does nothing. If it has been cancelled, + // this will clear the cancellation request and exit before starting the bootstrap + // phase. This will allow the young GC cycle to proceed normally. If we do not + // acknowledge the cancellation request, the subsequent young cycle will observe + // the request and essentially cancel itself. + if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) { + log_info(gc)("Preparation for old generation cycle was cancelled"); + return; + } + + // Coalescing threads completed and nothing was cancelled. it is safe to transition from this state. + old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + return; + } + case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP: + old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING); + case ShenandoahOldGeneration::BOOTSTRAPPING: { + // Configure the young generation's concurrent mark to put objects in + // old regions into the concurrent mark queues associated with the old + // generation. The young cycle will run as normal except that rather than + // ignore old references it will mark and enqueue them in the old concurrent + // task queues but it will not traverse them. + set_gc_mode(bootstrapping_old); + young_generation->set_old_gen_task_queues(old_generation->task_queues()); + ShenandoahGCSession session(cause, young_generation); + service_concurrent_cycle(heap, young_generation, cause, true); + process_phase_timings(heap); + if (heap->cancelled_gc()) { + // Young generation bootstrap cycle has failed. Concurrent mark for old generation + // is going to resume after degenerated bootstrap cycle completes. + log_info(gc)("Bootstrap cycle for old generation was cancelled"); + return; + } + + // Reset the degenerated point. Normally this would happen at the top + // of the control loop, but here we have just completed a young cycle + // which has bootstrapped the old concurrent marking. + _degen_point = ShenandoahGC::_degenerated_outside_cycle; + + // From here we will 'resume' the old concurrent mark. 
+      // This will skip reset and init mark for the concurrent mark. All of that
+      // work will have been done by the bootstrapping young cycle.
+      set_gc_mode(servicing_old);
+      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
+    }
+    case ShenandoahOldGeneration::MARKING: {
+      ShenandoahGCSession session(cause, old_generation);
+      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
+      if (marking_complete) {
+        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
+        if (original_state == ShenandoahOldGeneration::MARKING) {
+          heap->mmu_tracker()->record_old_marking_increment(true);
+          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
+        }
+      } else if (original_state == ShenandoahOldGeneration::MARKING) {
+        heap->mmu_tracker()->record_old_marking_increment(false);
+        heap->log_heap_status("At end of Concurrent Old Marking increment");
+      }
+      break;
+    }
+    default:
+      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
+  }
+}
+
+bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
+  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
+  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
+  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
+  // is allowed to cancel a GC.
+  ShenandoahOldGC gc(generation, _allow_old_preemption);
+  if (gc.collect(cause)) {
+    generation->record_success_concurrent(false);
+  }
+
+  if (heap->cancelled_gc()) {
+    // It's possible the gc cycle was cancelled after the last time
+    // the collection checked for cancellation. In that case, the
+    // old gc cycle is still complete, and we have to deal with this
+    // cancellation. We set the degeneration point to be outside
+    // the cycle because if this is an allocation failure, that is
+    // what must be done (there is no degenerated old cycle). If the
+    // cancellation was due to a heuristic wanting to start a young
+    // cycle, then we are not actually going to a degenerated cycle,
+    // so the degenerated point doesn't matter here.
+    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
+    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
+      heap->shenandoah_policy()->record_interrupted_old();
+    }
+    return false;
+  }
+  return true;
+}
+
 bool ShenandoahControlThread::check_soft_max_changed() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
@@ -348,7 +626,7 @@ bool ShenandoahControlThread::check_soft_max_changed() const {
   return false;
 }
 
-void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
+void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
   // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
   // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
// If second allocation failure happens during Degenerated GC cycle (for example, when GC @@ -384,36 +662,102 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau // v | // Full GC --------------------------/ // - ShenandoahHeap* heap = ShenandoahHeap::heap(); if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return; - GCIdMark gc_id_mark; - ShenandoahGCSession session(cause); - + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGCSession session(cause, generation); TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); - ShenandoahConcurrentGC gc; + service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap); +} + +void ShenandoahControlThread::service_concurrent_cycle(ShenandoahHeap* heap, + ShenandoahGeneration* generation, + GCCause::Cause& cause, + bool do_old_gc_bootstrap) { + ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap); if (gc.collect(cause)) { // Cycle is complete - heap->heuristics()->record_success_concurrent(); - heap->shenandoah_policy()->record_success_concurrent(); + generation->record_success_concurrent(gc.abbreviated()); } else { assert(heap->cancelled_gc(), "Must have been cancelled"); check_cancellation_or_degen(gc.degen_point()); + assert(!generation->is_old(), "Old GC takes a different control path"); + // Concurrent young-gen collection degenerates to young + // collection. Same for global collections. + _degen_generation = generation; } + const char* msg; + if (heap->mode()->is_generational()) { + ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker(); + if (generation->is_young()) { + if (heap->cancelled_gc()) { + msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC": + "At end of Interrupted Concurrent Young GC"; + } else { + // We only record GC results if GC was successful + msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC": + "At end of Concurrent Young GC"; + if (heap->collection_set()->has_old_regions()) { + mmu_tracker->record_mixed(get_gc_id()); + } else if (do_old_gc_bootstrap) { + mmu_tracker->record_bootstrap(get_gc_id()); + } else { + mmu_tracker->record_young(get_gc_id()); + } + } + } else { + assert(generation->is_global(), "If not young, must be GLOBAL"); + assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC"); + if (heap->cancelled_gc()) { + msg = "At end of Interrupted Concurrent GLOBAL GC"; + } else { + // We only record GC results if GC was successful + msg = "At end of Concurrent Global GC"; + mmu_tracker->record_global(get_gc_id()); + } + } + } else { + msg = heap->cancelled_gc() ? 
"At end of cancelled GC" : + "At end of GC"; + } + heap->log_heap_status(msg); } bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (heap->cancelled_gc()) { - assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting"); - if (!in_graceful_shutdown()) { - assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle, - "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); - _degen_point = point; - } + if (!heap->cancelled_gc()) { + return false; + } + + if (in_graceful_shutdown()) { + return true; + } + + assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle, + "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); + + if (is_alloc_failure_gc()) { + _degen_point = point; + _preemption_requested.unset(); + return true; + } + + if (_preemption_requested.is_set()) { + assert(_requested_generation == YOUNG, "Only young GCs may preempt old."); + _preemption_requested.unset(); + + // Old generation marking is only cancellable during concurrent marking. + // Once final mark is complete, the code does not check again for cancellation. + // If old generation was cancelled for an allocation failure, we wouldn't + // make it to this case. The calling code is responsible for forcing a + // cancellation due to allocation failure into a degenerated cycle. + _degen_point = point; + heap->clear_cancelled_gc(false /* clear oom handler */); return true; } + + fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking"); return false; } @@ -422,29 +766,37 @@ void ShenandoahControlThread::stop_service() { } void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + GCIdMark gc_id_mark; - ShenandoahGCSession session(cause); + ShenandoahGCSession session(cause, heap->global_generation()); ShenandoahFullGC gc; gc.collect(cause); - - ShenandoahHeap* const heap = ShenandoahHeap::heap(); - heap->heuristics()->record_success_full(); - heap->shenandoah_policy()->record_success_full(); } -void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) { - assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); +void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, + ShenandoahGC::ShenandoahDegenPoint point) { + assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); GCIdMark gc_id_mark; - ShenandoahGCSession session(cause); + ShenandoahGCSession session(cause, _degen_generation); - ShenandoahDegenGC gc(point); + ShenandoahDegenGC gc(point, _degen_generation); gc.collect(cause); - ShenandoahHeap* const heap = ShenandoahHeap::heap(); - heap->heuristics()->record_success_degenerated(); - heap->shenandoah_policy()->record_success_degenerated(); + assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks"); + if (_degen_generation->is_global()) { + assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); + assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); + } else { + assert(_degen_generation->is_young(), "Expected degenerated young 
cycle, if not global."); + ShenandoahOldGeneration* old = heap->old_generation(); + if (old->state() == ShenandoahOldGeneration::BOOTSTRAPPING) { + old->transition_to(ShenandoahOldGeneration::MARKING); + } + } } void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) { @@ -475,6 +827,12 @@ bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const { GCCause::is_serviceability_requested_gc(cause); } +bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const { + return !is_explicit_gc(cause) + && cause != GCCause::_shenandoah_concurrent_gc + && cause != GCCause::_no_gc; +} + void ShenandoahControlThread::request_gc(GCCause::Cause cause) { assert(GCCause::is_user_requested_gc(cause) || GCCause::is_serviceability_requested_gc(cause) || @@ -497,6 +855,69 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) { } } +bool ShenandoahControlThread::request_concurrent_gc(ShenandoahGenerationType generation) { + if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) { + // Ignore subsequent requests from the heuristics + log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s", + BOOL_TO_STR(_preemption_requested.is_set()), + GCCause::to_string(_requested_gc_cause), + BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc())); + return false; + } + + if (gc_mode() == none) { + GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); + if (existing != GCCause::_no_gc) { + log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing)); + return false; + } + + _requested_generation = generation; + notify_control_thread(); + + MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); + while (gc_mode() == none) { + ml.wait(); + } + return true; + } + + if (preempt_old_marking(generation)) { + assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode())); + GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); + if (existing != GCCause::_no_gc) { + log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing)); + return false; + } + + log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation)); + _requested_generation = generation; + _preemption_requested.set(); + ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); + notify_control_thread(); + + MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); + while (gc_mode() == servicing_old) { + ml.wait(); + } + return true; + } + + log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s", + gc_mode_name(gc_mode()), + BOOL_TO_STR(_allow_old_preemption.is_set())); + return false; +} + +void ShenandoahControlThread::notify_control_thread() { + MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag); + _control_lock.notify(); +} + +bool ShenandoahControlThread::preempt_old_marking(ShenandoahGenerationType generation) { + return (generation == YOUNG) && _allow_old_preemption.try_unset(); +} + void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { // Make sure we have at least one complete GC cycle before unblocking // from the explicit GC 
request. @@ -511,12 +932,15 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { size_t current_gc_id = get_gc_id(); size_t required_gc_id = current_gc_id + 1; while (current_gc_id < required_gc_id) { - // Although setting gc request is under _gc_waiters_lock, but read side (run_service()) - // does not take the lock. We need to enforce following order, so that read side sees - // latest requested gc cause when the flag is set. - _requested_gc_cause = cause; - _gc_requested.set(); + // This races with the regulator thread to start a concurrent gc and the + // control thread to clear it at the start of a cycle. Threads here are + // allowed to escalate a heuristic's request for concurrent gc. + GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause); + if (existing != GCCause::_no_gc) { + log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing)); + } + notify_control_thread(); if (cause != GCCause::_wb_breakpoint) { ml.wait(); } @@ -528,13 +952,13 @@ void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) ShenandoahHeap* heap = ShenandoahHeap::heap(); assert(current()->is_Java_thread(), "expect Java thread here"); + bool is_humongous = req.size() > ShenandoahHeapRegion::region_size_words(); - if (try_set_alloc_failure_gc()) { + if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s", req.type_string(), byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize)); - // Now that alloc failure GC is scheduled, we can abort everything else heap->cancel_gc(GCCause::_allocation_failure); } @@ -547,8 +971,9 @@ void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) { ShenandoahHeap* heap = ShenandoahHeap::heap(); + bool is_humongous = (words > ShenandoahHeapRegion::region_size_words()); - if (try_set_alloc_failure_gc()) { + if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation", byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); @@ -560,11 +985,15 @@ void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) { void ShenandoahControlThread::notify_alloc_failure_waiters() { _alloc_failure_gc.unset(); + _humongous_alloc_failure_gc.unset(); MonitorLocker ml(&_alloc_failure_waiters_lock); ml.notify_all(); } -bool ShenandoahControlThread::try_set_alloc_failure_gc() { +bool ShenandoahControlThread::try_set_alloc_failure_gc(bool is_humongous) { + if (is_humongous) { + _humongous_alloc_failure_gc.try_set(); + } return _alloc_failure_gc.try_set(); } @@ -573,7 +1002,6 @@ bool ShenandoahControlThread::is_alloc_failure_gc() { } void ShenandoahControlThread::notify_gc_waiters() { - _gc_requested.unset(); MonitorLocker ml(&_gc_waiters_lock); ml.notify_all(); } @@ -600,10 +1028,6 @@ void ShenandoahControlThread::notify_heap_changed() { if (_do_counters_update.is_unset()) { _do_counters_update.set(); } - // Notify that something had changed. 
- if (_heap_changed.is_unset()) { - _heap_changed.set(); - } } void ShenandoahControlThread::pacing_notify_alloc(size_t words) { @@ -638,3 +1062,32 @@ void ShenandoahControlThread::prepare_for_graceful_shutdown() { bool ShenandoahControlThread::in_graceful_shutdown() { return _graceful_shutdown.is_set(); } + +const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) { + switch (mode) { + case none: return "idle"; + case concurrent_normal: return "normal"; + case stw_degenerated: return "degenerated"; + case stw_full: return "full"; + case servicing_old: return "old"; + case bootstrapping_old: return "bootstrap"; + default: return "unknown"; + } +} + +void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) { + if (_mode != new_mode) { + log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode)); + MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); + _mode = new_mode; + ml.notify_all(); + } +} + +ShenandoahGenerationType ShenandoahControlThread::select_global_generation() { + if (ShenandoahHeap::heap()->mode()->is_generational()) { + return GLOBAL_GEN; + } else { + return GLOBAL_NON_GEN; + } +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp index 782b134a449..60163009f37 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,54 +58,74 @@ class ShenandoahControlThread: public ConcurrentGCThread { friend class VMStructs; private: - typedef enum { - none, - concurrent_normal, - stw_degenerated, - stw_full - } GCMode; - // While we could have a single lock for these, it may risk unblocking // GC waiters when alloc failure GC cycle finishes. We want instead - // to make complete explicit cycle for for demanding customers. + // to make complete explicit cycle for demanding customers. 
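The _control_lock and _regulator_lock declared just below pair with the request_concurrent_gc and notify_control_thread logic earlier in this patch: a requester claims the single pending-cause slot with a compare-and-swap, wakes the control thread, and then blocks on the regulator lock until the GC mode changes. A minimal, self-contained model of that handshake, with std::atomic and std::condition_variable standing in for HotSpot's Atomic and Monitor (invented names; a sketch, not the patch's code):

// Minimal model of the claim-then-wait request handshake. Invented names;
// std primitives stand in for HotSpot's Atomic and Monitor.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

enum Cause { NO_GC, CONCURRENT_GC };
enum Mode  { NONE, RUNNING };

static std::atomic<Cause> requested_cause{NO_GC};
static std::mutex regulator_lock;
static std::condition_variable regulator_cv;
static Mode mode = NONE;  // guarded by regulator_lock

// Regulator side: claim the single pending-request slot, then wait for pickup.
static bool request_concurrent_gc() {
  Cause expected = NO_GC;
  if (!requested_cause.compare_exchange_strong(expected, CONCURRENT_GC)) {
    return false;  // another request is already pending
  }
  std::unique_lock<std::mutex> ml(regulator_lock);
  regulator_cv.wait(ml, [] { return mode != NONE; });
  return true;
}

// Control-thread side: consume the request and publish the mode change.
static void control_thread_starts_cycle() {
  requested_cause.store(NO_GC);
  {
    std::lock_guard<std::mutex> lg(regulator_lock);
    mode = RUNNING;
  }
  regulator_cv.notify_all();
}

int main() {
  std::thread control(control_thread_starts_cycle);
  bool accepted = request_concurrent_gc();
  control.join();
  return accepted ? 0 : 1;
}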
Monitor _alloc_failure_waiters_lock; Monitor _gc_waiters_lock; + Monitor _control_lock; + Monitor _regulator_lock; ShenandoahPeriodicTask _periodic_task; ShenandoahPeriodicPacerNotify _periodic_pacer_notify_task; public: + typedef enum { + none, + concurrent_normal, + stw_degenerated, + stw_full, + bootstrapping_old, + servicing_old + } GCMode; + void run_service(); void stop_service(); + size_t get_gc_id(); + private: - ShenandoahSharedFlag _gc_requested; + ShenandoahSharedFlag _allow_old_preemption; + ShenandoahSharedFlag _preemption_requested; ShenandoahSharedFlag _alloc_failure_gc; + ShenandoahSharedFlag _humongous_alloc_failure_gc; ShenandoahSharedFlag _graceful_shutdown; - ShenandoahSharedFlag _heap_changed; ShenandoahSharedFlag _do_counters_update; ShenandoahSharedFlag _force_counters_update; - GCCause::Cause _requested_gc_cause; + + GCCause::Cause _requested_gc_cause; + volatile ShenandoahGenerationType _requested_generation; ShenandoahGC::ShenandoahDegenPoint _degen_point; + ShenandoahGeneration* _degen_generation; shenandoah_padding(0); volatile size_t _allocs_seen; shenandoah_padding(1); volatile size_t _gc_id; shenandoah_padding(2); + volatile GCMode _mode; + shenandoah_padding(3); + // Returns true if the cycle has been cancelled or degenerated. bool check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point); - void service_concurrent_normal_cycle(GCCause::Cause cause); + + // Returns true if the old generation marking completed (i.e., final mark executed for old generation). + bool resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause); + void service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool reset_old_bitmap_specially); void service_stw_full_cycle(GCCause::Cause cause); void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point); void service_uncommit(double shrink_before, size_t shrink_until); - bool try_set_alloc_failure_gc(); + // Return true if setting the flag which indicates allocation failure succeeds. + bool try_set_alloc_failure_gc(bool is_humongous); + + // Notify threads waiting for GC to complete. void notify_alloc_failure_waiters(); + + // True if allocation failure flag has been set. bool is_alloc_failure_gc(); void reset_gc_id(); void update_gc_id(); - size_t get_gc_id(); void notify_gc_waiters(); @@ -113,9 +134,16 @@ class ShenandoahControlThread: public ConcurrentGCThread { void handle_requested_gc(GCCause::Cause cause); bool is_explicit_gc(GCCause::Cause cause) const; + bool is_implicit_gc(GCCause::Cause cause) const; + + // Returns true if the old generation marking was interrupted to allow a young cycle. + bool preempt_old_marking(ShenandoahGenerationType generation); + // Returns true if the soft maximum heap has been changed using management APIs. bool check_soft_max_changed() const; + void process_phase_timings(const ShenandoahHeap* heap); + public: // Constructor ShenandoahControlThread(); @@ -130,6 +158,8 @@ class ShenandoahControlThread: public ConcurrentGCThread { void handle_alloc_failure_evac(size_t words); void request_gc(GCCause::Cause cause); + // Return true if the request to start a concurrent GC for the given generation succeeded. 
+ bool request_concurrent_gc(ShenandoahGenerationType generation); void handle_counters_update(); void handle_force_counters_update(); @@ -142,6 +172,30 @@ class ShenandoahControlThread: public ConcurrentGCThread { void start(); void prepare_for_graceful_shutdown(); bool in_graceful_shutdown(); + + void service_concurrent_normal_cycle(ShenandoahHeap* heap, + const ShenandoahGenerationType generation, + GCCause::Cause cause); + + void service_concurrent_old_cycle(ShenandoahHeap* heap, + GCCause::Cause &cause); + + void set_gc_mode(GCMode new_mode); + GCMode gc_mode() { + return _mode; + } + + static ShenandoahGenerationType select_global_generation(); + + private: + static const char* gc_mode_name(GCMode mode); + void notify_control_thread(); + + void service_concurrent_cycle(ShenandoahHeap* heap, + ShenandoahGeneration* generation, + GCCause::Cause &cause, + bool do_old_gc_bootstrap); + }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONTROLTHREAD_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index e7cf402a527..ef6bbcbadd9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,26 +30,38 @@ #include "gc/shenandoah/shenandoahConcurrentMark.hpp" #include "gc/shenandoah/shenandoahDegeneratedGC.hpp" #include "gc/shenandoah/shenandoahFullGC.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMetrics.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" #include "gc/shenandoah/shenandoahSTWMark.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "runtime/vmThread.hpp" #include "utilities/events.hpp" -ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) : +ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) : ShenandoahGC(), - _degen_point(degen_point) { + _degen_point(degen_point), + _generation(generation), + _abbreviated(false) { } bool ShenandoahDegenGC::collect(GCCause::Cause cause) { vmop_degenerated(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->mode()->is_generational()) { + bool is_bootstrap_gc = heap->old_generation()->state() == ShenandoahOldGeneration::BOOTSTRAPPING; + heap->mmu_tracker()->record_degenerated(GCId::current(), is_bootstrap_gc); + const char* msg = is_bootstrap_gc? 
"At end of Degenerated Bootstrap Old GC": "At end of Degenerated Young GC"; + heap->log_heap_status(msg); + } return true; } @@ -64,7 +77,6 @@ void ShenandoahDegenGC::entry_degenerated() { ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */); EventMark em("%s", msg); ShenandoahHeap* const heap = ShenandoahHeap::heap(); - ShenandoahWorkerScope scope(heap->workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(), "stw degenerated gc"); @@ -79,7 +91,27 @@ void ShenandoahDegenGC::op_degenerated() { // Degenerated GC is STW, but it can also fail. Current mechanics communicates // GC failure via cancelled_concgc() flag. So, if we detect the failure after // some phase, we have to upgrade the Degenerate GC to Full GC. - heap->clear_cancelled_gc(); + heap->clear_cancelled_gc(true /* clear oom handler */); + +#ifdef ASSERT + if (heap->mode()->is_generational()) { + ShenandoahOldGeneration* old_generation = heap->old_generation(); + if (!heap->is_concurrent_old_mark_in_progress()) { + // If we are not marking the old generation, there should be nothing in the old mark queues + assert(old_generation->task_queues()->is_empty(), "Old gen task queues should be empty"); + } + + if (_generation->is_global()) { + // If we are in a global cycle, the old generation should not be marking. It is, however, + // allowed to be holding regions for evacuation or coalescing. + ShenandoahOldGeneration::State state = old_generation->state(); + assert(state == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP + || state == ShenandoahOldGeneration::EVACUATING + || state == ShenandoahOldGeneration::FILLING, + "Old generation cannot be in state: %s", old_generation->state_name()); + } + } +#endif ShenandoahMetricsSnapshot metrics; metrics.snap_before(); @@ -95,17 +127,53 @@ void ShenandoahDegenGC::op_degenerated() { // space. It makes little sense to wait for Full GC to reclaim as much as it can, when // we can do the most aggressive degen cycle, which includes processing references and // class unloading, unless those features are explicitly disabled. - // + // Note that we can only do this for "outside-cycle" degens, otherwise we would risk + // changing the cycle parameters mid-cycle during concurrent -> degenerated handover. + heap->set_unload_classes(_generation->heuristics()->can_unload_classes() && + (!heap->mode()->is_generational() || _generation->is_global())); + + if (heap->mode()->is_generational() && + (_generation->is_young() || (_generation->is_global() && ShenandoahVerify))) { + // Swap remembered sets for young, or if the verifier will run during a global collect + // TODO: This path should not depend on ShenandoahVerify + _generation->swap_remembered_set(); + } + + case _degenerated_roots: // Degenerated from concurrent root mark, reset the flag for STW mark - if (heap->is_concurrent_mark_in_progress()) { - ShenandoahConcurrentMark::cancel(); - heap->set_concurrent_mark_in_progress(false); + if (!heap->mode()->is_generational()) { + if (heap->is_concurrent_mark_in_progress()) { + heap->cancel_concurrent_mark(); + } + } else { + if (_generation->is_concurrent_mark_in_progress()) { + // We want to allow old generation marking to be punctuated by young collections + // (even if they have degenerated). If this is a global cycle, we'd have cancelled + // the entire old gc before coming into this switch. Note that cancel_marking on + // the generation does NOT abandon incomplete SATB buffers as cancel_concurrent_mark does. 
+        // We need to separate out the old pointers, which is done below.
+        _generation->cancel_marking();
+      }
+
+      if (heap->is_concurrent_mark_in_progress()) {
+        // If either old or young marking is in progress, the SATB barrier will be enabled.
+        // The SATB buffer may hold a mix of old and young pointers. The old pointers need to be
+        // transferred to the old generation mark queues and the young pointers are NOT part
+        // of this snapshot, so they must be dropped here. It is safe to drop them here because
+        // we will rescan the roots on this safepoint.
+        heap->transfer_old_pointers_from_satb();
+      }
+    }
 
-    // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
-    // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
-    heap->set_unload_classes(heap->heuristics()->can_unload_classes());
+    if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
+      // We only need this if the concurrent cycle has already swapped the card tables.
+      // Marking will use the 'read' table, but interesting pointers may have been
+      // recorded in the 'write' table in the time between the cancelled concurrent cycle
+      // and this degenerated cycle. These pointers need to be included in the 'read' table
+      // used to scan the remembered set during the STW mark which follows here.
+      _generation->merge_write_table();
+    }
 
     op_reset();
 
@@ -169,7 +237,6 @@ void ShenandoahDegenGC::op_degenerated() {
     {
       heap->sync_pinned_region_status();
       heap->collection_set()->clear_current_index();
-
       ShenandoahHeapRegion* r;
       while ((r = heap->collection_set()->next()) != nullptr) {
         if (r->is_pinned()) {
@@ -188,11 +255,18 @@
       }
     }
 
+    // Update collector state regardless of whether or not there are forwarded objects
+    heap->set_evacuation_in_progress(false);
+    heap->set_concurrent_weak_root_in_progress(false);
+    heap->set_concurrent_strong_root_in_progress(false);
+
     // If heuristics thinks we should do the cycle, this flag would be set,
     // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
     if (heap->has_forwarded_objects()) {
       op_init_updaterefs();
       assert(!heap->cancelled_gc(), "STW reference update can not OOM");
+    } else {
+      _abbreviated = true;
     }
 
   case _degenerated_updaterefs:
@@ -206,12 +280,62 @@
     // In above case, update roots should disarm them
     ShenandoahCodeRoots::disarm_nmethods();
 
+    if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
+      // This is still necessary for degenerated cycles because the degeneration point may occur
+      // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_updaterefs for
+      // a more detailed explanation.
+      heap->transfer_old_pointers_from_satb();
+    }
+
     op_cleanup_complete();
 
+    // We defer generation resizing actions until after cset regions have been recycled.
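The hunk that follows implements the deferred resizing described in the comment above. As a simplified sketch of just the decision it encodes (invented names; the patch expects at most one of surplus and deficit to be non-zero, and the transferred count must match the chosen direction):

// Simplified sketch of the region-transfer decision. Invented names.
#include <cstddef>
#include <cstdio>

struct TransferPlan {
  const char* destination;  // which generation receives regions
  size_t      count;        // how many regions move
};

static TransferPlan plan_transfer(size_t old_surplus, size_t old_deficit) {
  if (old_surplus > 0) {
    return { "young", old_surplus };  // old has spare regions: give them to young
  }
  if (old_deficit > 0) {
    return { "old", old_deficit };    // old is short: take regions from young
  }
  return { "none", 0 };               // balanced: nothing moves
}

int main() {
  TransferPlan p = plan_transfer(0, 3);  // a deficit of 3 moves 3 regions to old
  std::printf("transfer %zu regions to %s\n", p.count, p.destination);
  return 0;
}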
+    if (heap->mode()->is_generational()) {
+      size_t old_region_surplus = heap->get_old_region_surplus();
+      size_t old_region_deficit = heap->get_old_region_deficit();
+      bool success;
+      size_t region_xfer;
+      const char* region_destination;
+      if (old_region_surplus) {
+        region_xfer = old_region_surplus;
+        region_destination = "young";
+        success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
+      } else if (old_region_deficit) {
+        region_xfer = old_region_deficit;
+        region_destination = "old";
+        success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
+        if (!success) {
+          heap->old_heuristics()->trigger_cannot_expand();
+        }
+      } else {
+        region_destination = "none";
+        region_xfer = 0;
+        success = true;
+      }
+
+      size_t young_available = heap->young_generation()->available();
+      size_t old_available = heap->old_generation()->available();
+      log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
+                         SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
+                         success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
+                         byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
+                         byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
+
+      heap->set_old_region_surplus(0);
+      heap->set_old_region_deficit(0);
+    }
     break;
 
   default:
     ShouldNotReachHere();
   }
 
+  if (heap->mode()->is_generational()) {
+    // In case degeneration interrupted concurrent evacuation or update references,
+    // we need to clean up transient state. Otherwise, these actions have no effect.
+    heap->set_young_evac_reserve(0);
+    heap->set_old_evac_reserve(0);
+    heap->set_promoted_reserve(0);
+  }
+
   if (ShenandoahVerify) {
     heap->verifier()->verify_after_degenerated();
   }
@@ -230,23 +354,24 @@ void ShenandoahDegenGC::op_degenerated() {
     op_degenerated_futile();
   } else {
     heap->notify_gc_progress();
+    heap->shenandoah_policy()->record_success_degenerated(_generation->is_young(), _abbreviated);
+    _generation->heuristics()->record_success_degenerated();
   }
 }
 
 void ShenandoahDegenGC::op_reset() {
-  ShenandoahHeap::heap()->prepare_gc();
+  _generation->prepare_gc();
 }
 
 void ShenandoahDegenGC::op_mark() {
-  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
+  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
-  ShenandoahSTWMark mark(false /*full gc*/);
-  mark.clear();
+  ShenandoahSTWMark mark(_generation, false /*full gc*/);
   mark.mark();
 }
 
 void ShenandoahDegenGC::op_finish_mark() {
-  ShenandoahConcurrentMark mark;
+  ShenandoahConcurrentMark mark(_generation);
   mark.finish_mark();
 }
 
@@ -258,8 +383,9 @@ void ShenandoahDegenGC::op_prepare_evacuation() {
 
   // STW cleanup weak roots and unload classes
   heap->parallel_cleaning(false /*full gc*/);
+
   // Prepare regions and collection set
-  heap->prepare_regions_and_collection_set(false /*concurrent*/);
+  _generation->prepare_regions_and_collection_set(false /*concurrent*/);
 
   // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
   // This is needed for two reasons.
Strong one: new allocations would be with new freeset, @@ -271,13 +397,23 @@ void ShenandoahDegenGC::op_prepare_evacuation() { heap->tlabs_retire(false); } - if (!heap->collection_set()->is_empty()) { + size_t humongous_regions_promoted = heap->get_promotable_humongous_regions(); + size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place(); + if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) { + // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place. + // Degenerated evacuation takes responsibility for registering objects and setting the remembered set cards to dirty. + + if (ShenandoahVerify) { + heap->verifier()->verify_before_evacuation(); + } + heap->set_evacuation_in_progress(true); - heap->set_has_forwarded_objects(true); if(ShenandoahVerify) { heap->verifier()->verify_during_evacuation(); } + + heap->set_has_forwarded_objects(!heap->collection_set()->is_empty()); } else { if (ShenandoahVerify) { heap->verifier()->verify_after_concmark(); @@ -301,10 +437,6 @@ void ShenandoahDegenGC::op_evacuate() { void ShenandoahDegenGC::op_init_updaterefs() { // Evacuation has completed ShenandoahHeap* const heap = ShenandoahHeap::heap(); - heap->set_evacuation_in_progress(false); - heap->set_concurrent_weak_root_in_progress(false); - heap->set_concurrent_strong_root_in_progress(false); - heap->prepare_update_heap_references(false /*concurrent*/); heap->set_update_refs_in_progress(true); } @@ -343,33 +475,36 @@ void ShenandoahDegenGC::op_cleanup_complete() { } void ShenandoahDegenGC::op_degenerated_fail() { - log_info(gc)("Cannot finish degeneration, upgrading to Full GC"); - ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full(); - - ShenandoahFullGC full_gc; - full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc); + upgrade_to_full(); } void ShenandoahDegenGC::op_degenerated_futile() { - ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full(); - ShenandoahFullGC full_gc; - full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc); + upgrade_to_full(); } const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const { switch (point) { case _degenerated_unset: - return "Pause Degenerated GC ()"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " ()"); case _degenerated_outside_cycle: - return "Pause Degenerated GC (Outside of Cycle)"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Outside of Cycle)"); + case _degenerated_roots: + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Roots)"); case _degenerated_mark: - return "Pause Degenerated GC (Mark)"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Mark)"); case _degenerated_evac: - return "Pause Degenerated GC (Evacuation)"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Evacuation)"); case _degenerated_updaterefs: - return "Pause Degenerated GC (Update Refs)"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Update Refs)"); default: ShouldNotReachHere(); - return "ERROR"; + SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (?)"); } } + +void ShenandoahDegenGC::upgrade_to_full() { + log_info(gc)("Degenerate GC upgrading to Full GC"); + 
ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full(); + ShenandoahFullGC full_gc; + full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp index 8f6f71d52c2..ed2c0cce983 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp @@ -28,14 +28,17 @@ #include "gc/shenandoah/shenandoahGC.hpp" class VM_ShenandoahDegeneratedGC; +class ShenandoahGeneration; class ShenandoahDegenGC : public ShenandoahGC { friend class VM_ShenandoahDegeneratedGC; private: const ShenandoahDegenPoint _degen_point; + ShenandoahGeneration* _generation; + bool _abbreviated; public: - ShenandoahDegenGC(ShenandoahDegenPoint degen_point); + ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation); bool collect(GCCause::Cause cause); private: @@ -48,6 +51,7 @@ class ShenandoahDegenGC : public ShenandoahGC { void op_finish_mark(); void op_prepare_evacuation(); void op_cleanup_early(); + void op_evacuate(); void op_init_updaterefs(); void op_updaterefs(); @@ -58,6 +62,8 @@ class ShenandoahDegenGC : public ShenandoahGC { void op_degenerated_futile(); void op_degenerated_fail(); + void upgrade_to_full(); + const char* degen_event_message(ShenandoahDegenPoint point) const; }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp index 5c49c2edbb2..4e9a0759205 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp @@ -26,7 +26,6 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_INLINE_HPP #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp" - #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "runtime/atomic.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp new file mode 100644 index 00000000000..d311c04f3e9 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp @@ -0,0 +1,175 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahAgeCensus.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahEvacTracker.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "gc/shenandoah/shenandoahRootProcessor.hpp" +#include "runtime/threadSMR.inline.hpp" +#include "runtime/thread.hpp" + +ShenandoahEvacuationStats::ShenandoahEvacuationStats(bool generational) + : _evacuations_completed(0), _bytes_completed(0), + _evacuations_attempted(0), _bytes_attempted(0), + _use_age_table(generational && (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring)) { + if (_use_age_table) { + _age_table = new AgeTable(false); + } +} + +AgeTable* ShenandoahEvacuationStats::age_table() const { + assert(_use_age_table, "Don't call"); + return _age_table; +} + +void ShenandoahEvacuationStats::begin_evacuation(size_t bytes) { + ++_evacuations_attempted; + _bytes_attempted += bytes; +} + +void ShenandoahEvacuationStats::end_evacuation(size_t bytes) { + ++_evacuations_completed; + _bytes_completed += bytes; +} + +void ShenandoahEvacuationStats::record_age(size_t bytes, uint age) { + assert(_use_age_table, "Don't call!"); + if (age <= markWord::max_age) { // Filter age sentinel. + _age_table->add(age, bytes >> LogBytesPerWord); + } +} + +void ShenandoahEvacuationStats::accumulate(const ShenandoahEvacuationStats* other) { + _evacuations_completed += other->_evacuations_completed; + _bytes_completed += other->_bytes_completed; + _evacuations_attempted += other->_evacuations_attempted; + _bytes_attempted += other->_bytes_attempted; + if (_use_age_table) { + _age_table->merge(other->age_table()); + } +} + +void ShenandoahEvacuationStats::reset() { + _evacuations_completed = _evacuations_attempted = 0; + _bytes_completed = _bytes_attempted = 0; + if (_use_age_table) { + _age_table->clear(); + } +} + +void ShenandoahEvacuationStats::print_on(outputStream* st) { + size_t abandoned_size = _bytes_attempted - _bytes_completed; + size_t abandoned_count = _evacuations_attempted - _evacuations_completed; + st->print_cr("Evacuated " SIZE_FORMAT "%s across " SIZE_FORMAT " objects, " + "abandoned " SIZE_FORMAT "%s across " SIZE_FORMAT " objects.", + byte_size_in_proper_unit(_bytes_completed), proper_unit_for_byte_size(_bytes_completed), + _evacuations_completed, + byte_size_in_proper_unit(abandoned_size), proper_unit_for_byte_size(abandoned_size), + abandoned_count); + if (_use_age_table) { + _age_table->print_on(st, ShenandoahHeap::heap()->age_census()->tenuring_threshold()); + } +} + +void ShenandoahEvacuationTracker::print_global_on(outputStream* st) { + print_evacuations_on(st, &_workers_global, &_mutators_global); +} + +void ShenandoahEvacuationTracker::print_evacuations_on(outputStream* st, + ShenandoahEvacuationStats* workers, + ShenandoahEvacuationStats* mutators) { + st->print("Workers: "); + workers->print_on(st); + st->cr(); + st->print("Mutators: "); + mutators->print_on(st); + st->cr(); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (_generational) { + AgeTable young_region_ages(false); + for (uint i = 0; i < heap->num_regions(); ++i) { + ShenandoahHeapRegion* r = heap->get_region(i); + if (r->is_young()) { + young_region_ages.add(r->age(), r->get_live_data_words()); + } + } + uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + st->print("Young regions: "); + young_region_ages.print_on(st, tenuring_threshold); + st->cr(); + } +} + +class ShenandoahStatAggregator : public 
ThreadClosure { +public: + ShenandoahEvacuationStats* _target; + explicit ShenandoahStatAggregator(ShenandoahEvacuationStats* target) : _target(target) {} + virtual void do_thread(Thread* thread) override { + ShenandoahEvacuationStats* local = ShenandoahThreadLocalData::evacuation_stats(thread); + _target->accumulate(local); + local->reset(); + } +}; + +ShenandoahCycleStats ShenandoahEvacuationTracker::flush_cycle_to_global() { + ShenandoahEvacuationStats mutators(_generational), workers(_generational); + + ThreadsListHandle java_threads_iterator; + ShenandoahStatAggregator aggregate_mutators(&mutators); + java_threads_iterator.list()->threads_do(&aggregate_mutators); + + ShenandoahStatAggregator aggregate_workers(&workers); + ShenandoahHeap::heap()->gc_threads_do(&aggregate_workers); + + _mutators_global.accumulate(&mutators); + _workers_global.accumulate(&workers); + + if (_generational && (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring)) { + // Ingest population vectors into the heap's global census + // data, and use it to compute an appropriate tenuring threshold + // for use in the next cycle. + ShenandoahAgeCensus* census = ShenandoahHeap::heap()->age_census(); + census->prepare_for_census_update(); + // The first argument is used for any age 0 cohort population that we may otherwise have + // missed during the census. This is non-zero only when census happens at marking. + census->update_census(0, _mutators_global.age_table(), _workers_global.age_table()); + } + + return {workers, mutators}; +} + +void ShenandoahEvacuationTracker::begin_evacuation(Thread* thread, size_t bytes) { + ShenandoahThreadLocalData::begin_evacuation(thread, bytes); +} + +void ShenandoahEvacuationTracker::end_evacuation(Thread* thread, size_t bytes) { + ShenandoahThreadLocalData::end_evacuation(thread, bytes); +} + +void ShenandoahEvacuationTracker::record_age(Thread* thread, size_t bytes, uint age) { + ShenandoahThreadLocalData::record_age(thread, bytes, age); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.hpp new file mode 100644 index 00000000000..41137c63cfc --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.hpp @@ -0,0 +1,85 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
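The ShenandoahStatAggregator pattern above, used by flush_cycle_to_global, folds each thread's local statistics into a cycle-wide accumulator and resets the local copy so the next cycle starts clean. A minimal, self-contained model (invented names; a plain std::vector stands in for the JVM's mutator and GC thread lists):

// Minimal model of per-thread stat flushing. Invented names.
#include <cstddef>
#include <cstdio>
#include <vector>

struct EvacStats {
  size_t bytes_attempted = 0;
  size_t bytes_completed = 0;
  void accumulate(const EvacStats& other) {
    bytes_attempted += other.bytes_attempted;
    bytes_completed += other.bytes_completed;
  }
  void reset() { bytes_attempted = bytes_completed = 0; }
};

// Fold every thread's stats into 'global' and clear them for the next cycle.
static void flush_cycle_to_global(std::vector<EvacStats>& per_thread, EvacStats& global) {
  for (EvacStats& local : per_thread) {
    global.accumulate(local);
    local.reset();
  }
}

int main() {
  std::vector<EvacStats> threads(4);
  threads[0].bytes_attempted = 100;  // one thread abandoned 20 bytes mid-copy
  threads[0].bytes_completed = 80;
  EvacStats global;
  flush_cycle_to_global(threads, global);
  std::printf("attempted=%zu completed=%zu abandoned=%zu\n",
              global.bytes_attempted, global.bytes_completed,
              global.bytes_attempted - global.bytes_completed);
  return 0;
}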
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHEVACTRACKER_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHEVACTRACKER_HPP + +#include "gc/shared/ageTable.hpp" +#include "utilities/ostream.hpp" + +class ShenandoahEvacuationStats : public CHeapObj { +private: + size_t _evacuations_completed; + size_t _bytes_completed; + size_t _evacuations_attempted; + size_t _bytes_attempted; + + bool _use_age_table; + AgeTable* _age_table; + + public: + ShenandoahEvacuationStats(bool generational); + + AgeTable* age_table() const; + + void begin_evacuation(size_t bytes); + void end_evacuation(size_t bytes); + void record_age(size_t bytes, uint age); + + void print_on(outputStream* st); + void accumulate(const ShenandoahEvacuationStats* other); + void reset(); +}; + +struct ShenandoahCycleStats { + ShenandoahEvacuationStats workers; + ShenandoahEvacuationStats mutators; +}; + +class ShenandoahEvacuationTracker : public CHeapObj { +private: + bool _generational; + + ShenandoahEvacuationStats _workers_global; + ShenandoahEvacuationStats _mutators_global; + +public: + ShenandoahEvacuationTracker(bool generational) : + _generational(generational), + _workers_global(generational), + _mutators_global(generational) {} + + void begin_evacuation(Thread* thread, size_t bytes); + void end_evacuation(Thread* thread, size_t bytes); + void record_age(Thread* thread, size_t bytes, uint age); + + void print_global_on(outputStream* st); + void print_evacuations_on(outputStream* st, + ShenandoahEvacuationStats* workers, + ShenandoahEvacuationStats* mutators); + + ShenandoahCycleStats flush_cycle_to_global(); +}; + +#endif //SHARE_GC_SHENANDOAH_SHENANDOAHEVACTRACKER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 72a3f411ea0..10c8e3e0d62 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,44 +25,481 @@ #include "precompiled.hpp" #include "gc/shared/tlab_globals.hpp" +#include "gc/shenandoah/shenandoahAffiliation.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" #include "runtime/orderAccess.hpp" +ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) : + _max(max_regions), + _free_set(free_set), + _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()) +{ + _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC); + clear_internal(); +} + +ShenandoahSetsOfFree::~ShenandoahSetsOfFree() { + FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership); +} + + +void ShenandoahSetsOfFree::clear_internal() { + for (size_t idx = 0; idx < _max; idx++) { + _membership[idx] = NotFree; + } + + for (size_t idx = 0; idx < NumFreeSets; idx++) { + _leftmosts[idx] = _max; + _rightmosts[idx] = 0; + _leftmosts_empty[idx] = _max; + _rightmosts_empty[idx] = 0; + _capacity_of[idx] = 0; + _used_by[idx] = 0; + } + + _left_to_right_bias[Mutator] = true; + _left_to_right_bias[Collector] = false; + _left_to_right_bias[OldCollector] = false; + + _region_counts[Mutator] = 0; + _region_counts[Collector] = 0; + _region_counts[OldCollector] = 0; + _region_counts[NotFree] = _max; +} + +void ShenandoahSetsOfFree::clear_all() { + clear_internal(); +} + +void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) { + assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset"); + _used_by[which_set] += bytes; + assert (_used_by[which_set] <= _capacity_of[which_set], + "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT, + _used_by[which_set], _capacity_of[which_set], bytes); +} + +inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) { + if (idx == _leftmosts[set]) { + while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) { + _leftmosts[set]++; + } + if (_leftmosts_empty[set] < _leftmosts[set]) { + // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested. + _leftmosts_empty[set] = _leftmosts[set]; + } + } + if (idx == _rightmosts[set]) { + while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) { + _rightmosts[set]--; + } + if (_rightmosts_empty[set] > _rightmosts[set]) { + // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested. 
+ _rightmosts_empty[set] = _rightmosts[set]; + } + } +} + +inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) { + if (region_capacity == _region_size_bytes) { + if (_leftmosts_empty[set] > idx) { + _leftmosts_empty[set] = idx; + } + if (_rightmosts_empty[set] < idx) { + _rightmosts_empty[set] = idx; + } + } + if (_leftmosts[set] > idx) { + _leftmosts[set] = idx; + } + if (_rightmosts[set] < idx) { + _rightmosts[set] = idx; + } +} + +void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) { + assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + ShenandoahFreeMemoryType orig_set = membership(idx); + assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free"); + _membership[idx] = NotFree; + shrink_bounds_if_touched(orig_set, idx); + + _region_counts[orig_set]--; + _region_counts[NotFree]++; +} + + +void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) { + assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (_membership[idx] == NotFree, "Cannot make free if already free"); + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + _membership[idx] = which_set; + _capacity_of[which_set] += region_capacity; + expand_bounds_maybe(which_set, idx, region_capacity); + + _region_counts[NotFree]--; + _region_counts[which_set]++; +} + +void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) { + assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid"); + ShenandoahFreeMemoryType orig_set = _membership[idx]; + assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free"); + // Expected transitions: + // During rebuild: Mutator => Collector + // Mutator empty => Collector + // During flip_to_gc: + // Mutator empty => Collector + // Mutator empty => Old Collector + // At start of update refs: + // Collector => Mutator + // OldCollector Empty => Mutator + assert (((region_capacity <= _region_size_bytes) && + ((orig_set == Mutator) && (new_set == Collector)) || + ((orig_set == Collector) && (new_set == Mutator))) || + ((region_capacity == _region_size_bytes) && + ((orig_set == Mutator) && (new_set == Collector)) || + ((orig_set == OldCollector) && (new_set == Mutator)) || + (new_set == OldCollector)), "Unexpected movement between sets"); + + _membership[idx] = new_set; + _capacity_of[orig_set] -= region_capacity; + shrink_bounds_if_touched(orig_set, idx); + + _capacity_of[new_set] += region_capacity; + expand_bounds_maybe(new_set, idx, region_capacity); + + _region_counts[orig_set]--; + _region_counts[new_set]++; +} + +inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const { + assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + return _membership[idx]; +} + + // Returns true iff region idx is in the test_set free_set. Before returning true, asserts that the free + // set is not empty. Requires that test_set != NotFree or NumFreeSets. 
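Before the in_free_set accessor that the comment above describes, it is worth spelling out the bookkeeping contract shared by make_free, move_to_set, and remove_from_free_sets: the membership array, the per-set capacity sums, and the per-set region counts must stay mutually consistent on every transition. A minimal model of the move operation (invented names; a fixed-size toy heap; a sketch, not the patch's code):

// Minimal model of the move_to_set bookkeeping contract. Invented names.
#include <cassert>
#include <cstddef>

enum SetId { NotFree, Mutator, Collector, OldCollector, NumSets };

struct Partitions {
  SetId  membership[16] = { NotFree };        // tiny fixed heap for the sketch
  size_t capacity_of[NumSets] = { 0 };
  size_t count_of[NumSets] = { 16, 0, 0, 0 }; // everything starts NotFree

  void move_to_set(size_t idx, SetId to, size_t region_capacity) {
    SetId from = membership[idx];
    assert(from != to);
    membership[idx] = to;
    capacity_of[from] -= region_capacity;  // capacity leaves the old partition
    capacity_of[to]   += region_capacity;  // and joins the new one
    count_of[from]--;
    count_of[to]++;
  }
};

int main() {
  Partitions p;
  p.membership[3] = Mutator;             // pretend region 3 was made free earlier
  p.count_of[NotFree]--; p.count_of[Mutator]++;
  p.capacity_of[Mutator] = 4096;
  p.move_to_set(3, Collector, 4096);     // e.g. a flip_to_gc-style transition
  assert(p.capacity_of[Mutator] == 0 && p.capacity_of[Collector] == 4096);
  return 0;
}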
+inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const { + assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + if (_membership[idx] == test_set) { + assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity"); + return true; + } else { + return false; + } +} + +inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + size_t idx = _leftmosts[which_set]; + if (idx >= _max) { + return _max; + } else { + assert (in_free_set(idx, which_set), "left-most region must be free"); + return idx; + } +} + +inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + size_t idx = _rightmosts[which_set]; + assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free"); + return idx; +} + +inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + return (leftmost(which_set) > rightmost(which_set)); +} + +size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) { + if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) { + _leftmosts_empty[which_set] = idx; + return idx; + } + } + _leftmosts_empty[which_set] = _max; + _rightmosts_empty[which_set] = 0; + return _max; +} + +inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) { + if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) { + _rightmosts_empty[which_set] = idx; + return idx; + } + } + _leftmosts_empty[which_set] = _max; + _rightmosts_empty[which_set] = 0; + return 0; +} + +inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + return _left_to_right_bias[which_set]; +} + +void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + shenandoah_assert_heaplocked(); + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + + size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2; + size_t available_in_first_half = 0; + size_t available_in_second_half = 0; + + for (size_t index = _leftmosts[which_set]; index < middle; index++) { + if (in_free_set(index, which_set)) { + ShenandoahHeapRegion* r = heap->get_region(index); + available_in_first_half += r->free(); + } + } + for (size_t index = middle; index <= _rightmosts[which_set]; index++) { + if (in_free_set(index, which_set)) { + ShenandoahHeapRegion* r = heap->get_region(index); + available_in_second_half += r->free(); + } + } + + // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed. 
+ // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation + // request. Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the + // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate. + + // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the + // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left. + _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half); +} + +#ifdef ASSERT +void ShenandoahSetsOfFree::assert_bounds() { + + size_t leftmosts[NumFreeSets]; + size_t rightmosts[NumFreeSets]; + size_t empty_leftmosts[NumFreeSets]; + size_t empty_rightmosts[NumFreeSets]; + + for (int i = 0; i < NumFreeSets; i++) { + leftmosts[i] = _max; + empty_leftmosts[i] = _max; + rightmosts[i] = 0; + empty_rightmosts[i] = 0; + } + + for (size_t i = 0; i < _max; i++) { + ShenandoahFreeMemoryType set = membership(i); + switch (set) { + case NotFree: + break; + + case Mutator: + case Collector: + case OldCollector: + { + size_t capacity = _free_set->alloc_capacity(i); + bool is_empty = (capacity == _region_size_bytes); + assert(capacity > 0, "free regions must have allocation capacity"); + if (i < leftmosts[set]) { + leftmosts[set] = i; + } + if (is_empty && (i < empty_leftmosts[set])) { + empty_leftmosts[set] = i; + } + if (i > rightmosts[set]) { + rightmosts[set] = i; + } + if (is_empty && (i > empty_rightmosts[set])) { + empty_rightmosts[set] = i; + } + break; + } + + case NumFreeSets: + default: + ShouldNotReachHere(); + } + } + + // Performance invariants. Failing these would not break the free set, but performance would suffer. + assert (leftmost(Mutator) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Mutator), _max); + assert (rightmost(Mutator) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator), _max); + + assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator), + "leftmost region should be free: " SIZE_FORMAT, leftmost(Mutator)); + assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator), + "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator)); + + // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets. + size_t beg_off = leftmosts[Mutator]; + size_t end_off = rightmosts[Mutator]; + assert (beg_off >= leftmost(Mutator), + "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator)); + assert (end_off <= rightmost(Mutator), + "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Mutator)); + + beg_off = empty_leftmosts[Mutator]; + end_off = empty_rightmosts[Mutator]; + assert (beg_off >= leftmost_empty(Mutator), + "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator)); + assert (end_off <= rightmost_empty(Mutator), + "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Mutator)); + + // Performance invariants. Failing these would not break the free set, but performance would suffer. 
+ assert (leftmost(Collector) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Collector), _max); + assert (rightmost(Collector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector), _max); + + assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector), + "leftmost region should be free: " SIZE_FORMAT, leftmost(Collector)); + assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector), + "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector)); + + // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets. + beg_off = leftmosts[Collector]; + end_off = rightmosts[Collector]; + assert (beg_off >= leftmost(Collector), + "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector)); + assert (end_off <= rightmost(Collector), + "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Collector)); + + beg_off = empty_leftmosts[Collector]; + end_off = empty_rightmosts[Collector]; + assert (beg_off >= leftmost_empty(Collector), + "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector)); + assert (end_off <= rightmost_empty(Collector), + "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Collector)); + + // Performance invariants. Failing these would not break the free set, but performance would suffer. + assert (leftmost(OldCollector) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(OldCollector), _max); + assert (rightmost(OldCollector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector), _max); + + assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector), + "leftmost region should be free: " SIZE_FORMAT, leftmost(OldCollector)); + assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector), + "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector)); + + // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets. + beg_off = leftmosts[OldCollector]; + end_off = rightmosts[OldCollector]; + assert (beg_off >= leftmost(OldCollector), + "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector)); + assert (end_off <= rightmost(OldCollector), + "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(OldCollector)); + + beg_off = empty_leftmosts[OldCollector]; + end_off = empty_rightmosts[OldCollector]; + assert (beg_off >= leftmost_empty(OldCollector), + "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector)); + assert (end_off <= rightmost_empty(OldCollector), + "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(OldCollector)); +} +#endif + ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), - _mutator_free_bitmap(max_regions, mtGC), - _collector_free_bitmap(max_regions, mtGC), - _max(max_regions) + _free_sets(max_regions, this) { clear_internal(); } -void ShenandoahFreeSet::increase_used(size_t num_bytes) { +// This allocates from a region within the old_collector_set. 
If affiliation equals OLD, the allocation must be taken +// from a region that is_old(). Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated +// region into service. +HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation, + ShenandoahAllocRequest& req, bool& in_new_region) { shenandoah_assert_heaplocked(); - _used += num_bytes; - assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT - ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes); + size_t rightmost = + (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector); + size_t leftmost = + (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector); + if (_free_sets.alloc_from_left_bias(OldCollector)) { + // This mode picks up stragglers left by a full GC + for (size_t idx = leftmost; idx <= rightmost; idx++) { + if (_free_sets.in_free_set(idx, OldCollector)) { + ShenandoahHeapRegion* r = _heap->get_region(idx); + assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation"); + if (r->affiliation() == affiliation) { + HeapWord* result = try_allocate_in(r, req, in_new_region); + if (result != nullptr) { + return result; + } + } + } + } + } else { + // This mode picks up stragglers left by a previous concurrent GC + for (size_t count = rightmost + 1; count > leftmost; count--) { + // size_t is unsigned, need to dodge underflow when _leftmost = 0 + size_t idx = count - 1; + if (_free_sets.in_free_set(idx, OldCollector)) { + ShenandoahHeapRegion* r = _heap->get_region(idx); + assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation"); + if (r->affiliation() == affiliation) { + HeapWord* result = try_allocate_in(r, req, in_new_region); + if (result != nullptr) { + return result; + } + } + } + } + } + return nullptr; } -bool ShenandoahFreeSet::is_mutator_free(size_t idx) const { - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")", - idx, _max, _mutator_leftmost, _mutator_rightmost); - return _mutator_free_bitmap.at(idx); +void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) { + shenandoah_assert_heaplocked(); + size_t idx = region->index(); + size_t capacity = alloc_capacity(region); + assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set"); + if (capacity >= PLAB::min_size() * HeapWordSize) { + _free_sets.make_free(idx, OldCollector, capacity); + _heap->augment_promo_reserve(capacity); + } } -bool ShenandoahFreeSet::is_collector_free(size_t idx) const { - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")", - idx, _max, _collector_leftmost, _collector_rightmost); - return _collector_free_bitmap.at(idx); +HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation, + ShenandoahAllocRequest& req, bool& in_new_region) { + shenandoah_assert_heaplocked(); + size_t rightmost = + (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector); + size_t leftmost = + (affiliation == ShenandoahAffiliation::FREE)? 
_free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector); + for (size_t c = rightmost + 1; c > leftmost; c--) { + // size_t is unsigned, need to dodge underflow when _leftmost = 0 + size_t idx = c - 1; + if (_free_sets.in_free_set(idx, Collector)) { + ShenandoahHeapRegion* r = _heap->get_region(idx); + if (r->affiliation() == affiliation) { + HeapWord* result = try_allocate_in(r, req, in_new_region); + if (result != nullptr) { + return result; + } + } + } + } + log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT, + shenandoah_affiliation_name(affiliation), p2i(&req)); + return nullptr; } HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) { + shenandoah_assert_heaplocked(); + // Scan the bitmap looking for a first fit. // // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally, @@ -74,53 +512,139 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& // Free set maintains mutator and collector views, and normally they allocate in their views only, // unless we special cases for stealing and mixed allocations. + // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping. + + bool allow_new_region = true; + if (_heap->mode()->is_generational()) { + switch (req.affiliation()) { + case ShenandoahAffiliation::OLD_GENERATION: + // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero. + if (_heap->old_generation()->free_unaffiliated_regions() <= 0) { + allow_new_region = false; + } + break; + + case ShenandoahAffiliation::YOUNG_GENERATION: + // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero. + if (_heap->young_generation()->free_unaffiliated_regions() <= 0) { + allow_new_region = false; + } + break; + + case ShenandoahAffiliation::FREE: + fatal("Should request affiliation"); + + default: + ShouldNotReachHere(); + break; + } + } switch (req.type()) { case ShenandoahAllocRequest::_alloc_tlab: case ShenandoahAllocRequest::_alloc_shared: { - // Try to allocate in the mutator view - for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) { - if (is_mutator_free(idx)) { - HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region); - if (result != nullptr) { - return result; + // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations + if (!_free_sets.is_empty(Mutator)) { + // Use signed idx. Otherwise, loop will never terminate. + int leftmost = (int) _free_sets.leftmost(Mutator); + for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) { + ShenandoahHeapRegion* r = _heap->get_region(idx); + if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) { + // try_allocate_in() increases used if the allocation is successful. + HeapWord* result; + size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size(); + if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) { + return result; + } } } } - // There is no recovery. Mutator does not touch collector view at all. 
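+      // (With no recovery here, a failed mutator allocation returns to the caller, which may then trigger a GC.)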
break; } case ShenandoahAllocRequest::_alloc_gclab: - case ShenandoahAllocRequest::_alloc_shared_gc: { - // size_t is unsigned, need to dodge underflow when _leftmost = 0 + // GCLABs are for evacuation so we must be in evacuation phase. If this allocation is successful, increment + // the relevant evac_expended rather than used value. + + case ShenandoahAllocRequest::_alloc_plab: + // PLABs always reside in old-gen and are only allocated during evacuation phase. - // Fast-path: try to allocate in the collector view first - for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) { - size_t idx = c - 1; - if (is_collector_free(idx)) { - HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region); + case ShenandoahAllocRequest::_alloc_shared_gc: { + if (!_heap->mode()->is_generational()) { + // size_t is unsigned, need to dodge underflow when _leftmost = 0 + // Fast-path: try to allocate in the collector view first + for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) { + size_t idx = c - 1; + if (_free_sets.in_free_set(idx, Collector)) { + HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region); + if (result != nullptr) { + return result; + } + } + } + } else { + // First try to fit into a region that is already in use in the same generation. + HeapWord* result; + if (req.is_old()) { + result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region); + } else { + result = allocate_with_affiliation(req.affiliation(), req, in_new_region); + } + if (result != nullptr) { + return result; + } + if (allow_new_region) { + // Then try a free region that is dedicated to GC allocations. + if (req.is_old()) { + result = allocate_old_with_affiliation(FREE, req, in_new_region); + } else { + result = allocate_with_affiliation(FREE, req, in_new_region); + } if (result != nullptr) { return result; } } } - // No dice. Can we borrow space from mutator view? if (!ShenandoahEvacReserveOverflow) { return nullptr; } - // Try to steal the empty region from the mutator view - for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) { - size_t idx = c - 1; - if (is_mutator_free(idx)) { - ShenandoahHeapRegion* r = _heap->get_region(idx); - if (can_allocate_from(r)) { - flip_to_gc(r); - HeapWord *result = try_allocate_in(r, req, in_new_region); - if (result != nullptr) { - return result; + if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) { + // This allows us to flip a mutator region to old_collector + allow_new_region = true; + } + + // We should expand old-gen if this can prevent an old-gen evacuation failure. We don't care so much about + // promotion failures since they can be mitigated in a subsequent GC pass. Would be nice to know if this + // allocation request is for evacuation or promotion. Individual threads limit their use of PLAB memory for + // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used + // only for old-gen evacuations. + + // Also TODO: + // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector + // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and + // immediately trigger the start of GC. 
It is better to satisfy the allocation than to trigger an out-of-cycle
+      //      allocation failure (even if this means we have a little less memory to handle evacuations during the
+      //      subsequent GC pass).
+
+      if (allow_new_region) {
+        // Try to steal an empty region from the mutator view.
+        for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
+          size_t idx = c - 1;
+          if (_free_sets.in_free_set(idx, Mutator)) {
+            ShenandoahHeapRegion* r = _heap->get_region(idx);
+            if (can_allocate_from(r)) {
+              if (req.is_old()) {
+                flip_to_old_gc(r);
+              } else {
+                flip_to_gc(r);
+              }
+              HeapWord *result = try_allocate_in(r, req, in_new_region);
+              if (result != nullptr) {
+                log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
+                return result;
+              }
+            }
+          }
+        }
+      }
@@ -129,145 +653,247 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
       // No dice. Do not try to mix mutator and GC allocations, because
       // URWM moves due to GC allocations would expose unparsable mutator
       // allocations.
-
       break;
     }
     default:
       ShouldNotReachHere();
   }
-
   return nullptr;
 }

-HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
-  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
+// This work method takes an argument corresponding to the number of bytes
+// free in a region, and returns the largest amount in heap words that can be allocated
+// such that both of the following conditions are satisfied:
+//
+// 1. it is a multiple of card size
+// 2. any remaining shard may be filled with a filler object
+//
+// The idea is that the allocation starts and ends at card boundaries. Because
+// a region's end is card-aligned, the remainder shard that must be filled is
+// at the start of the free space.
+//
+// This is merely a helper method for such calculations.
+size_t get_usable_free_words(size_t free_bytes) {
+  // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
+  //      free is 514
+  //      usable_free is 512, which is decreased to 0
+  size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
+  assert(usable_free <= free_bytes, "Sanity check");
+  if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
+    // After aligning to card multiples, the remainder would be smaller than
+    // the minimum filler object, so we'll need to take away another card's
+    // worth to construct a filler object.
+    if (usable_free >= CardTable::card_size()) {
+      usable_free -= CardTable::card_size();
+    } else {
+      assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
+    }
+  }
+
+  return usable_free / HeapWordSize;
+}

-  if (_heap->is_concurrent_weak_root_in_progress() &&
-      r->is_trash()) {
+// Given a size argument, which is a multiple of card size, a request struct
+// for a PLAB, and an old region, return a pointer to the allocated space for
+// a PLAB which is card-aligned and where any remaining shard in the region
+// has been suitably filled by a filler object.
+// It is assumed (and assertion-checked) that such an allocation is always possible.
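+// For example, with 512-byte cards (64 heap words), a 128-word PLAB request in a region whose free space
+// begins 32 words past a card boundary is placed at the next card boundary, preceded by a 32-word filler.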
+HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
+  assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
+  assert(r->is_old(), "All PLABs reside in old-gen");
+  assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
+  assert(size % CardTable::card_size_in_words() == 0, "size must be a multiple of the card size in words, was " SIZE_FORMAT, size);
+
+  HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
+  assert(result != nullptr, "Allocation cannot fail");
+  assert(r->top() <= r->end(), "Allocation cannot span end of region");
+  assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
+  assert(((uintptr_t) result) % CardTable::card_size() == 0, "PLAB start must align with card boundary");
+
+  return result;
+}
+
+HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
+  assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
+  if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
     return nullptr;
   }
   try_recycle_trashed(r);
+  if (!r->is_affiliated()) {
+    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+    r->set_affiliation(req.affiliation());
+    if (r->is_old()) {
+      // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
+      // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
+      // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
+      // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
+      // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
+      // coalesce-and-fill processing.
+      r->end_preemptible_coalesce_and_fill();
+      _heap->clear_cards_for(r);
+      _heap->old_generation()->increment_affiliated_region_count();
+    } else {
+      _heap->young_generation()->increment_affiliated_region_count();
+    }
-  in_new_region = r->is_empty();
+    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+  } else if (r->affiliation() != req.affiliation()) {
+    assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
+           req.affiliation_name(), r->affiliation_name());
+    return nullptr;
+  }
+  in_new_region = r->is_empty();
   HeapWord* result = nullptr;
-  size_t size = req.size();
+  if (in_new_region) {
+    log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
+                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
+  }
+
+  // req.size() is in words, r->free() is in bytes.
   if (req.is_lab_alloc()) {
-    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
-    if (size > free) {
-      size = free;
+    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+      assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
+      assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABs must be allocated in old_collector_free regions");
+      // Need to ensure that PLABs are aligned to a multiple of the card size.
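+      // (This keeps both the start and the end of each PLAB on card boundaries.)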
+ // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable + // memory remaining in the region (which will also be aligned to cards). + size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words()); + size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words()); + size_t usable_free = get_usable_free_words(r->free()); + + if (adjusted_size > usable_free) { + adjusted_size = usable_free; + } + + if (adjusted_size >= adjusted_min_size) { + result = allocate_aligned_plab(adjusted_size, req, r); + } + // Otherwise, leave result == nullptr because the adjusted size is smaller than min size. + } else { + // This is a GCLAB or a TLAB allocation + size_t adjusted_size = req.size(); + size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment); + if (adjusted_size > free) { + adjusted_size = free; + } + if (adjusted_size >= req.min_size()) { + result = r->allocate(adjusted_size, req); + assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size); + req.set_actual_size(adjusted_size); + } else { + log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT + " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size()); + } } - if (size >= req.min_size()) { - result = r->allocate(size, req.type()); - assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size); + } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) { + + // inelastic PLAB + size_t size = req.size(); + size_t usable_free = get_usable_free_words(r->free()); + if (size <= usable_free) { + result = allocate_aligned_plab(size, req, r); } } else { - result = r->allocate(size, req.type()); + size_t size = req.size(); + result = r->allocate(size, req); + if (result != nullptr) { + // Record actual allocation size + req.set_actual_size(size); + } } + ShenandoahGeneration* generation = _heap->generation_for(req.affiliation()); if (result != nullptr) { // Allocation successful, bump stats: if (req.is_mutator_alloc()) { - increase_used(size * HeapWordSize); - } - - // Record actual allocation size - req.set_actual_size(size); - - if (req.is_gc_alloc()) { + assert(req.is_young(), "Mutator allocations always come from young generation."); + _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize); + } else { + assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc"); + + // For GC allocations, we advance update_watermark because the objects relocated into this memory during + // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all + // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation. + // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require + // that the PLAB be disabled for all future purposes. We may want to introduce a new service to make the + // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the + // next evacuation pass. 
r->set_update_watermark(r->top());
+      if (r->is_old()) {
+        assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
+        // For PLABs, we'll sort out the difference between evacuation and promotion usage when we retire the PLAB.
+      }
     }
   }
-  if (result == nullptr || has_no_alloc_capacity(r)) {
-    // Region cannot afford this or future allocations. Retire it.
+
+  if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
+    // Region cannot afford this and is likely to not afford future allocations. Retire it.
    //
    // While this seems a bit harsh, especially in the case when this large allocation does not
-    // fit, but the next small one would, we are risking to inflate scan times when lots of
-    // almost-full regions precede the fully-empty region where we want allocate the entire TLAB.
-    // TODO: Record first fully-empty region, and use that for large allocations
+    // fit but the next small one would, we risk inflating scan times when lots of
+    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.

    // Record the remainder as allocation waste
+    size_t idx = r->index();
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
-        increase_used(waste);
-        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
+        _free_sets.increase_used(Mutator, waste);
+        // This one request could cause several regions to be "retired", so we must accumulate the waste
+        req.set_waste((waste >> LogHeapWordSize) + req.waste());
      }
+      assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
+    } else {
+      assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
+             "Must be collector or old-collector free: " SIZE_FORMAT, idx);
    }
-
-    size_t num = r->index();
-    _collector_free_bitmap.clear_bit(num);
-    _mutator_free_bitmap.clear_bit(num);
-    // Touched the bounds? Need to update:
-    if (touches_bounds(num)) {
-      adjust_bounds();
-    }
-    assert_bounds();
+    // This region is no longer considered free (in any set)
+    _free_sets.remove_from_free_sets(idx);
+    _free_sets.assert_bounds();
  }
  return result;
}

-bool ShenandoahFreeSet::touches_bounds(size_t num) const {
-  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
-}
-
-void ShenandoahFreeSet::recompute_bounds() {
-  // Reset to the most pessimistic case:
-  _mutator_rightmost = _max - 1;
-  _mutator_leftmost = 0;
-  _collector_rightmost = _max - 1;
-  _collector_leftmost = 0;
-
-  // ...and adjust from there
-  adjust_bounds();
-}
-
-void ShenandoahFreeSet::adjust_bounds() {
-  // Rewind both mutator bounds until the next bit.
-  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
-    _mutator_leftmost++;
-  }
-  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
-    _mutator_rightmost--;
-  }
-  // Rewind both collector bounds until the next bit.
-  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
-    _collector_leftmost++;
-  }
-  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
-    _collector_rightmost--;
-  }
-}
-
 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   shenandoah_assert_heaplocked();

   size_t words_size = req.size();
   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

-  // No regions left to satisfy allocation, bye.
- if (num > mutator_count()) { - return nullptr; + assert(req.is_young(), "Humongous regions always allocated in YOUNG"); + ShenandoahGeneration* generation = _heap->generation_for(req.affiliation()); + + // Check if there are enough regions left to satisfy allocation. + if (_heap->mode()->is_generational()) { + size_t avail_young_regions = generation->free_unaffiliated_regions(); + if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) { + return nullptr; + } + } else { + if (num > _free_sets.count(Mutator)) { + return nullptr; + } } // Find the continuous interval of $num regions, starting from $beg and ending in $end, // inclusive. Contiguous allocations are biased to the beginning. - size_t beg = _mutator_leftmost; + size_t beg = _free_sets.leftmost(Mutator); size_t end = beg; while (true) { - if (end >= _max) { + if (end >= _free_sets.max()) { // Hit the end, goodbye return nullptr; } // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward. // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. - if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) { + if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) { end++; beg = end; continue; @@ -282,6 +908,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { } size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); + ShenandoahMarkingContext* const ctx = _heap->complete_marking_context(); // Initialize regions: for (size_t i = beg; i <= end; i++) { @@ -305,35 +932,43 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { used_words = ShenandoahHeapRegion::region_size_words(); } + r->set_affiliation(req.affiliation()); + r->set_update_watermark(r->bottom()); r->set_top(r->bottom() + used_words); - _mutator_free_bitmap.clear_bit(r->index()); + // While individual regions report their true use, all humongous regions are marked used in the free set. + _free_sets.remove_from_free_sets(r->index()); } + _heap->young_generation()->increase_affiliated_region_count(num); - // While individual regions report their true use, all humongous regions are - // marked used in the free set. - increase_used(ShenandoahHeapRegion::region_size_bytes() * num); - + size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; + _free_sets.increase_used(Mutator, total_humongous_size); + _free_sets.assert_bounds(); + req.set_actual_size(words_size); if (remainder != 0) { - // Record this remainder as allocation waste - _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); + req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder); } - - // Allocated at left/rightmost? Move the bounds appropriately. - if (beg == _mutator_leftmost || end == _mutator_rightmost) { - adjust_bounds(); - } - assert_bounds(); - - req.set_actual_size(words_size); return _heap->get_region(beg)->bottom(); } -bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) { +// Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent +// immediate trash and we'll be able to immediately recycle it. Note that we cannot recycle immediate trash if +// concurrent weak root processing is in progress. 
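+// (While concurrent weak root processing is in progress, weak references may still point into trash regions,
+// so recycling must be deferred until that processing completes.)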
+bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const { return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress()); } -size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) { +bool ShenandoahFreeSet::can_allocate_from(size_t idx) const { + ShenandoahHeapRegion* r = _heap->get_region(idx); + return can_allocate_from(r); +} + +size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const { + ShenandoahHeapRegion* r = _heap->get_region(idx); + return alloc_capacity(r); +} + +size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const { if (r->is_trash()) { // This would be recycled on allocation path return ShenandoahHeapRegion::region_size_bytes(); @@ -342,13 +977,12 @@ size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) { } } -bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) { - return alloc_capacity(r) == 0; +bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const { + return alloc_capacity(r) > 0; } void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) { if (r->is_trash()) { - _heap->decrease_used(r->used()); r->recycle(); } } @@ -367,23 +1001,38 @@ void ShenandoahFreeSet::recycle_trash() { } } -void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { +void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { size_t idx = r->index(); - assert(_mutator_free_bitmap.at(idx), "Should be in mutator view"); + assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view"); + // Note: can_allocate_from(r) means r is entirely empty assert(can_allocate_from(r), "Should not be allocated"); - _mutator_free_bitmap.clear_bit(idx); - _collector_free_bitmap.set_bit(idx); - _collector_leftmost = MIN2(idx, _collector_leftmost); - _collector_rightmost = MAX2(idx, _collector_rightmost); + size_t region_capacity = alloc_capacity(r); + _free_sets.move_to_set(idx, OldCollector, region_capacity); + _free_sets.assert_bounds(); + _heap->augment_old_evac_reserve(region_capacity); + bool transferred = _heap->generation_sizer()->transfer_to_old(1); + if (!transferred) { + log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx); + _heap->generation_sizer()->force_transfer_to_old(1); + } + // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, + // to recycle trash before attempting to allocate anything in the region. +} + +void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { + size_t idx = r->index(); + + assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view"); + assert(can_allocate_from(r), "Should not be allocated"); - _capacity -= alloc_capacity(r); + size_t region_capacity = alloc_capacity(r); + _free_sets.move_to_set(idx, Collector, region_capacity); + _free_sets.assert_bounds(); - if (touches_bounds(idx)) { - adjust_bounds(); - } - assert_bounds(); + // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, + // to recycle trash before attempting to allocate anything in the region. 
} void ShenandoahFreeSet::clear() { @@ -392,61 +1041,377 @@ void ShenandoahFreeSet::clear() { } void ShenandoahFreeSet::clear_internal() { - _mutator_free_bitmap.clear(); - _collector_free_bitmap.clear(); - _mutator_leftmost = _max; - _mutator_rightmost = 0; - _collector_leftmost = _max; - _collector_rightmost = 0; - _capacity = 0; - _used = 0; + _free_sets.clear_all(); } -void ShenandoahFreeSet::rebuild() { - shenandoah_assert_heaplocked(); - clear(); - +// This function places all is_old() regions that have allocation capacity into the old_collector set. It places +// all other regions (not is_old()) that have allocation capacity into the mutator_set. Subsequently, we will +// move some of the mutator regions into the collector set or old_collector set with the intent of packing +// old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the +// next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap. +void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions, + size_t &first_old_region, size_t &last_old_region, + size_t &old_region_count) { + first_old_region = _heap->num_regions(); + last_old_region = 0; + old_region_count = 0; + old_cset_regions = 0; + young_cset_regions = 0; for (size_t idx = 0; idx < _heap->num_regions(); idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); + if (region->is_trash()) { + // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up". + if (region->is_old()) { + old_cset_regions++; + } else { + assert(region->is_young(), "Trashed region should be old or young"); + young_cset_regions++; + } + } else if (region->is_old() && region->is_regular()) { + old_region_count++; + if (first_old_region > idx) { + first_old_region = idx; + } + last_old_region = idx; + } if (region->is_alloc_allowed() || region->is_trash()) { - assert(!region->is_cset(), "Shouldn't be adding those to the free set"); + assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set"); + assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already"); + + // Do not add regions that would almost surely fail allocation + if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue; + + if (region->is_old()) { + _free_sets.make_free(idx, OldCollector, alloc_capacity(region)); + log_debug(gc, free)( + " Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set", + idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()), + byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used())); + } else { + _free_sets.make_free(idx, Mutator, alloc_capacity(region)); + log_debug(gc, free)( + " Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set", + idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()), + byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used())); + } + } + } +} - // Do not add regions that would surely fail allocation - if (has_no_alloc_capacity(region)) continue; +// Move no more than cset_regions from the existing Collector and OldCollector free sets to the Mutator free set. +// This is called from outside the heap lock. 
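+// Each transfer phase below briefly acquires the heap lock; max_xfer_regions bounds the total number of
+// regions moved, and empty regions are preferred.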
+void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) { + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t collector_empty_xfer = 0; + size_t collector_not_empty_xfer = 0; + size_t old_collector_empty_xfer = 0; + + // Process empty regions within the Collector free set + if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) { + ShenandoahHeapLocker locker(_heap->lock()); + for (size_t idx = _free_sets.leftmost_empty(Collector); + (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) { + if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) { + _free_sets.move_to_set(idx, Mutator, region_size_bytes); + max_xfer_regions--; + collector_empty_xfer += region_size_bytes; + } + } + } - _capacity += alloc_capacity(region); - assert(_used <= _capacity, "must not use more than we have"); + // Process empty regions within the OldCollector free set + size_t old_collector_regions = 0; + if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) { + ShenandoahHeapLocker locker(_heap->lock()); + for (size_t idx = _free_sets.leftmost_empty(OldCollector); + (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) { + if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) { + _free_sets.move_to_set(idx, Mutator, region_size_bytes); + max_xfer_regions--; + old_collector_empty_xfer += region_size_bytes; + old_collector_regions++; + } + } + if (old_collector_regions > 0) { + _heap->generation_sizer()->transfer_to_young(old_collector_regions); + } + } - assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already"); - _mutator_free_bitmap.set_bit(idx); + // If there are any non-empty regions within Collector set, we can also move them to the Mutator free set + if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) { + ShenandoahHeapLocker locker(_heap->lock()); + for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) { + size_t alloc_capacity = this->alloc_capacity(idx); + if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) { + _free_sets.move_to_set(idx, Mutator, alloc_capacity); + max_xfer_regions--; + collector_not_empty_xfer += alloc_capacity; + } } } - // Evac reserve: reserve trailing space for evacuations - size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve; - size_t reserved = 0; + size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer; + size_t total_xfer = collector_xfer + old_collector_empty_xfer; + log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve (" + SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer), + byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer), + byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer)); +} - for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) { - if (reserved >= to_reserve) break; - ShenandoahHeapRegion* region = _heap->get_region(idx); - if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) { - _mutator_free_bitmap.clear_bit(idx); - _collector_free_bitmap.set_bit(idx); - 
size_t ac = alloc_capacity(region);
-      _capacity -= ac;
-      reserved += ac;
+// Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
+void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
+                                           size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
+  shenandoah_assert_heaplocked();
+  // This resets all state information, removing all regions from all sets.
+  clear();
+  log_debug(gc, free)("Rebuilding FreeSet");
+
+  // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
+  // mutator set otherwise.
+  find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
+}
+
+void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions) {
+  shenandoah_assert_heaplocked();
+  size_t young_reserve, old_reserve;
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+  size_t old_capacity = _heap->old_generation()->max_capacity();
+  size_t old_available = _heap->old_generation()->available();
+  size_t old_unaffiliated_regions = _heap->old_generation()->free_unaffiliated_regions();
+  size_t young_capacity = _heap->young_generation()->max_capacity();
+  size_t young_available = _heap->young_generation()->available();
+  size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions();
+
+  old_unaffiliated_regions += old_cset_regions;
+  old_available += old_cset_regions * region_size_bytes;
+  young_unaffiliated_regions += young_cset_regions;
+  young_available += young_cset_regions * region_size_bytes;
+
+  // Consult old-region surplus and deficit to make adjustments to current generation capacities and availability.
+  // The generation region transfers take place after we rebuild.
+  size_t old_region_surplus = _heap->get_old_region_surplus();
+  size_t old_region_deficit = _heap->get_old_region_deficit();
+
+  if (old_region_surplus > 0) {
+    size_t xfer_bytes = old_region_surplus * region_size_bytes;
+    assert(old_region_surplus <= old_unaffiliated_regions, "Cannot transfer regions that are affiliated");
+    old_capacity -= xfer_bytes;
+    old_available -= xfer_bytes;
+    old_unaffiliated_regions -= old_region_surplus;
+    young_capacity += xfer_bytes;
+    young_available += xfer_bytes;
+    young_unaffiliated_regions += old_region_surplus;
+  } else if (old_region_deficit > 0) {
+    size_t xfer_bytes = old_region_deficit * region_size_bytes;
+    assert(old_region_deficit <= young_unaffiliated_regions, "Cannot transfer regions that are affiliated");
+    old_capacity += xfer_bytes;
+    old_available += xfer_bytes;
+    old_unaffiliated_regions += old_region_deficit;
+    young_capacity -= xfer_bytes;
+    young_available -= xfer_bytes;
+    young_unaffiliated_regions -= old_region_deficit;
+  }
+
+  // Evac reserve: reserve trailing space for evacuations, with regions reserved for old evacuations placed to the right
+  // of regions reserved for young evacuations.
+  if (!_heap->mode()->is_generational()) {
+    young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
+    old_reserve = 0;
+  } else {
+    // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
+    // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
+    // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
+    // each PLAB's available memory.
+ if (_heap->has_evacuation_reserve_quantities()) { + // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. + young_reserve = _heap->get_young_evac_reserve(); + old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); + assert(old_reserve <= old_available, + "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, + _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available); + } else { + // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) + young_reserve = (young_capacity * ShenandoahEvacReserve) / 100; + // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions. + // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of + // unaffiliated regions. + old_reserve = old_available; + } + } + + // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector + // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust + // the reserve downward to account for this possibility. This loss is part of the reason why the original budget + // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers. + if (old_reserve > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) { + old_reserve = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes; + } + + if (young_reserve > young_unaffiliated_regions * region_size_bytes) { + young_reserve = young_unaffiliated_regions * region_size_bytes; + } + + reserve_regions(young_reserve, old_reserve); + _free_sets.establish_alloc_bias(OldCollector); + _free_sets.assert_bounds(); + log_status(); +} + +// Having placed all regions that have allocation capacity into the mutator set if they identify as is_young() +// or into the old collector set if they identify as is_old(), move some of these regions from the mutator set +// into the collector set or old collector set in order to assure that the memory available for allocations within +// the collector set is at least to_reserve, and the memory available for allocations within the old collector set +// is at least to_reserve_old. 
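+// Regions are visited from the highest index downward, so reserved regions congregate at the high end of the heap.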
+void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
+  for (size_t i = _heap->num_regions(); i > 0; i--) {
+    size_t idx = i - 1;
+    ShenandoahHeapRegion* r = _heap->get_region(idx);
+    if (!_free_sets.in_free_set(idx, Mutator)) {
+      continue;
+    }
+
+    size_t ac = alloc_capacity(r);
+    assert (ac > 0, "Membership in free set implies has capacity");
+    assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
+
+    bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
+    bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;
+
+    if (!move_to_old && !move_to_young) {
+      // We've satisfied both to_reserve and to_reserve_old
+      break;
+    }
+
+    if (move_to_old) {
+      if (r->is_trash() || !r->is_affiliated()) {
+        // OLD regions that have available memory are already in the old_collector free set
+        _free_sets.move_to_set(idx, OldCollector, ac);
+        log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
+        continue;
+      }
+    }
+
+    if (move_to_young) {
+      // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
+      // they were entirely empty.  I'm not sure I understand the rationale for that.  That alternative behavior would
+      // tend to mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
+      // ephemeral objects.  It also delays aging of regions, causing promotion in place to be delayed.
+      _free_sets.move_to_set(idx, Collector, ac);
+      log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
+    }
+  }

-  recompute_bounds();
-  assert_bounds();
+  if (LogTarget(Info, gc, free)::is_enabled()) {
+    size_t old_reserve = _free_sets.capacity_of(OldCollector);
+    if (old_reserve < to_reserve_old) {
+      log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
+                         PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
+    }
+    size_t young_reserve = _free_sets.capacity_of(Collector);
+    if (young_reserve < to_reserve) {
+      log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
+                         PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
+    }
+  }
 }

 void ShenandoahFreeSet::log_status() {
   shenandoah_assert_heaplocked();

-  LogTarget(Info, gc, ergo) lt;
+#ifdef ASSERT
+  // Dump of the FreeSet details is only enabled if assertions are enabled
+  if (LogTarget(Debug, gc, free)::is_enabled()) {
+#define BUFFER_SIZE 80
+    size_t retired_old = 0;
+    size_t retired_old_humongous = 0;
+    size_t retired_young = 0;
+    size_t retired_young_humongous = 0;
+    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+    size_t retired_young_waste = 0;
+    size_t retired_old_waste = 0;
+    size_t consumed_collector = 0;
+    size_t consumed_old_collector = 0;
+    size_t consumed_mutator = 0;
+    size_t available_old = 0;
+    size_t available_young = 0;
+    size_t available_mutator = 0;
+    size_t available_collector = 0;
+    size_t available_old_collector = 0;
+
+    char buffer[BUFFER_SIZE];
+    for (uint i = 0; i < BUFFER_SIZE; i++) {
+      buffer[i] = '\0';
+    }
+    log_debug(gc, free)("FreeSet map legend:"
+                        " M:mutator_free C:collector_free O:old_collector_free"
+                        " H:humongous ~:retired old _:retired young");
+    log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
+                        " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
+                        "old collector free range [" SIZE_FORMAT ".."
SIZE_FORMAT "] allocates from %s", + _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator), + _free_sets.leftmost(Collector), _free_sets.rightmost(Collector), + _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector), + _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left"); + + for (uint i = 0; i < _heap->num_regions(); i++) { + ShenandoahHeapRegion *r = _heap->get_region(i); + uint idx = i % 64; + if ((i != 0) && (idx == 0)) { + log_debug(gc, free)(" %6u: %s", i-64, buffer); + } + if (_free_sets.in_free_set(i, Mutator)) { + assert(!r->is_old(), "Old regions should not be in mutator_free set"); + size_t capacity = alloc_capacity(r); + available_mutator += capacity; + consumed_mutator += region_size_bytes - capacity; + buffer[idx] = (capacity == region_size_bytes)? 'M': 'm'; + } else if (_free_sets.in_free_set(i, Collector)) { + assert(!r->is_old(), "Old regions should not be in collector_free set"); + size_t capacity = alloc_capacity(r); + available_collector += capacity; + consumed_collector += region_size_bytes - capacity; + buffer[idx] = (capacity == region_size_bytes)? 'C': 'c'; + } else if (_free_sets.in_free_set(i, OldCollector)) { + size_t capacity = alloc_capacity(r); + available_old_collector += capacity; + consumed_old_collector += region_size_bytes - capacity; + buffer[idx] = (capacity == region_size_bytes)? 'O': 'o'; + } else if (r->is_humongous()) { + if (r->is_old()) { + buffer[idx] = 'H'; + retired_old_humongous += region_size_bytes; + } else { + buffer[idx] = 'h'; + retired_young_humongous += region_size_bytes; + } + } else { + if (r->is_old()) { + buffer[idx] = '~'; + retired_old_waste += alloc_capacity(r); + retired_old += region_size_bytes; + } else { + buffer[idx] = '_'; + retired_young_waste += alloc_capacity(r); + retired_young += region_size_bytes; + } + } + } + uint remnant = _heap->num_regions() % 64; + if (remnant > 0) { + buffer[remnant] = '\0'; + } else { + remnant = 64; + } + log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer); + size_t total_young = retired_young + retired_young_humongous; + size_t total_old = retired_old + retired_old_humongous; + } +#endif + + LogTarget(Info, gc, free) lt; if (lt.is_enabled()) { ResourceMark rm; LogStream ls(lt); @@ -461,13 +1426,11 @@ void ShenandoahFreeSet::log_status() { size_t total_free = 0; size_t total_free_ext = 0; - for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) { - if (is_mutator_free(idx)) { + for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) { + if (_free_sets.in_free_set(idx, Mutator)) { ShenandoahHeapRegion *r = _heap->get_region(idx); size_t free = alloc_capacity(r); - max = MAX2(max, free); - if (r->is_empty()) { total_free_ext += free; if (last_idx + 1 == idx) { @@ -478,10 +1441,8 @@ void ShenandoahFreeSet::log_status() { } else { empty_contig = 0; } - total_used += r->used(); total_free += free; - max_contig = MAX2(max_contig, empty_contig); last_idx = idx; } @@ -490,6 +1451,10 @@ void ShenandoahFreeSet::log_status() { size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes(); size_t free = capacity() - used(); + assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT + ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")", + total_free, capacity(), used()); + ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ", 
byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), @@ -506,44 +1471,69 @@ void ShenandoahFreeSet::log_status() { ls.print(SIZE_FORMAT "%% external, ", frag_ext); size_t frag_int; - if (mutator_count() > 0) { - frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes()); + if (_free_sets.count(Mutator) > 0) { + frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes()); } else { frag_int = 0; } ls.print(SIZE_FORMAT "%% internal; ", frag_int); + ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT, + byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator)); } { size_t max = 0; size_t total_free = 0; + size_t total_used = 0; - for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) { - if (is_collector_free(idx)) { + for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) { + if (_free_sets.in_free_set(idx, Collector)) { ShenandoahHeapRegion *r = _heap->get_region(idx); size_t free = alloc_capacity(r); max = MAX2(max, free); total_free += free; + total_used += r->used(); } } + ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), + byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used)); + } - ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s", + if (_heap->mode()->is_generational()) { + size_t max = 0; + size_t total_free = 0; + size_t total_used = 0; + + for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) { + if (_free_sets.in_free_set(idx, OldCollector)) { + ShenandoahHeapRegion *r = _heap->get_region(idx); + size_t free = alloc_capacity(r); + max = MAX2(max, free); + total_free += free; + total_used += r->used(); + } + } + ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s", byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), - byte_size_in_proper_unit(max), proper_unit_for_byte_size(max)); + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), + byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used)); } } } HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) { shenandoah_assert_heaplocked(); - assert_bounds(); + // Allocation request is known to satisfy all memory budgeting constraints. if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) { switch (req.type()) { case ShenandoahAllocRequest::_alloc_shared: case ShenandoahAllocRequest::_alloc_shared_gc: in_new_region = true; return allocate_contiguous(req); + case ShenandoahAllocRequest::_alloc_plab: case ShenandoahAllocRequest::_alloc_gclab: case ShenandoahAllocRequest::_alloc_tlab: in_new_region = false; @@ -562,8 +1552,8 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ size_t ShenandoahFreeSet::unsafe_peek_free() const { // Deliberately not locked, this method is unsafe when free set is modified. 
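// [Editorial note] Because no lock is taken, the bounds and membership consulted in the loop below may
// mutate concurrently with this scan; the returned value is at best a hint. Callers must tolerate a stale
// or inconsistent answer, so this is suitable only for advisory queries, never for allocation decisions.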
- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { - if (index < _max && is_mutator_free(index)) { + for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) { + if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) { ShenandoahHeapRegion* r = _heap->get_region(index); if (r->free() >= MinTLABSize) { return r->free(); @@ -576,18 +1566,26 @@ size_t ShenandoahFreeSet::unsafe_peek_free() const { } void ShenandoahFreeSet::print_on(outputStream* out) const { - out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count()); - for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { - if (is_mutator_free(index)) { + out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator)); + for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) { + if (_free_sets.in_free_set(index, Mutator)) { _heap->get_region(index)->print_on(out); } } - out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count()); - for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) { - if (is_collector_free(index)) { + out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector)); + for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) { + if (_free_sets.in_free_set(index, Collector)) { _heap->get_region(index)->print_on(out); } } + if (_heap->mode()->is_generational()) { + out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector)); + for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) { + if (_free_sets.in_free_set(index, OldCollector)) { + _heap->get_region(index)->print_on(out); + } + } + } } /* @@ -616,8 +1614,8 @@ double ShenandoahFreeSet::internal_fragmentation() { double linear = 0; int count = 0; - for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { - if (is_mutator_free(index)) { + for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) { + if (_free_sets.in_free_set(index, Mutator)) { ShenandoahHeapRegion* r = _heap->get_region(index); size_t used = r->used(); squared += used * used; @@ -654,8 +1652,8 @@ double ShenandoahFreeSet::external_fragmentation() { size_t free = 0; - for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { - if (is_mutator_free(index)) { + for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) { + if (_free_sets.in_free_set(index, Mutator)) { ShenandoahHeapRegion* r = _heap->get_region(index); if (r->is_empty()) { free += ShenandoahHeapRegion::region_size_bytes(); @@ -680,30 +1678,3 @@ double ShenandoahFreeSet::external_fragmentation() { } } -#ifdef ASSERT -void ShenandoahFreeSet::assert_bounds() const { - // Performance invariants. Failing these would not break the free set, but performance - // would suffer. 
- assert (_mutator_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max); - assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max); - - assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost), "leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost); - assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost); - - size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0); - size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1); - assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost); - assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost); - - assert (_collector_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max); - assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max); - - assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost), "leftmost region should be free: " SIZE_FORMAT, _collector_leftmost); - assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost); - - beg_off = _collector_free_bitmap.find_first_set_bit(0); - end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1); - assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost); - assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost); -} -#endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 634adfb63e0..414377c4ca8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -1,5 +1,7 @@ + /* * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,63 +30,182 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" +enum ShenandoahFreeMemoryType : uint8_t { + NotFree, + Mutator, + Collector, + OldCollector, + NumFreeSets +}; + +class ShenandoahSetsOfFree { + +private: + size_t _max; // The maximum number of heap regions + ShenandoahFreeSet* _free_set; + size_t _region_size_bytes; + ShenandoahFreeMemoryType* _membership; + size_t _leftmosts[NumFreeSets]; + size_t _rightmosts[NumFreeSets]; + size_t _leftmosts_empty[NumFreeSets]; + size_t _rightmosts_empty[NumFreeSets]; + size_t _capacity_of[NumFreeSets]; + size_t _used_by[NumFreeSets]; + bool _left_to_right_bias[NumFreeSets]; + size_t _region_counts[NumFreeSets]; + + inline void shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx); + inline void expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t capacity); + + // Restore all state variables to initial default state. 
+ void clear_internal(); + +public: + ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set); + ~ShenandoahSetsOfFree(); + + // Make all regions NotFree and reset all bounds + void clear_all(); + + // Remove or retire region idx from all free sets. Requires that idx is in a free set. This does not affect capacity. + void remove_from_free_sets(size_t idx); + + // Place region idx into free set which_set. Requires that idx is currently NotFree. + void make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity); + + // Place region idx into free set new_set. Requires that idx is currently not NotFree. + void move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity); + + // Returns the ShenandoahFreeMemoryType affiliation of region idx, or NotFree if this region is not currently free. This does + // not enforce that free_set membership implies allocation capacity. + inline ShenandoahFreeMemoryType membership(size_t idx) const; + + // Returns true iff region idx is in the which_set free set. Before returning true, asserts that the free + // set is not empty. Requires that which_set is neither NotFree nor NumFreeSets. + inline bool in_free_set(size_t idx, ShenandoahFreeMemoryType which_set) const; + + // The following four methods return the left-most and right-most bounds on ranges of regions representing + // the requested set. The _empty variants represent bounds on the range that holds completely empty + // regions, which are required for humongous allocations and desired for "very large" allocations. + // If the requested which_set is empty: + // leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0 + // otherwise, expect the following: + // 0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max + inline size_t leftmost(ShenandoahFreeMemoryType which_set) const; + inline size_t rightmost(ShenandoahFreeMemoryType which_set) const; + size_t leftmost_empty(ShenandoahFreeMemoryType which_set); + size_t rightmost_empty(ShenandoahFreeMemoryType which_set); + + inline bool is_empty(ShenandoahFreeMemoryType which_set) const; + + inline void increase_used(ShenandoahFreeMemoryType which_set, size_t bytes); + + inline size_t capacity_of(ShenandoahFreeMemoryType which_set) const { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + return _capacity_of[which_set]; + } + + inline size_t used_by(ShenandoahFreeMemoryType which_set) const { + assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid"); + return _used_by[which_set]; + } + + inline size_t max() const { return _max; } + + inline size_t count(ShenandoahFreeMemoryType which_set) const { return _region_counts[which_set]; } + + // Return true iff allocations from this set should be performed left to right. Otherwise, allocate + // from right to left. + inline bool alloc_from_left_bias(ShenandoahFreeMemoryType which_set); + + // Determine whether we prefer to allocate from left to right or from right to left for this free-set. + void establish_alloc_bias(ShenandoahFreeMemoryType which_set); + + // Assure leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets.
+ // Valid bounds honor all of the following (where max is the number of heap regions): + // if the set is empty, leftmost equals max and rightmost equals 0 + // Otherwise (the set is not empty): + // 0 <= leftmost < max and 0 <= rightmost < max + // the region at leftmost is in the set + // the region at rightmost is in the set + // rightmost >= leftmost + // for every idx that is in the set { + // idx >= leftmost && + // idx <= rightmost + // } + // if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0 + // Otherwise (the set has empty regions): + // 0 <= leftmost_empty < max and 0 <= rightmost_empty < max + // rightmost_empty >= leftmost_empty + // for every idx that is in the set and is empty { + // idx >= leftmost_empty && + // idx <= rightmost_empty + // } + void assert_bounds() NOT_DEBUG_RETURN; +}; + class ShenandoahFreeSet : public CHeapObj<mtGC> { private: ShenandoahHeap* const _heap; - CHeapBitMap _mutator_free_bitmap; - CHeapBitMap _collector_free_bitmap; - size_t _max; + ShenandoahSetsOfFree _free_sets; - // Left-most and right-most region indexes. There are no free regions outside - // of [left-most; right-most] index intervals - size_t _mutator_leftmost, _mutator_rightmost; - size_t _collector_leftmost, _collector_rightmost; + HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region); - size_t _capacity; - size_t _used; + HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r); - void assert_bounds() const NOT_DEBUG_RETURN; + // Satisfy young-generation or single-generation collector allocation request req by finding memory that matches + // affiliation, which either equals req.affiliation or FREE. We know req.is_young(). + HeapWord* allocate_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region); - bool is_mutator_free(size_t idx) const; - bool is_collector_free(size_t idx) const; + // Satisfy allocation request req by finding memory that matches affiliation, which either equals req.affiliation + // or FREE. We know req.is_old(). + HeapWord* allocate_old_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region); - HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region); + // While holding the heap lock, allocate memory for a single object which is to be entirely contained + // within a single HeapRegion as characterized by req. The req.size() value is known to be less than or + // equal to ShenandoahHeapRegion::humongous_threshold_words(). The caller of allocate_single is responsible + // for registering the resulting object and setting the remembered set card values as appropriate. The + // most common case is that we are allocating a PLAB in which case object registering and card dirtying + // is managed after the PLAB is divided into individual objects.
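// [Editorial note, illustrative only] The caller-side post-processing that allocate_single leaves undone
// mirrors what ShenandoahReconstructRememberedSetTask (shenandoahFullGC.cpp, later in this patch) does for
// each object: register the object with the card scanner, then dirty any cards holding interesting
// pointers. Roughly:
//
//   RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan();
//   scanner->register_object_without_lock(obj_addr);
//   ShenandoahSetRememberedCardsToDirtyClosure dirty_cards;
//   cast_to_oop(obj_addr)->oop_iterate(&dirty_cards);
//
// Both names appear elsewhere in this patch; whether the lock-free registration variant is safe depends on
// the calling context.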
HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region); HeapWord* allocate_contiguous(ShenandoahAllocRequest& req); void flip_to_gc(ShenandoahHeapRegion* r); + void flip_to_old_gc(ShenandoahHeapRegion* r); - void recompute_bounds(); - void adjust_bounds(); - bool touches_bounds(size_t num) const; - - void increase_used(size_t amount); void clear_internal(); - size_t collector_count() const { return _collector_free_bitmap.count_one_bits(); } - size_t mutator_count() const { return _mutator_free_bitmap.count_one_bits(); } - void try_recycle_trashed(ShenandoahHeapRegion *r); - bool can_allocate_from(ShenandoahHeapRegion *r); - size_t alloc_capacity(ShenandoahHeapRegion *r); - bool has_no_alloc_capacity(ShenandoahHeapRegion *r); + bool can_allocate_from(ShenandoahHeapRegion *r) const; + bool can_allocate_from(size_t idx) const; + bool has_alloc_capacity(ShenandoahHeapRegion *r) const; public: ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions); + size_t alloc_capacity(ShenandoahHeapRegion *r) const; + size_t alloc_capacity(size_t idx) const; + void clear(); - void rebuild(); + void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions, + size_t &first_old_region, size_t &last_old_region, size_t &old_region_count); + void rebuild(size_t young_cset_regions, size_t old_cset_regions); + void move_collector_sets_to_mutator(size_t cset_regions); + + void add_old_collector_free_region(ShenandoahHeapRegion* region); void recycle_trash(); void log_status(); - size_t capacity() const { return _capacity; } - size_t used() const { return _used; } - size_t available() const { - assert(_used <= _capacity, "must use less than capacity"); - return _capacity - _used; + inline size_t capacity() const { return _free_sets.capacity_of(Mutator); } + inline size_t used() const { return _free_sets.used_by(Mutator); } + inline size_t available() const { + assert(used() <= capacity(), "must use less than capacity"); + return capacity() - used(); } HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region); @@ -94,6 +215,10 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> { double external_fragmentation(); void print_on(outputStream* out) const; + + void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions, + size_t &first_old_region, size_t &last_old_region, size_t &old_region_count); + void reserve_regions(size_t young_reserve, size_t old_reserve); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 4cef5378d30..fa92b956876 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -33,8 +34,10 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahFullGC.hpp" +#include "gc/shenandoah/shenandoahGlobalGeneration.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" @@ -43,6 +46,7 @@ #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/shenandoahMetrics.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" @@ -51,6 +55,7 @@ #include "gc/shenandoah/shenandoahVerifier.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "memory/metaspaceUtils.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.inline.hpp" @@ -62,6 +67,69 @@ #include "utilities/events.hpp" #include "utilities/growableArray.hpp" +// After Full GC is done, reconstruct the remembered set by iterating over OLD regions, +// registering all objects between bottom() and top(), and setting remembered set cards to +// DIRTY if they hold interesting pointers. +class ShenandoahReconstructRememberedSetTask : public WorkerTask { +private: + ShenandoahRegionIterator _regions; + +public: + ShenandoahReconstructRememberedSetTask() : + WorkerTask("Shenandoah Reconstruct Remembered Set") { } + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahHeapRegion* r = _regions.next(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + RememberedScanner* scanner = heap->card_scan(); + ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers; + + while (r != nullptr) { + if (r->is_old() && r->is_active()) { + HeapWord* obj_addr = r->bottom(); + if (r->is_humongous_start()) { + // Determine the extent of the humongous object + oop obj = cast_to_oop(obj_addr); + size_t size = obj->size(); + + // First, clear the remembered set for all spanned humongous regions + size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words(); + scanner->reset_remset(r->bottom(), region_span); + size_t region_index = r->index(); + ShenandoahHeapRegion* humongous_region = heap->get_region(region_index); + while (num_regions-- != 0) { + scanner->reset_object_range(humongous_region->bottom(), humongous_region->end()); + region_index++; + humongous_region = heap->get_region(region_index); + } + + // Then register the humongous object and DIRTY relevant remembered set cards + scanner->register_object_without_lock(obj_addr); + obj->oop_iterate(&dirty_cards_for_interesting_pointers); + } else if (!r->is_humongous()) { + // First, clear the remembered set + scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words()); + scanner->reset_object_range(r->bottom(), r->end()); + + // Then iterate over all objects, registering each object and DIRTYing relevant remembered set cards +
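// [Editorial note] The loop below walks the region linearly from bottom() to top(): oop_iterate_size()
// both applies the card-dirtying closure to the object's oop fields and returns the object's size in
// words, so a single call advances obj_addr to the next object header.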
HeapWord* t = r->top(); + while (obj_addr < t) { + oop obj = cast_to_oop(obj_addr); + size_t size = obj->size(); + scanner->register_object_without_lock(obj_addr); + obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers); + } + } // else, ignore humongous continuation region + } + // else, this region is FREE or YOUNG or inactive and we can ignore it. + // TODO: Assert this. + r = _regions.next(); + } + } +}; + ShenandoahFullGC::ShenandoahFullGC() : _gc_timer(ShenandoahHeap::heap()->gc_timer()), _preserved_marks(new PreservedMarksSet(true)) {} @@ -99,6 +167,7 @@ void ShenandoahFullGC::entry_full(GCCause::Cause cause) { } void ShenandoahFullGC::op_full(GCCause::Cause cause) { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); ShenandoahMetricsSnapshot metrics; metrics.snap_before(); @@ -106,7 +175,33 @@ do_it(cause); metrics.snap_after(); - + if (heap->mode()->is_generational()) { + // Full GC should reset time since last GC for young and old heuristics + heap->young_generation()->heuristics()->record_cycle_end(); + heap->old_generation()->heuristics()->record_cycle_end(); + + heap->mmu_tracker()->record_full(GCId::current()); + heap->log_heap_status("At end of Full GC"); + + assert(heap->old_generation()->state() == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP, + "After full GC, old generation should be waiting for bootstrap."); + + // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are + // made valid by the time Full GC completes. + assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->max_capacity(), + "Old generation affiliated regions must be less than capacity"); + assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->max_capacity(), + "Young generation affiliated regions must be less than capacity"); + + assert((heap->young_generation()->used() + heap->young_generation()->get_humongous_waste()) + <= heap->young_generation()->used_regions_size(), "Young consumed can be no larger than span of affiliated regions"); + assert((heap->old_generation()->used() + heap->old_generation()->get_humongous_waste()) + <= heap->old_generation()->used_regions_size(), "Old consumed can be no larger than span of affiliated regions"); + + // Establish baseline for next old-has-grown trigger. + heap->old_generation()->set_live_bytes_after_last_mark(heap->old_generation()->used() + + heap->old_generation()->get_humongous_waste()); + } if (metrics.is_good_progress()) { ShenandoahHeap::heap()->notify_gc_progress(); } else { @@ -114,10 +209,26 @@ // progress, and it can finally fail. ShenandoahHeap::heap()->notify_gc_no_progress(); } + + // Regardless of whether progress was made, we record that we completed a "successful" full GC. + heap->global_generation()->heuristics()->record_success_full(); + heap->shenandoah_policy()->record_success_full(); } void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { ShenandoahHeap* heap = ShenandoahHeap::heap(); + // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL. + heap->set_gc_generation(heap->global_generation()); + + if (heap->mode()->is_generational()) { + // No need for old_gen->increase_used() as this was done when PLABs were allocated.
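// [Editorial note] The three reserves below are zeroed because Full GC compacts in place rather than
// evacuating: any evacuation or promotion budgets established for the abandoned concurrent cycle are
// meaningless once Full GC takes over, and stale non-zero values could distort the free-set rebuild that
// follows compaction.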
+ heap->set_young_evac_reserve(0); + heap->set_old_evac_reserve(0); + heap->set_promoted_reserve(0); + + // Full GC supersedes any marking or coalescing in old generation. + heap->cancel_old_gc(); + } if (ShenandoahVerify) { heap->verifier()->verify_before_fullgc(); @@ -161,10 +272,9 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { } assert(!heap->is_update_refs_in_progress(), "sanity"); - // b. Cancel concurrent mark, if in progress + // b. Cancel all concurrent marks, if in progress if (heap->is_concurrent_mark_in_progress()) { - ShenandoahConcurrentGC::cancel(); - heap->set_concurrent_mark_in_progress(false); + heap->cancel_concurrent_mark(); } assert(!heap->is_concurrent_mark_in_progress(), "sanity"); @@ -174,17 +284,26 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { } // d. Reset the bitmaps for new marking - heap->reset_mark_bitmap(); + heap->global_generation()->reset_mark_bitmap(); assert(heap->marking_context()->is_bitmap_clear(), "sanity"); - assert(!heap->marking_context()->is_complete(), "sanity"); + assert(!heap->global_generation()->is_mark_complete(), "sanity"); // e. Abandon reference discovery and clear all discovered references. - ShenandoahReferenceProcessor* rp = heap->ref_processor(); + ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor(); rp->abandon_partial_discovery(); // f. Sync pinned region status from the CP marks heap->sync_pinned_region_status(); + if (heap->mode()->is_generational()) { + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + if (r->get_top_before_promote() != nullptr) { + r->restore_top_before_promote(); + } + } + } + // The rest of prologue: _preserved_marks->init(heap->workers()->active_workers()); @@ -192,6 +311,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { } if (UseTLAB) { + // TODO: Do we need to explicitly retire PLABs? heap->gclabs_retire(ResizeTLAB); heap->tlabs_retire(ResizeTLAB); } @@ -228,12 +348,21 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { phase3_update_references(); phase4_compact_objects(worker_slices); + + phase5_epilog(); } { // Epilogue + // TODO: Merge with phase5_epilog? _preserved_marks->restore(heap->workers()); _preserved_marks->reclaim(); + + if (heap->mode()->is_generational()) { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set); + ShenandoahReconstructRememberedSetTask task; + heap->workers()->run_task(&task); + } } // Resize metaspace @@ -252,6 +381,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { heap->verifier()->verify_after_fullgc(); } + // Humongous regions are promoted on demand and are accounted for by normal Full GC mechanisms. 
if (VerifyAfterGC) { Universe::verify(); } @@ -270,9 +400,13 @@ class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure { ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} void heap_region_do(ShenandoahHeapRegion *r) { - _ctx->capture_top_at_mark_start(r); - r->clear_live_data(); + if (r->affiliation() != FREE) { + _ctx->capture_top_at_mark_start(r); + r->clear_live_data(); + } } + + bool is_thread_safe() { return true; } }; void ShenandoahFullGC::phase1_mark_heap() { @@ -282,19 +416,263 @@ void ShenandoahFullGC::phase1_mark_heap() { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahPrepareForMarkClosure cl; - heap->heap_region_iterate(&cl); + heap->parallel_heap_region_iterate(&cl); - heap->set_unload_classes(heap->heuristics()->can_unload_classes()); + heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes()); - ShenandoahReferenceProcessor* rp = heap->ref_processor(); + ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor(); // enable ("weak") refs discovery rp->set_soft_reference_policy(true); // forcefully purge all soft references - ShenandoahSTWMark mark(true /*full_gc*/); + ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/); mark.mark(); heap->parallel_cleaning(true /* full_gc */); + + size_t live_bytes_in_old = 0; + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + if (r->is_old()) { + live_bytes_in_old += r->get_live_data_bytes(); + } + } + log_info(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old)); } +class ShenandoahPrepareForCompactionTask : public WorkerTask { +private: + PreservedMarksSet* const _preserved_marks; + ShenandoahHeap* const _heap; + ShenandoahHeapRegionSet** const _worker_slices; + size_t const _num_workers; + +public: + ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, + ShenandoahHeapRegionSet **worker_slices, + size_t num_workers); + + static bool is_candidate_region(ShenandoahHeapRegion* r) { + // Empty region: get it into the slice to defragment the slice itself. + // We could have skipped this without violating correctness, but we really + // want to compact all live regions to the start of the heap, which sometimes + // means moving them into the fully empty regions. + if (r->is_empty()) return true; + + // Can move the region, and this is not the humongous region. Humongous + // moves are special cased here, because their moves are handled separately. + return r->is_stw_move_allowed() && !r->is_humongous(); + } + + void work(uint worker_id); +}; + +class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure { +private: + PreservedMarks* const _preserved_marks; + ShenandoahHeap* const _heap; + uint _tenuring_threshold; + + // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's + // compaction efforts. The worker thread that drives these efforts adds compacted regions to this list if the + // region has not been compacted onto itself. 
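// [Editorial note] Keeping _empty_regions thread-local avoids synchronization: each worker compacts only
// its own slice (see ShenandoahPrepareForCompactionTask::work below), so a region this worker empties can
// safely be handed back to the same worker as a future compaction target via _empty_regions_pos.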
+ GrowableArray<ShenandoahHeapRegion*>& _empty_regions; + int _empty_regions_pos; + ShenandoahHeapRegion* _old_to_region; + ShenandoahHeapRegion* _young_to_region; + ShenandoahHeapRegion* _from_region; + ShenandoahAffiliation _from_affiliation; + HeapWord* _old_compact_point; + HeapWord* _young_compact_point; + uint _worker_id; + +public: + ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks, + GrowableArray<ShenandoahHeapRegion*>& empty_regions, + ShenandoahHeapRegion* old_to_region, + ShenandoahHeapRegion* young_to_region, uint worker_id) : + _preserved_marks(preserved_marks), + _heap(ShenandoahHeap::heap()), + _tenuring_threshold(0), + _empty_regions(empty_regions), + _empty_regions_pos(0), + _old_to_region(old_to_region), + _young_to_region(young_to_region), + _from_region(nullptr), + _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr), + _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr), + _worker_id(worker_id) { + if (_heap->mode()->is_generational()) { + _tenuring_threshold = _heap->age_census()->tenuring_threshold(); + } + } + + void set_from_region(ShenandoahHeapRegion* from_region) { + _from_region = from_region; + _from_affiliation = from_region->affiliation(); + if (_from_region->has_live()) { + if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) { + if (_old_to_region == nullptr) { + _old_to_region = from_region; + _old_compact_point = from_region->bottom(); + } + } else { + assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG"); + if (_young_to_region == nullptr) { + _young_to_region = from_region; + _young_compact_point = from_region->bottom(); + } + } + } // else, we won't iterate over this _from_region so we don't need to set up a to-region to hold copies + } + + void finish() { + finish_old_region(); + finish_young_region(); + } + + void finish_old_region() { + if (_old_to_region != nullptr) { + log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u", + _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id); + _old_to_region->set_new_top(_old_compact_point); + _old_to_region = nullptr; + } + } + + void finish_young_region() { + if (_young_to_region != nullptr) { + log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT, + _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom()); + _young_to_region->set_new_top(_young_compact_point); + _young_to_region = nullptr; + } + } + + bool is_compact_same_region() { + return (_from_region == _old_to_region) || (_from_region == _young_to_region); + } + + int empty_regions_pos() { + return _empty_regions_pos; + } + + void do_object(oop p) { + assert(_from_region != nullptr, "must set before work"); + assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()), + "Object must reside in _from_region"); + assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); + assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked"); + + size_t obj_size = p->size(); + uint from_region_age = _from_region->age(); + uint object_age = p->age(); + + bool promote_object = false; + if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) && + (from_region_age + object_age >= _tenuring_threshold)) { + if ((_old_to_region != nullptr) && (_old_compact_point + obj_size >
_old_to_region->end())) { + finish_old_region(); + _old_to_region = nullptr; + } + if (_old_to_region == nullptr) { + if (_empty_regions_pos < _empty_regions.length()) { + ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos); + _empty_regions_pos++; + new_to_region->set_affiliation(OLD_GENERATION); + _old_to_region = new_to_region; + _old_compact_point = _old_to_region->bottom(); + promote_object = true; + } + // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so + // we leave promote_object as false, deferring the promotion. + } else { + promote_object = true; + } + } + + if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) { + assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region"); + if (_old_compact_point + obj_size > _old_to_region->end()) { + ShenandoahHeapRegion* new_to_region; + + log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT + ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(), + p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end())); + + // Object does not fit. Get a new _old_to_region. + finish_old_region(); + if (_empty_regions_pos < _empty_regions.length()) { + new_to_region = _empty_regions.at(_empty_regions_pos); + _empty_regions_pos++; + new_to_region->set_affiliation(OLD_GENERATION); + } else { + // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct + // from _from_region. That's because there is always room for _from_region to be compacted into itself. + // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction. + new_to_region = _from_region; + } + + assert(new_to_region != _old_to_region, "must not reuse same OLD to-region"); + assert(new_to_region != nullptr, "must not be nullptr"); + _old_to_region = new_to_region; + _old_compact_point = _old_to_region->bottom(); + } + + // Object fits into current region, record new location: + assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit"); + shenandoah_assert_not_forwarded(nullptr, p); + _preserved_marks->push_if_necessary(p, p->mark()); + p->forward_to(cast_to_oop(_old_compact_point)); + _old_compact_point += obj_size; + } else { + assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, + "_from_region must be OLD_GENERATION or YOUNG_GENERATION"); + assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region"); + + // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve + // tenuring progress. + if (_heap->is_aging_cycle()) { + _heap->increase_object_age(p, from_region_age + 1); + } else { + _heap->increase_object_age(p, from_region_age); + } + + if (_young_compact_point + obj_size > _young_to_region->end()) { + ShenandoahHeapRegion* new_to_region; + + log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT + ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(), + p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end())); + + // Object does not fit. Get a new _young_to_region. 
+ finish_young_region(); + if (_empty_regions_pos < _empty_regions.length()) { + new_to_region = _empty_regions.at(_empty_regions_pos); + _empty_regions_pos++; + new_to_region->set_affiliation(YOUNG_GENERATION); + } else { + // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct + // from _from_region. That's because there is always room for _from_region to be compacted into itself. + // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction. + new_to_region = _from_region; + } + + assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region"); + assert(new_to_region != nullptr, "must not be nullptr"); + _young_to_region = new_to_region; + _young_compact_point = _young_to_region->bottom(); + } + + // Object fits into current region, record new location: + assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit"); + shenandoah_assert_not_forwarded(nullptr, p); + _preserved_marks->push_if_necessary(p, p->mark()); + p->forward_to(cast_to_oop(_young_compact_point)); + _young_compact_point += obj_size; + } + } +}; + + class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { private: PreservedMarks* const _preserved_marks; @@ -323,6 +701,7 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { void finish_region() { assert(_to_region != nullptr, "should not happen"); + assert(!_heap->mode()->is_generational(), "Generational GC should use different Closure"); _to_region->set_new_top(_compact_point); } @@ -368,52 +747,64 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { } }; -class ShenandoahPrepareForCompactionTask : public WorkerTask { -private: - PreservedMarksSet* const _preserved_marks; - ShenandoahHeap* const _heap; - ShenandoahHeapRegionSet** const _worker_slices; -public: - ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) : +ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, + ShenandoahHeapRegionSet **worker_slices, + size_t num_workers) : WorkerTask("Shenandoah Prepare For Compaction"), - _preserved_marks(preserved_marks), - _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) { + _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()), + _worker_slices(worker_slices), _num_workers(num_workers) { } + + +void ShenandoahPrepareForCompactionTask::work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahHeapRegionSet* slice = _worker_slices[worker_id]; + ShenandoahHeapRegionSetIterator it(slice); + ShenandoahHeapRegion* from_region = it.next(); + // No work? + if (from_region == nullptr) { + return; } - static bool is_candidate_region(ShenandoahHeapRegion* r) { - // Empty region: get it into the slice to defragment the slice itself. - // We could have skipped this without violating correctness, but we really - // want to compact all live regions to the start of the heap, which sometimes - // means moving them into the fully empty regions. - if (r->is_empty()) return true; + // Sliding compaction. Walk all regions in the slice, and compact them. + // Remember empty regions and reuse them as needed. + ResourceMark rm; - // Can move the region, and this is not the humongous region. Humongous - // moves are special cased here, because their moves are handled separately.
- return r->is_stw_move_allowed() && !r->is_humongous(); - } + GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions()); - void work(uint worker_id) { - ShenandoahParallelWorkerSession worker_session(worker_id); - ShenandoahHeapRegionSet* slice = _worker_slices[worker_id]; - ShenandoahHeapRegionSetIterator it(slice); - ShenandoahHeapRegion* from_region = it.next(); - // No work? - if (from_region == nullptr) { - return; + if (_heap->mode()->is_generational()) { + ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr; + ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr; + ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id), + empty_regions, + old_to_region, young_to_region, + worker_id); + while (from_region != nullptr) { + assert(is_candidate_region(from_region), "Sanity"); + log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live", + worker_id, from_region->affiliation_name(), + from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have"); + cl.set_from_region(from_region); + if (from_region->has_live()) { + _heap->marked_object_iterate(from_region, &cl); + } + // Compacted the region to somewhere else? From-region is empty then. + if (!cl.is_compact_same_region()) { + empty_regions.append(from_region); + } + from_region = it.next(); } + cl.finish(); - // Sliding compaction. Walk all regions in the slice, and compact them. - // Remember empty regions and reuse them as needed. - ResourceMark rm; - - GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions()); - + // Mark all remaining regions as empty + for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) { + ShenandoahHeapRegion* r = empty_regions.at(pos); + r->set_new_top(r->bottom()); + } + } else { ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region); - while (from_region != nullptr) { assert(is_candidate_region(from_region), "Sanity"); - cl.set_from_region(from_region); if (from_region->has_live()) { _heap->marked_object_iterate(from_region, &cl); @@ -433,7 +824,7 @@ class ShenandoahPrepareForCompactionTask : public WorkerTask { r->set_new_top(r->bottom()); } } -}; +} void ShenandoahFullGC::calculate_target_humongous_objects() { ShenandoahHeap* heap = ShenandoahHeap::heap(); @@ -452,6 +843,7 @@ size_t to_begin = heap->num_regions(); size_t to_end = heap->num_regions(); + log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end); for (size_t c = heap->num_regions(); c > 0; c--) { ShenandoahHeapRegion *r = heap->get_region(c - 1); if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) { @@ -494,6 +886,7 @@ class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure { r->recycle(); } if (r->is_cset()) { + // Leave affiliation unchanged r->make_regular_bypass(); } if (r->is_empty_uncommitted()) { @@ -518,22 +911,31 @@ class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure { _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} void heap_region_do(ShenandoahHeapRegion* r) { + if (!r->is_affiliated()) { + // Ignore free regions + // TODO: change iterators so they do not process FREE regions.
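// [Editorial sketch, hypothetical API] The TODO above could be satisfied by an affiliation-filtering
// iterator along these lines (the name and placement are illustrative, not part of this patch):
//
//   void ShenandoahHeap::affiliated_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
//     for (size_t i = 0; i < num_regions(); i++) {
//       ShenandoahHeapRegion* r = get_region(i);
//       if (r->is_affiliated()) {  // skip FREE regions up front
//         cl->heap_region_do(r);
//       }
//     }
//   }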
+ return; + } + + if (r->is_humongous_start()) { oop humongous_obj = cast_to_oop(r->bottom()); if (!_ctx->is_marked(humongous_obj)) { assert(!r->has_live(), - "Region " SIZE_FORMAT " is not marked, should not have live", r->index()); + "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live", + r->affiliation_name(), r->index()); + log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index()); _heap->trash_humongous_region_at(r); } else { assert(r->has_live(), - "Region " SIZE_FORMAT " should have live", r->index()); + "Humongous Start %s Region " SIZE_FORMAT " should have live", r->affiliation_name(), r->index()); } } else if (r->is_humongous_continuation()) { // If we hit continuation, the non-live humongous starts should have been trashed already assert(r->humongous_start_region()->has_live(), - "Region " SIZE_FORMAT " should have live", r->index()); + "Humongous Continuation %s Region " SIZE_FORMAT " should have live", r->affiliation_name(), r->index()); } else if (r->is_regular()) { if (!r->has_live()) { + log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because has no live", r->index()); r->make_trash_immediate(); } } @@ -682,6 +1084,11 @@ void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices #endif } +// TODO: +// Consider compacting old-gen objects toward the high end of memory and young-gen objects towards the low-end +// of memory. As currently implemented, all regions are compacted toward the low-end of memory. This creates more +// fragmentation of the heap, because old-gen regions get scattered among low-address regions such that it becomes +// more difficult to find contiguous regions for humongous objects. void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) { GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer); ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses); @@ -709,7 +1116,10 @@ distribute_slices(worker_slices); - ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices); + size_t num_workers = heap->max_workers(); + + ResourceMark rm; + ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers); heap->workers()->run_task(&task); } @@ -783,6 +1193,13 @@ class ShenandoahAdjustPointersTask : public WorkerTask { if (!r->is_humongous_continuation() && r->has_live()) { _heap->marked_object_iterate(r, &obj_cl); } + if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) { + // Pinned regions are not compacted so they may still hold unmarked objects with + // references to reclaimed memory. Remembered set scanning will crash if it attempts + // to iterate the oops in these objects. + r->begin_preemptible_coalesce_and_fill(); + r->oop_fill_and_coalesce_without_cancel(); + } r = _regions.next(); } } @@ -883,13 +1300,40 @@ } }; +static void account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) { + region_count++; + region_usage += r->used(); + if (r->is_humongous_start()) { + // For each humongous object, we take this path once regardless of how many regions it spans.
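// [Editorial worked example, assumed values] With 4 MB regions (region_size_words = 524,288 words on a
// 64-bit VM), a 9 MB humongous object (1,179,648 words) spans three regions. Then
// overreach = 1,179,648 % 524,288 = 131,072 words (1 MB used in the last region), and the code below adds
// (524,288 - 131,072) * HeapWordSize = 3 MB to humongous_waste for the unused tail of that region.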
+ HeapWord* obj_addr = r->bottom(); + oop obj = cast_to_oop(obj_addr); + size_t word_size = obj->size(); + size_t region_size_words = ShenandoahHeapRegion::region_size_words(); + size_t overreach = word_size % region_size_words; + if (overreach != 0) { + humongous_waste += (region_size_words - overreach) * HeapWordSize; + } + // else, this humongous object aligns exactly on region size, so no waste. + } +} + class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { private: ShenandoahHeap* const _heap; - size_t _live; + bool _is_generational; + size_t _young_regions, _young_usage, _young_humongous_waste; + size_t _old_regions, _old_usage, _old_humongous_waste; public: - ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) { + ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), + _is_generational(_heap->mode()->is_generational()), + _young_regions(0), + _young_usage(0), + _young_humongous_waste(0), + _old_regions(0), + _old_usage(0), + _old_humongous_waste(0) + { _heap->free_set()->clear(); } @@ -909,6 +1353,10 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { // Make empty regions that have been allocated into regular if (r->is_empty() && live > 0) { + if (!_is_generational) { + r->make_young_maybe(); + } + // else, generational mode compaction has already established affiliation. r->make_regular_bypass(); if (ZapUnusedHeapArea) { SpaceMangler::mangle_region(MemRegion(r->top(), r->end())); @@ -924,15 +1372,32 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { if (r->is_trash()) { live = 0; r->recycle(); + } else { + if (r->is_old()) { + account_for_region(r, _old_regions, _old_usage, _old_humongous_waste); + } else if (r->is_young()) { + account_for_region(r, _young_regions, _young_usage, _young_humongous_waste); + } } - r->set_live_data(live); r->reset_alloc_metadata(); - _live += live; } - size_t get_live() { - return _live; + void update_generation_usage() { + if (_is_generational) { + _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste); + _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste); + } else { + assert(_old_regions == 0, "Old regions only expected in generational mode"); + assert(_old_usage == 0, "Old usage only expected in generational mode"); + assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode"); + } + + // In generational mode, global usage should be the sum of young and old. This is also true + // for non-generational modes except that there are no old regions. + _heap->global_generation()->establish_usage(_old_regions + _young_regions, + _old_usage + _young_usage, + _old_humongous_waste + _young_humongous_waste); } }; @@ -941,7 +1406,7 @@ void ShenandoahFullGC::compact_humongous_objects() { // // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases, // humongous regions are already compacted, and do not require further moves, which alleviates - // sliding costs. We may consider doing this in parallel in future. + // sliding costs. We may consider doing this in parallel in the future. 
ShenandoahHeap* heap = ShenandoahHeap::heap(); @@ -963,15 +1428,22 @@ void ShenandoahFullGC::compact_humongous_objects() { assert(old_start != new_start, "must be real move"); assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index()); - Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size); - ContinuationGCSupport::relativize_stack_chunk(cast_to_oop(r->bottom())); + ContinuationGCSupport::relativize_stack_chunk(cast_to_oop(heap->get_region(old_start)->bottom())); + log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, + old_start, new_start); + + Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(), + heap->get_region(new_start)->bottom(), + words_size); oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom()); new_obj->init_mark(); { + ShenandoahAffiliation original_affiliation = r->affiliation(); for (size_t c = old_start; c <= old_end; c++) { ShenandoahHeapRegion* r = heap->get_region(c); + // Leave humongous region affiliation unchanged. r->make_regular_bypass(); r->set_top(r->bottom()); } @@ -979,9 +1451,9 @@ void ShenandoahFullGC::compact_humongous_objects() { for (size_t c = new_start; c <= new_end; c++) { ShenandoahHeapRegion* r = heap->get_region(c); if (c == new_start) { - r->make_humongous_start_bypass(); + r->make_humongous_start_bypass(original_affiliation); } else { - r->make_humongous_cont_bypass(); + r->make_humongous_cont_bypass(original_affiliation); } // Trailing region may be non-full, record the remainder there @@ -1047,6 +1519,11 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong); compact_humongous_objects(); } +} + +void ShenandoahFullGC::phase5_epilog() { + GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer); + ShenandoahHeap* heap = ShenandoahHeap::heap(); // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer // and must ensure the bitmap is in sync. @@ -1059,14 +1536,87 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s // Bring regions in proper states after the collection, and set heap properties. 
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
-
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
-   heap->set_used(post_compact.get_live());
+   post_compact.update_generation_usage();
+
+   if (heap->mode()->is_generational()) {
+     size_t old_usage = heap->old_generation()->used_regions_size();
+     size_t old_capacity = heap->old_generation()->max_capacity();
+
+     assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
+     assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");
+
+     if (old_capacity > old_usage) {
+       size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
+       heap->generation_sizer()->transfer_to_young(excess_old_regions);
+     } else if (old_capacity < old_usage) {
+       size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
+       heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
+     }
+     log_info(gc)("FullGC done: young usage: " SIZE_FORMAT "%s, old usage: " SIZE_FORMAT "%s",
+                  byte_size_in_proper_unit(heap->young_generation()->used()), proper_unit_for_byte_size(heap->young_generation()->used()),
+                  byte_size_in_proper_unit(heap->old_generation()->used()), proper_unit_for_byte_size(heap->old_generation()->used()));
+   }
    heap->collection_set()->clear();
-   heap->free_set()->rebuild();
- }
+   size_t young_cset_regions, old_cset_regions;
+   size_t first_old, last_old, num_old;
+   heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
+
+   // We also do not expand old generation size following Full GC because we have scrambled age populations and
+   // no longer have objects separated by age into distinct regions.
+
+   // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
+   // A partial solution would be to remember how many objects are of tenure age following Full GC, but
+   // this is probably suboptimal, because most of these objects will not reside in a region that will be
+   // selected for the next evacuation phase.
+
+   // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
+   heap->clear_promotion_potential();
- heap->clear_cancelled_gc();
+   if (heap->mode()->is_generational()) {
+     // Invoke this in case we are able to transfer memory from OLD to YOUNG.
+     heap->adjust_generation_sizes_for_next_cycle(0, 0, 0);
+   }
+   heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
+
+   // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
+   // abbreviated cycle.
+ if (heap->mode()->is_generational()) { + bool success; + size_t region_xfer; + const char* region_destination; + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahGeneration* old_gen = heap->old_generation(); + + size_t old_region_surplus = heap->get_old_region_surplus(); + size_t old_region_deficit = heap->get_old_region_deficit(); + if (old_region_surplus) { + success = heap->generation_sizer()->transfer_to_young(old_region_surplus); + region_destination = "young"; + region_xfer = old_region_surplus; + } else if (old_region_deficit) { + success = heap->generation_sizer()->transfer_to_old(old_region_deficit); + region_destination = "old"; + region_xfer = old_region_deficit; + if (!success) { + ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand(); + } + } else { + region_destination = "none"; + region_xfer = 0; + success = true; + } + heap->set_old_region_surplus(0); + heap->set_old_region_deficit(0); + size_t young_available = young_gen->available(); + size_t old_available = old_gen->available(); + log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: " + SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s", + success? "successfully transferred": "failed to transfer", region_xfer, region_destination, + byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available), + byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available)); + } + heap->clear_cancelled_gc(true /* clear oom handler */); + } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp index 1c1653e59ec..6687116b21f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp @@ -81,6 +81,7 @@ class ShenandoahFullGC : public ShenandoahGC { void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices); void phase3_update_references(); void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices); + void phase5_epilog(); void distribute_slices(ShenandoahHeapRegionSet** worker_slices); void calculate_target_humongous_objects(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp index 922f54edf3c..5a3a739fb06 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp @@ -39,6 +39,8 @@ const char* ShenandoahGC::degen_point_to_string(ShenandoahDegenPoint point) { return ""; case _degenerated_outside_cycle: return "Outside of Cycle"; + case _degenerated_roots: + return "Roots"; case _degenerated_mark: return "Mark"; case _degenerated_evac: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp index e0d3724723a..4e929363c94 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp @@ -50,6 +50,7 @@ class ShenandoahGC : public StackObj { enum ShenandoahDegenPoint { _degenerated_unset, _degenerated_outside_cycle, + _degenerated_roots, _degenerated_mark, _degenerated_evac, _degenerated_updaterefs, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp new file mode 100644 index 00000000000..f31753bb2de --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -0,0 +1,1001 @@ +/* + * Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahMarkClosures.hpp" +#include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahReferenceProcessor.hpp" +#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" + +#include "utilities/quickSort.hpp" + +class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { + private: + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _ctx; + public: + ShenandoahResetUpdateRegionStateClosure() : + _heap(ShenandoahHeap::heap()), + _ctx(_heap->marking_context()) {} + + void heap_region_do(ShenandoahHeapRegion* r) override { + if (_heap->is_bitmap_slice_committed(r)) { + _ctx->clear_bitmap(r); + } + + if (r->is_active()) { + // Reset live data and set TAMS optimistically. We would recheck these under the pause + // anyway to capture any updates that happened since now. + _ctx->capture_top_at_mark_start(r); + r->clear_live_data(); + } + } + + bool is_thread_safe() override { return true; } +}; + +class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure { + private: + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _ctx; + public: + ShenandoahResetBitmapTask() : + _heap(ShenandoahHeap::heap()), + _ctx(_heap->marking_context()) {} + + void heap_region_do(ShenandoahHeapRegion* region) { + if (_heap->is_bitmap_slice_committed(region)) { + _ctx->clear_bitmap(region); + } + } + + bool is_thread_safe() { return true; } +}; + +// Copy the write-version of the card-table into the read-version, clearing the +// write-copy. 
+class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure { + private: + ShenandoahHeap* _heap; + RememberedScanner* _scanner; + public: + ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {} + + virtual void heap_region_do(ShenandoahHeapRegion* r) override { + assert(r->is_old(), "Don't waste time doing this for non-old regions"); + _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words()); + } + + virtual bool is_thread_safe() override { + return true; + } +}; + +class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure { + private: + ShenandoahHeap* _heap; + RememberedScanner* _scanner; + public: + ShenandoahSquirrelAwayCardTable() : + _heap(ShenandoahHeap::heap()), + _scanner(_heap->card_scan()) {} + + void heap_region_do(ShenandoahHeapRegion* region) { + assert(region->is_old(), "Don't waste time doing this for non-old regions"); + _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words()); + } + + bool is_thread_safe() { return true; } +}; + +void ShenandoahGeneration::confirm_heuristics_mode() { + if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { + vm_exit_during_initialization( + err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", + _heuristics->name())); + } + if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { + vm_exit_during_initialization( + err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", + _heuristics->name())); + } +} + +ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) { + _heuristics = gc_mode->initialize_heuristics(this); + _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval); + confirm_heuristics_mode(); + return _heuristics; +} + +size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const { + return Atomic::load(&_bytes_allocated_since_gc_start); +} + +void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() { + Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0); +} + +void ShenandoahGeneration::increase_allocated(size_t bytes) { + Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); +} + +void ShenandoahGeneration::log_status(const char *msg) const { + typedef LogTarget(Info, gc, ergo) LogGcInfo; + + if (!LogGcInfo::is_enabled()) { + return; + } + + // Not under a lock here, so read each of these once to make sure + // byte size in proper unit and proper unit for byte size are consistent. 
+  size_t v_used = used();
+  size_t v_used_regions = used_regions_size();
+  size_t v_soft_max_capacity = soft_max_capacity();
+  size_t v_max_capacity = max_capacity();
+  size_t v_available = available();
+  size_t v_humongous_waste = get_humongous_waste();
+  LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
+                   "humongous waste: " SIZE_FORMAT "%s, soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, "
+                   "available: " SIZE_FORMAT "%s", msg, name(),
+                   byte_size_in_proper_unit(v_used), proper_unit_for_byte_size(v_used),
+                   byte_size_in_proper_unit(v_used_regions), proper_unit_for_byte_size(v_used_regions),
+                   byte_size_in_proper_unit(v_humongous_waste), proper_unit_for_byte_size(v_humongous_waste),
+                   byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
+                   byte_size_in_proper_unit(v_max_capacity), proper_unit_for_byte_size(v_max_capacity),
+                   byte_size_in_proper_unit(v_available), proper_unit_for_byte_size(v_available));
+}
+
+void ShenandoahGeneration::reset_mark_bitmap() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->assert_gc_workers(heap->workers()->active_workers());
+
+  set_mark_incomplete();
+
+  ShenandoahResetBitmapTask task;
+  parallel_heap_region_iterate(&task);
+}
+
+// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
+// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
+// location of the card table. So the interim implementation of swap_remembered_set will copy the write-table
+// onto the read-table and will then clear the write-table.
+void ShenandoahGeneration::swap_remembered_set() {
+  // Must be sure that marking is complete before we swap remembered set.
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->assert_gc_workers(heap->workers()->active_workers());
+  shenandoah_assert_safepoint();
+
+  // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
+  ShenandoahSquirrelAwayCardTable task;
+  heap->old_generation()->parallel_heap_region_iterate(&task);
+}
+
+// Copy the write-version of the card-table into the read-version, clearing the
+// write-version. The work is done at a safepoint and in parallel by the GC
+// worker threads.
+void ShenandoahGeneration::merge_write_table() {
+  // This should only happen for degenerated cycles
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->assert_gc_workers(heap->workers()->active_workers());
+  shenandoah_assert_safepoint();
+
+  ShenandoahMergeWriteTable task;
+  heap->old_generation()->parallel_heap_region_iterate(&task);
+}
+
+void ShenandoahGeneration::prepare_gc() {
+  // Invalidate the marking context
+  set_mark_incomplete();
+
+  // Capture Top At Mark Start for this generation (typically young) and reset mark bitmap.
+ ShenandoahResetUpdateRegionStateClosure cl; + parallel_heap_region_iterate(&cl); +} + +void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) { + + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t regions_available_to_loan = 0; + size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes; + size_t old_regions_loaned_for_young_evac = 0; + + ShenandoahGeneration* const old_generation = heap->old_generation(); + ShenandoahYoungGeneration* const young_generation = heap->young_generation(); + + // During initialization and phase changes, it is more likely that fewer objects die young and old-gen + // memory is not yet full (or is in the process of being replaced). During these times especially, it + // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases + // of execution. + + // Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion. + // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less + // critical. If we cannot promote, there may be degradation of young-gen memory because old objects + // accumulate there until they can be promoted. This increases the young-gen marking and evacuation work. + + // Do not fill up old-gen memory with promotions. Reserve some amount of memory for compaction purposes. + size_t young_evac_reserve_max = 0; + + // First priority is to reclaim the easy garbage out of young-gen. + + // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young + const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100; + const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); + + // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), + // clamped by the old generation space available. + // + // Here's the algebra. + // Let SOEP = ShenandoahOldEvacRatioPercent, + // OE = old evac, + // YE = young evac, and + // TE = total evac = OE + YE + // By definition: + // SOEP/100 = OE/TE + // = OE/(OE+YE) + // => SOEP/(100-SOEP) = OE/((OE+YE)-OE) // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c) + // = OE/YE + // => OE = YE*SOEP/(100-SOEP) + + // We have to be careful in the event that SOEP is set to 100 by the user. + assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); + const size_t old_available = old_generation->available(); + const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ? + old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + old_available); + + + // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority + // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young + // GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case, + // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs + // through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions + // do not add to the update-refs burden of GC. 
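+  // Worked example of the algebra above (hypothetical values, not defaults): with
+  // ShenandoahOldEvacRatioPercent = 25 and young evacuation YE = 300M, we get
+  // OE = YE * 25 / (100 - 25) = 100M, hence TE = OE + YE = 400M and OE/TE = 25%, as intended.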
+
+  ShenandoahOldHeuristics* const old_heuristics = heap->old_heuristics();
+  size_t old_evacuation_reserve, old_promo_reserve;
+  if (is_global()) {
+    // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
+    // of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take
+    // significantly longer than typical young marking because we must mark through all old objects. To expedite
+    // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
+    // Global GC will adjust generation sizes to accommodate the collection set it chooses.
+
+    // Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
+    // have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
+    // promotions in place, if relevant.
+    old_promo_reserve = 0;
+
+    // Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
+    // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
+    // the budget for evacuation of old during GLOBAL cset selection.
+    old_evacuation_reserve = maximum_old_evacuation_reserve;
+  } else if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
+    // We reserved all old-gen memory at the end of the previous GC to hold anticipated evacuations to old-gen. If this is
+    // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
+    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
+    old_evacuation_reserve = maximum_old_evacuation_reserve;
+    old_promo_reserve = 0;
+  } else {
+    // Make all old-evacuation memory available for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
+    old_evacuation_reserve = 0;
+    old_promo_reserve = maximum_old_evacuation_reserve;
+  }
+  assert(old_evacuation_reserve <= old_available, "Error");
+
+  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
+  // So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
+  // crannies within existing partially used regions and it generally tries to do so.
+  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
+  if (old_evacuation_reserve > old_free_unfragmented) {
+    const size_t delta = old_evacuation_reserve - old_free_unfragmented;
+    old_evacuation_reserve -= delta;
+    // Let promo consume fragments of old-gen memory if not global
+    if (!is_global()) {
+      old_promo_reserve += delta;
+    }
+  }
+
+  // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
+  // and identify regions that will promote in place. These use the tenuring threshold.
+  size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
+  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
+
+  // Note that old_promo_reserve might not be entirely consumed by advance promotion. Do not transfer the unused excess
+  // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
+  // of old evacuation failure.
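+  // Illustrative example of the unfragmented clamp above (hypothetical sizes): with
+  // 10 unaffiliated old regions of 4M each, old_free_unfragmented is 40M; an
+  // old_evacuation_reserve of 52M is trimmed by delta = 12M, and in a non-global
+  // cycle that 12M is shifted into old_promo_reserve instead.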
+ + heap->set_young_evac_reserve(young_evacuation_reserve); + heap->set_old_evac_reserve(old_evacuation_reserve); + heap->set_promoted_reserve(consumed_by_advance_promotion); + + // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the + // case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand. +} + +// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note +// that young_generation->available() now knows about recently discovered immediate garbage. +// +void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) { + // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may + // be able to increase regions_available_to_loan + + // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make + // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to + // integral number of regions. Excess memory that is available to be loaned is applied to an allocation supplement, + // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan + // will be repaid as soon as we finish updating references for the recently evacuated collection set. + + // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes + // because the available memory may be distributed between many partially occupied regions that are already holding old-gen + // objects. Memory in partially occupied regions is not "available" to be loaned. Note that an increase in old-gen + // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned + // to young-gen. + + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + const ShenandoahOldGeneration* const old_generation = heap->old_generation(); + const ShenandoahYoungGeneration* const young_generation = heap->young_generation(); + + size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation(); + size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * old_evacuated); + size_t old_evacuation_reserve = heap->get_old_evac_reserve(); + + if (old_evacuated_committed > old_evacuation_reserve) { + // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste + assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32, + "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, + old_evacuated_committed, old_evacuation_reserve); + old_evacuated_committed = old_evacuation_reserve; + // Leave old_evac_reserve as previously configured + } else if (old_evacuated_committed < old_evacuation_reserve) { + // This happens if the old-gen collection consumes less than full budget. 
+ old_evacuation_reserve = old_evacuated_committed; + heap->set_old_evac_reserve(old_evacuation_reserve); + } + + size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted(); + size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * young_advance_promoted); + + size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation(); + size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated); + + size_t total_young_available = young_generation->available_with_reserve(); + assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young"); + heap->set_young_evac_reserve(young_evacuated_reserve_used); + + size_t old_available = old_generation->available(); + // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation + // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during + // evac and update phases. + size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used; + + if (old_available < old_consumed) { + // This can happen due to round-off errors when adding the results of truncated integer arithmetic. + // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here. + assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32, + "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, + young_advance_promoted_reserve_used, old_available - old_evacuated_committed); + young_advance_promoted_reserve_used = old_available - old_evacuated_committed; + old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used; + } + + assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")", + old_consumed, old_available); + size_t excess_old = old_available - old_consumed; + size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions(); + size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes; + assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available"); + + // Make sure old_evac_committed is unaffiliated + if (old_evacuated_committed > 0) { + if (unaffiliated_old > old_evacuated_committed) { + size_t giveaway = unaffiliated_old - old_evacuated_committed; + size_t giveaway_regions = giveaway / region_size_bytes; // round down + if (giveaway_regions > 0) { + excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes); + } else { + excess_old = 0; + } + } else { + excess_old = 0; + } + } + + // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation + // runway during evacuation and update-refs. 
+  size_t regions_to_xfer = 0;
+  if (excess_old > unaffiliated_old) {
+    // we can give back unaffiliated_old (all of unaffiliated is excess)
+    if (unaffiliated_old_regions > 0) {
+      regions_to_xfer = unaffiliated_old_regions;
+    }
+  } else if (unaffiliated_old_regions > 0) {
+    // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
+    size_t excess_regions = excess_old / region_size_bytes;
+    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
+  }
+
+  if (regions_to_xfer > 0) {
+    bool result = heap->generation_sizer()->transfer_to_young(regions_to_xfer);
+    assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old");
+    excess_old -= regions_to_xfer * region_size_bytes;
+    log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
+                       result? "Successfully": "Unsuccessfully", regions_to_xfer);
+  }
+
+  // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
+  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
+  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
+  heap->set_promoted_reserve(total_promotion_reserve);
+  heap->reset_promoted_expended();
+}
+
+typedef struct {
+  ShenandoahHeapRegion* _region;
+  size_t _live_data;
+} AgedRegionData;
+
+static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
+  if (a._live_data < b._live_data)
+    return -1;
+  else if (a._live_data > b._live_data)
+    return 1;
+  else return 0;
+}
+
+inline void assert_no_in_place_promotions() {
+#ifdef ASSERT
+  class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
+  public:
+    void heap_region_do(ShenandoahHeapRegion *r) override {
+      assert(r->get_top_before_promote() == nullptr,
+             "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
+    }
+  } cl;
+  ShenandoahHeap::heap()->heap_region_iterate(&cl);
+#endif
+}
+
+// Preselect for inclusion into the collection set regions whose age is at or above tenure age and which contain more than
+// ShenandoahOldGarbageThreshold amounts of garbage. We identify these regions by setting the appropriate entry of
+// the collection set's preselected regions array to true. All entries are initialized to false before calling this
+// function.
+//
+// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
+// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
+// much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are
+// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
+// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
+// CPU and wall-clock time.
+//
+// A second benefit of treating aged regions differently than other regions during collection set selection is
+// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
+// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
+// reserved in the young generation.
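+// For instance (hypothetical numbers): with ShenandoahPromoEvacWaste at 1.1, preselecting
+// an aged region holding 10M of live data consumes 11M of the old_available promotion
+// budget passed to select_aged_regions() below.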
+size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { + + // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle. + assert_no_in_place_promotions(); + + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + assert(heap->mode()->is_generational(), "Only in generational mode"); + bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions(); + ShenandoahMarkingContext* const ctx = heap->marking_context(); + + const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); + const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100; + + size_t old_consumed = 0; + size_t promo_potential = 0; + size_t candidates = 0; + + // Tracks the padding of space above top in regions eligible for promotion in place + size_t promote_in_place_pad = 0; + + // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require + // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that + // have more live data. + const size_t num_regions = heap->num_regions(); + + ResourceMark rm; + AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions); + + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* const r = heap->get_region(i); + if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) { + // skip over regions that aren't regular young with some live data + continue; + } + if (r->age() >= tenuring_threshold) { + if ((r->garbage() < old_garbage_threshold)) { + // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to + // reclaim the garbage; instead this region may be eligible for promotion-in-place to the + // old generation. + HeapWord* tams = ctx->top_at_mark_start(r); + HeapWord* original_top = r->top(); + if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) { + // No allocations from this region have been made during concurrent mark. It meets all the criteria + // for in-place-promotion. Though we only need the value of top when we fill the end of the region, + // we use this field to indicate that this region should be promoted in place during the evacuation + // phase. + r->save_top_before_promote(); + + size_t remnant_size = r->free() / HeapWordSize; + if (remnant_size > ShenandoahHeap::min_fill_size()) { + ShenandoahHeap::fill_with_object(original_top, remnant_size); + // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, + // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any + // new allocations would not necessarily be eligible for promotion. This addresses both issues. + r->set_top(r->end()); + promote_in_place_pad += remnant_size * HeapWordSize; + } else { + // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental + // allocations occurring within this region before the region is promoted in place. + } + } + // Else, we do not promote this region (either in place or by copy) because it has received new allocations. 
+ + // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold, + // and get_top_before_promote() != tams + } else { + // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below, + // we may still decide to exclude this promotion-eligible region from the current collection set. If this + // happens, we will consider this region as part of the anticipated promotion potential for the next GC + // pass; see further below. + sorted_regions[candidates]._region = r; + sorted_regions[candidates++]._live_data = r->get_live_data_bytes(); + } + } else { + // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold. + // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to + // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that + // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes + // place during a subsequent GC pass because more garbage is found within the region between now and then. This + // should not happen if we are properly adapting the tenure age. The theory behind adaptive tenuring threshold + // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous + // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population + // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age. + // + // In the case that certain regions which were anticipated to be promoted in place need to be promoted by + // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of + // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion + // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause + // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle. + // + // TODO: + // If we are auto-tuning the tenure age and regions that were anticipated to be promoted in place end up + // being promoted by evacuation, this event should feed into the tenure-age-selection heuristic so that + // the tenure age can be increased. + if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) { + if (r->garbage() >= old_garbage_threshold) { + promo_potential += r->get_live_data_bytes(); + } + } + } + // Note that we keep going even if one region is excluded from selection. + // Subsequent regions may be selected if they have smaller live data. + } + // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions + // that qualify to be promoted by evacuation. 
+ if (candidates > 0) { + size_t selected_regions = 0; + size_t selected_live = 0; + QuickSort::sort(sorted_regions, candidates, compare_by_aged_live, false); + for (size_t i = 0; i < candidates; i++) { + ShenandoahHeapRegion* const region = sorted_regions[i]._region; + size_t region_live_data = sorted_regions[i]._live_data; + size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste); + if (old_consumed + promotion_need <= old_available) { + old_consumed += promotion_need; + candidate_regions_for_promotion_by_copy[region->index()] = true; + selected_regions++; + selected_live += region_live_data; + } else { + // We rejected this promotable region from the collection set because we had no room to hold its copy. + // Add this region to promo potential for next GC. + promo_potential += region_live_data; + assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected"); + } + // We keep going even if one region is excluded from selection because we need to accumulate all eligible + // regions that are not preselected into promo_potential + } + log_info(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes," + " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT, + selected_regions, selected_live, old_consumed, old_available); + } + heap->set_pad_for_promote_in_place(promote_in_place_pad); + heap->set_promotion_potential(promo_potential); + return old_consumed; +} + +void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahCollectionSet* collection_set = heap->collection_set(); + bool is_generational = heap->mode()->is_generational(); + + assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC"); + assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations"); + { + ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states : + ShenandoahPhaseTimings::degen_gc_final_update_region_states); + ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context()); + parallel_heap_region_iterate(&cl); + + if (is_young()) { + // We always need to update the watermark for old regions. If there + // are mixed collections pending, we also need to synchronize the + // pinned status for old regions. Since we are already visiting every + // old region here, go ahead and sync the pin status too. + ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr); + heap->old_generation()->parallel_heap_region_iterate(&old_cl); + } + } + + // Tally the census counts and compute the adaptive tenuring threshold + if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) { + // Objects above TAMS weren't included in the age census. Since they were all + // allocated in this cycle they belong in the age 0 cohort. We walk over all + // young regions and sum the volume of objects between TAMS and top. + ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context()); + heap->young_generation()->heap_region_iterate(&age0_cl); + size_t age0_pop = age0_cl.get_population(); + + // Age table updates + ShenandoahAgeCensus* census = heap->age_census(); + census->prepare_for_census_update(); + // Update the global census, including the missed age 0 cohort above, + // along with the census during marking, and compute the tenuring threshold + census->update_census(age0_pop); + } + + { + ShenandoahGCPhase phase(concurrent ? 
ShenandoahPhaseTimings::choose_cset : + ShenandoahPhaseTimings::degen_gc_choose_cset); + + collection_set->clear(); + ShenandoahHeapLocker locker(heap->lock()); + if (is_generational) { + // Seed the collection set with resource area-allocated + // preselected regions, which are removed when we exit this scope. + ResourceMark rm; + ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions()); + + // TODO: young_available can include available (between top() and end()) within each young region that is not + // part of the collection set. Making this memory available to the young_evacuation_reserve allows a larger + // young collection set to be chosen when available memory is under extreme pressure. Implementing this "improvement" + // is tricky, because the incremental construction of the collection set actually changes the amount of memory + // available to hold evacuated young-gen objects. As currently implemented, the memory that is available within + // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while + // GC is evacuating and updating references. + + // Find the amount that will be promoted, regions that will be promoted in + // place, and preselect older regions that will be promoted by evacuation. + compute_evacuation_budgets(heap); + + // Choose the collection set, including the regions preselected above for + // promotion into the old generation. + _heuristics->choose_collection_set(collection_set); + if (!collection_set->is_empty()) { + // only make use of evacuation budgets when we are evacuating + adjust_evacuation_budgets(heap, collection_set); + } + + if (is_global()) { + // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so + // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will + // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus, + // we prepare for old collections by remembering which regions are old at this time. Note that any objects + // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that + // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to + // coalesce those regions. Only the old regions which are not part of the collection set at this point are + // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations + // after a global cycle for old regions that were not included in this collection set. + assert(heap->old_generation()->is_mark_complete(), "Expected old generation mark to be complete after global cycle."); + heap->old_heuristics()->prepare_for_old_collections(); + log_info(gc)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT, + heap->old_heuristics()->unprocessed_old_collection_candidates(), + heap->old_heuristics()->coalesce_and_fill_candidates_count()); + } + } else { + _heuristics->choose_collection_set(collection_set); + } + } + + // Freeset construction uses reserve quantities if they are valid + heap->set_evacuation_reserve_quantities(true); + { + ShenandoahGCPhase phase(concurrent ? 
ShenandoahPhaseTimings::final_rebuild_freeset : + ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); + ShenandoahHeapLocker locker(heap->lock()); + size_t young_cset_regions, old_cset_regions; + + // We are preparing for evacuation. At this time, we ignore cset region tallies. + size_t first_old, last_old, num_old; + heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + heap->free_set()->rebuild(young_cset_regions, old_cset_regions); + } + heap->set_evacuation_reserve_quantities(false); +} + +bool ShenandoahGeneration::is_bitmap_clear() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* context = heap->marking_context(); + const size_t num_regions = heap->num_regions(); + for (size_t idx = 0; idx < num_regions; idx++) { + ShenandoahHeapRegion* r = heap->get_region(idx); + if (contains(r) && r->is_affiliated()) { + if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) && + !context->is_bitmap_clear_range(r->bottom(), r->end())) { + return false; + } + } + } + return true; +} + +bool ShenandoahGeneration::is_mark_complete() { + return _is_marking_complete.is_set(); +} + +void ShenandoahGeneration::set_mark_complete() { + _is_marking_complete.set(); +} + +void ShenandoahGeneration::set_mark_incomplete() { + _is_marking_complete.unset(); +} + +ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() { + assert(is_mark_complete(), "Marking must be completed."); + return ShenandoahHeap::heap()->marking_context(); +} + +void ShenandoahGeneration::cancel_marking() { + log_info(gc)("Cancel marking: %s", name()); + if (is_concurrent_mark_in_progress()) { + set_mark_incomplete(); + } + _task_queues->clear(); + ref_processor()->abandon_partial_discovery(); + set_concurrent_mark_in_progress(false); +} + +ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, + uint max_workers, + size_t max_capacity, + size_t soft_max_capacity) : + _type(type), + _task_queues(new ShenandoahObjToScanQueueSet(max_workers)), + _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))), + _affiliated_region_count(0), _humongous_waste(0), _used(0), _bytes_allocated_since_gc_start(0), + _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity), + _heuristics(nullptr) { + _is_marking_complete.set(); + assert(max_workers > 0, "At least one queue"); + for (uint i = 0; i < max_workers; ++i) { + ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); + _task_queues->register_queue(i, task_queue); + } +} + +ShenandoahGeneration::~ShenandoahGeneration() { + for (uint i = 0; i < _task_queues->size(); ++i) { + ShenandoahObjToScanQueue* q = _task_queues->queue(i); + delete q; + } + delete _task_queues; +} + +void ShenandoahGeneration::reserve_task_queues(uint workers) { + _task_queues->reserve(workers); +} + +ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const { + return nullptr; +} + +void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) { + assert(is_young(), "Should only scan remembered set for young generation."); + + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + uint nworkers = heap->workers()->active_workers(); + reserve_task_queues(nworkers); + + ShenandoahReferenceProcessor* rp = ref_processor(); + ShenandoahRegionChunkIterator work_list(nworkers); + ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent); + 
heap->assert_gc_workers(nworkers); + heap->workers()->run_task(&task); + if (ShenandoahEnableCardStats) { + assert(heap->card_scan() != nullptr, "Not generational"); + heap->card_scan()->log_card_stats(nworkers, CARD_STAT_SCAN_RS); + } +} + +size_t ShenandoahGeneration::increment_affiliated_region_count() { + shenandoah_assert_heaplocked_or_fullgc_safepoint(); + // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced + // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with + // a coherent value. + _affiliated_region_count++; + return _affiliated_region_count; +} + +size_t ShenandoahGeneration::decrement_affiliated_region_count() { + shenandoah_assert_heaplocked_or_fullgc_safepoint(); + // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced + // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with + // a coherent value. + _affiliated_region_count--; + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), + "used + humongous cannot exceed regions"); + return _affiliated_region_count; +} + +size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) { + shenandoah_assert_heaplocked_or_fullgc_safepoint(); + _affiliated_region_count += delta; + return _affiliated_region_count; +} + +size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { + shenandoah_assert_heaplocked_or_fullgc_safepoint(); + assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative"); + + _affiliated_region_count -= delta; + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), + "used + humongous cannot exceed regions"); + return _affiliated_region_count; +} + +void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) { + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); + _affiliated_region_count = num_regions; + _used = num_bytes; + _humongous_waste = humongous_waste; +} + +void ShenandoahGeneration::increase_used(size_t bytes) { + Atomic::add(&_used, bytes); +} + +void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { + if (bytes > 0) { + Atomic::add(&_humongous_waste, bytes); + } +} + +void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { + if (bytes > 0) { + assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes), + "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes); + Atomic::sub(&_humongous_waste, bytes); + } +} + +void ShenandoahGeneration::decrease_used(size_t bytes) { + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_used >= bytes), "cannot reduce bytes used by generation below zero"); + Atomic::sub(&_used, bytes); +} + +size_t ShenandoahGeneration::used_regions() const { + return 
_affiliated_region_count;
+}
+
+size_t ShenandoahGeneration::free_unaffiliated_regions() const {
+  size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
+  if (_affiliated_region_count > result) {
+    result = 0;
+  } else {
+    result -= _affiliated_region_count;
+  }
+  return result;
+}
+
+size_t ShenandoahGeneration::used_regions_size() const {
+  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
+}
+
+size_t ShenandoahGeneration::available() const {
+  return available(max_capacity());
+}
+
+// For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
+size_t ShenandoahGeneration::available_with_reserve() const {
+  return available(max_capacity());
+}
+
+size_t ShenandoahGeneration::soft_available() const {
+  return available(soft_max_capacity());
+}
+
+size_t ShenandoahGeneration::available(size_t capacity) const {
+  size_t in_use = used() + get_humongous_waste();
+  return in_use > capacity ? 0 : capacity - in_use;
+}
+
+void ShenandoahGeneration::increase_capacity(size_t increment) {
+  shenandoah_assert_heaplocked_or_safepoint();
+
+  // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
+  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
+  // in place.
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
+  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
+  _max_capacity += increment;
+
+  // This detects arithmetic wraparound on _used
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
+         "Affiliated regions must hold more than what is currently used");
+}
+
+void ShenandoahGeneration::decrease_capacity(size_t decrement) {
+  shenandoah_assert_heaplocked_or_safepoint();
+
+  // We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb
+  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
+  // in place.
+ assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size"); + assert(_max_capacity >= decrement, "Generation capacity cannot be negative"); + + _max_capacity -= decrement; + + // This detects arithmetic wraparound on _used + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used), + "Affiliated regions must hold more than what is currently used"); + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_used <= _max_capacity), "Cannot use more than capacity"); + // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING + assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity), + "Cannot use more than capacity"); +} + +void ShenandoahGeneration::record_success_concurrent(bool abbreviated) { + heuristics()->record_success_concurrent(abbreviated); + ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp new file mode 100644 index 00000000000..4f06fb944d5 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -0,0 +1,222 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHGENERATION_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHGENERATION_HPP + +#include "memory/allocation.hpp" +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" +#include "gc/shenandoah/shenandoahGenerationType.hpp" +#include "gc/shenandoah/shenandoahLock.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.hpp" + +class ShenandoahHeapRegion; +class ShenandoahHeapRegionClosure; +class ShenandoahReferenceProcessor; +class ShenandoahHeap; +class ShenandoahMode; + +class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo { + friend class VMStructs; +private: + ShenandoahGenerationType const _type; + + // Marking task queues and completeness + ShenandoahObjToScanQueueSet* _task_queues; + ShenandoahSharedFlag _is_marking_complete; + + ShenandoahReferenceProcessor* const _ref_processor; + + size_t _affiliated_region_count; + + // How much free memory is left in the last region of a humongous object. + // This is _not_ included in used, but it _is_ deducted from available, + // which gives the heuristics a more accurate view of how much memory remains + // for allocation. This figure is also included in the heap status logging. + // The units are bytes. The value is only changed on a safepoint or under the + // heap lock. + size_t _humongous_waste; + +protected: + // Usage + + volatile size_t _used; + volatile size_t _bytes_allocated_since_gc_start; + size_t _max_capacity; + size_t _soft_max_capacity; + + ShenandoahHeuristics* _heuristics; + +private: + // Compute evacuation budgets prior to choosing collection set. + void compute_evacuation_budgets(ShenandoahHeap* heap); + + // Adjust evacuation budgets after choosing collection set. + void adjust_evacuation_budgets(ShenandoahHeap* heap, + ShenandoahCollectionSet* collection_set); + + // Preselect for possible inclusion into the collection set exactly the most + // garbage-dense regions, including those that satisfy criteria 1 & 2 below, + // and whose live bytes will fit within the old_available budget: + // Criterion 1. region age >= tenuring threshold + // Criterion 2. region garbage percentage > ShenandoahOldGarbageThreshold + // + // Also identifies regions eligible for promotion in place: those of at + // least tenuring_threshold age whose garbage density is lower. + // + // Updates the promotion_potential and pad_for_promote_in_place fields + // of the heap. Returns bytes of live object memory in the preselected + // regions, which are marked in the preselected_regions() indicator + // array of the heap's collection set, which should be initialized + // to false.
+ size_t select_aged_regions(size_t old_available); + + size_t available(size_t capacity) const; + + public: + ShenandoahGeneration(ShenandoahGenerationType type, + uint max_workers, + size_t max_capacity, + size_t soft_max_capacity); + ~ShenandoahGeneration(); + + bool is_young() const { return _type == YOUNG; } + bool is_old() const { return _type == OLD; } + bool is_global() const { return _type == GLOBAL_GEN || _type == GLOBAL_NON_GEN; } + + inline ShenandoahGenerationType type() const { return _type; } + + inline ShenandoahHeuristics* heuristics() const { return _heuristics; } + + ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; } + + virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode); + + size_t soft_max_capacity() const override { return _soft_max_capacity; } + size_t max_capacity() const override { return _max_capacity; } + virtual size_t used_regions() const; + virtual size_t used_regions_size() const; + virtual size_t free_unaffiliated_regions() const; + size_t used() const override { return _used; } + size_t available() const override; + size_t available_with_reserve() const; + + // Returns the memory available based on the _soft_ max heap capacity (soft_max_heap - used). + // The soft max heap size may be adjusted lower than the max heap size to cause the trigger + // to believe it has less memory available than is _really_ available. Lowering the soft + // max heap size will cause the adaptive heuristic to run cycles more frequently. + size_t soft_available() const override; + + size_t bytes_allocated_since_gc_start() const override; + void reset_bytes_allocated_since_gc_start(); + void increase_allocated(size_t bytes); + + // These methods change the capacity of the generation by adding or subtracting the given number of bytes from the current + // capacity. + void increase_capacity(size_t increment); + void decrease_capacity(size_t decrement); + + void log_status(const char* msg) const; + + // Used directly by FullGC + void reset_mark_bitmap(); + + // Used by concurrent and degenerated GC to reset remembered set. + void swap_remembered_set(); + + // Update the read cards with the state of the write table (write table is not cleared). + void merge_write_table(); + + // Called before init mark, expected to prepare regions for marking. + virtual void prepare_gc(); + + // Called during final mark, chooses collection set, rebuilds free set. + virtual void prepare_regions_and_collection_set(bool concurrent); + + // Cancel marking (used by Full collect and when cancelling cycle). + virtual void cancel_marking(); + + // Return true if this region is affiliated with this generation. + virtual bool contains(ShenandoahHeapRegion* region) const = 0; + + // Return true if this object is affiliated with this generation. + virtual bool contains(oop obj) const = 0; + + // Apply closure to all regions affiliated with this generation. + virtual void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) = 0; + + // Apply closure to all regions affiliated with this generation (single threaded). + virtual void heap_region_iterate(ShenandoahHeapRegionClosure* cl) = 0; + + // This is public to support cancellation of marking when a Full cycle is started. + virtual void set_concurrent_mark_in_progress(bool in_progress) = 0; + + // Check the bitmap only for regions belonging to this generation. + bool is_bitmap_clear(); + + // We need to track the status of marking for different generations.
+ bool is_mark_complete(); + virtual void set_mark_complete(); + virtual void set_mark_incomplete(); + + ShenandoahMarkingContext* complete_marking_context(); + + // Task queues + ShenandoahObjToScanQueueSet* task_queues() const { return _task_queues; } + virtual void reserve_task_queues(uint workers); + virtual ShenandoahObjToScanQueueSet* old_gen_task_queues() const; + + // Scan remembered set at start of concurrent young-gen marking. + void scan_remembered_set(bool is_concurrent); + + // Return the updated value of affiliated_region_count + size_t increment_affiliated_region_count(); + + // Return the updated value of affiliated_region_count + size_t decrement_affiliated_region_count(); + + // Return the updated value of affiliated_region_count + size_t increase_affiliated_region_count(size_t delta); + + // Return the updated value of affiliated_region_count + size_t decrease_affiliated_region_count(size_t delta); + + void establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste); + + void increase_used(size_t bytes); + void decrease_used(size_t bytes); + + void increase_humongous_waste(size_t bytes); + void decrease_humongous_waste(size_t bytes); + size_t get_humongous_waste() const { return _humongous_waste; } + + virtual bool is_concurrent_mark_in_progress() = 0; + void confirm_heuristics_mode(); + + virtual void record_success_concurrent(bool abbreviated); +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHGENERATION_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationType.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationType.hpp new file mode 100644 index 00000000000..ec73fc07c13 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationType.hpp @@ -0,0 +1,51 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONTYPE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONTYPE_HPP + +enum ShenandoahGenerationType { + GLOBAL_NON_GEN, // Global, non-generational + GLOBAL_GEN, // Global, generational + YOUNG, // Young, generational + OLD // Old, generational +}; + +inline const char* shenandoah_generation_name(ShenandoahGenerationType mode) { + switch (mode) { + case GLOBAL_NON_GEN: + return ""; + case GLOBAL_GEN: + return "Global"; + case OLD: + return "Old"; + case YOUNG: + return "Young"; + default: + ShouldNotReachHere(); + return "?"; + } +} + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONTYPE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp new file mode 100644 index 00000000000..9fcf9a4780e --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahGenerationalHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahInitLogger.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" + +#include "logging/log.hpp" + +class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger { +public: + static void print() { + ShenandoahGenerationalInitLogger logger; + logger.print_all(); + } + + void print_heap() override { + ShenandoahInitLogger::print_heap(); + + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); + + ShenandoahYoungGeneration* young = heap->young_generation(); + log_info(gc, init)("Young Generation Soft Size: " PROPERFMT, PROPERFMTARGS(young->soft_max_capacity())); + log_info(gc, init)("Young Generation Max: " PROPERFMT, PROPERFMTARGS(young->max_capacity())); + + ShenandoahOldGeneration* old = heap->old_generation(); + log_info(gc, init)("Old Generation Soft Size: " PROPERFMT, PROPERFMTARGS(old->soft_max_capacity())); + log_info(gc, init)("Old Generation Max: " PROPERFMT, PROPERFMTARGS(old->max_capacity())); + } + +protected: + void print_gc_specific() override { + ShenandoahInitLogger::print_gc_specific(); + + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); + log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name()); + log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name()); + } +}; + +ShenandoahGenerationalHeap* ShenandoahGenerationalHeap::heap() { + CollectedHeap* heap = Universe::heap(); + return checked_cast<ShenandoahGenerationalHeap*>(heap); +} + +void ShenandoahGenerationalHeap::print_init_logger() const { + ShenandoahGenerationalInitLogger logger; + logger.print_all(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp new file mode 100644 index 00000000000..5d56179e8b8 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -0,0 +1,39 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALHEAP +#define SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALHEAP + +#include "gc/shenandoah/shenandoahHeap.hpp" + +class ShenandoahGenerationalHeap : public ShenandoahHeap { +public: + explicit ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) : ShenandoahHeap(policy) {} + + static ShenandoahGenerationalHeap* heap(); + + void print_init_logger() const override; +}; + +#endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALHEAP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.cpp new file mode 100644 index 00000000000..59163d1f29c --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.cpp @@ -0,0 +1,124 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahGlobalGeneration.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" + + +const char* ShenandoahGlobalGeneration::name() const { + return "GLOBAL"; +} + +size_t ShenandoahGlobalGeneration::max_capacity() const { + return ShenandoahHeap::heap()->max_capacity(); +} + +size_t ShenandoahGlobalGeneration::used_regions() const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(heap->mode()->is_generational(), "Region usage accounting is only for generational mode"); + return heap->old_generation()->used_regions() + heap->young_generation()->used_regions(); +} + +size_t ShenandoahGlobalGeneration::used_regions_size() const { + return ShenandoahHeap::heap()->capacity(); +} + +size_t ShenandoahGlobalGeneration::soft_max_capacity() const { + return ShenandoahHeap::heap()->soft_max_capacity(); +} + +size_t ShenandoahGlobalGeneration::available() const { + return ShenandoahHeap::heap()->free_set()->available(); +} + +size_t ShenandoahGlobalGeneration::soft_available() const { + size_t available = this->available(); + + // Make sure the code below treats available without the soft tail. + assert(max_capacity() >= soft_max_capacity(), "Max capacity must be greater than soft max capacity."); + size_t soft_tail = max_capacity() - soft_max_capacity(); + return (available > soft_tail) ? 
(available - soft_tail) : 0; +} + +void ShenandoahGlobalGeneration::set_concurrent_mark_in_progress(bool in_progress) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (in_progress && heap->mode()->is_generational()) { + // Global collection has preempted an old generation mark. This is fine + // because the global generation includes the old generation, but we + // want the global collect to start from a clean slate and we don't want + // any stale state in the old generation. + assert(!heap->is_concurrent_old_mark_in_progress(), "Old cycle should not be running."); + } + + heap->set_concurrent_young_mark_in_progress(in_progress); +} + +bool ShenandoahGlobalGeneration::contains(ShenandoahHeapRegion* region) const { + return true; +} + +void ShenandoahGlobalGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + ShenandoahHeap::heap()->parallel_heap_region_iterate(cl); +} + +void ShenandoahGlobalGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + ShenandoahHeap::heap()->heap_region_iterate(cl); +} + +bool ShenandoahGlobalGeneration::is_concurrent_mark_in_progress() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + return heap->is_concurrent_mark_in_progress(); +} + +ShenandoahHeuristics* ShenandoahGlobalGeneration::initialize_heuristics(ShenandoahMode* gc_mode) { + if (gc_mode->is_generational()) { + _heuristics = new ShenandoahGlobalHeuristics(this); + } else { + _heuristics = gc_mode->initialize_heuristics(this); + } + + _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval); + confirm_heuristics_mode(); + return _heuristics; +} + +void ShenandoahGlobalGeneration::set_mark_complete() { + ShenandoahGeneration::set_mark_complete(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + heap->young_generation()->set_mark_complete(); + heap->old_generation()->set_mark_complete(); +} + +void ShenandoahGlobalGeneration::set_mark_incomplete() { + ShenandoahGeneration::set_mark_incomplete(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + heap->young_generation()->set_mark_incomplete(); + heap->old_generation()->set_mark_incomplete(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.hpp new file mode 100644 index 00000000000..4b0427b785a --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahGlobalGeneration.hpp @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHGLOBALGENERATION_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHGLOBALGENERATION_HPP + +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" + +// A "generation" that represents the whole heap. +class ShenandoahGlobalGeneration : public ShenandoahGeneration { +public: + ShenandoahGlobalGeneration(bool generational, uint max_queues, size_t max_capacity, size_t soft_max_capacity) + : ShenandoahGeneration(generational ? GLOBAL_GEN : GLOBAL_NON_GEN, max_queues, max_capacity, soft_max_capacity) { } + +public: + const char* name() const override; + + size_t max_capacity() const override; + size_t soft_max_capacity() const override; + size_t used_regions() const override; + size_t used_regions_size() const override; + size_t available() const override; + size_t soft_available() const override; + + void set_concurrent_mark_in_progress(bool in_progress) override; + + bool contains(ShenandoahHeapRegion* region) const override; + + bool contains(oop obj) const override { + // TODO: Should this assert is_in()? + return true; + } + + void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + + void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + + bool is_concurrent_mark_in_progress() override; + + void set_mark_complete() override; + + void set_mark_incomplete() override; + + ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHGLOBALGENERATION_HPP + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index f1dcbf5a8bc..63a67b6f0fe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,14 +36,21 @@ #include "gc/shared/plab.hpp" #include "gc/shared/tlab_globals.hpp" +#include "gc/shenandoah/shenandoahAgeCensus.hpp" +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp" +#include "gc/shenandoah/shenandoahAllocRequest.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahCardTable.hpp" #include "gc/shenandoah/shenandoahClosures.inline.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentMark.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/shenandoahControlThread.hpp" +#include "gc/shenandoah/shenandoahRegulatorThread.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahGlobalGeneration.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" @@ -52,12 +60,14 @@ #include "gc/shenandoah/shenandoahMemoryPool.hpp" #include "gc/shenandoah/shenandoahMetrics.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahPacer.inline.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" #include "gc/shenandoah/shenandoahSTWMark.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" @@ -65,9 +75,13 @@ #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkGroup.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp" #include "gc/shenandoah/mode/shenandoahIUMode.hpp" #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp" #include "gc/shenandoah/mode/shenandoahSATBMode.hpp" +#include "utilities/globalDefinitions.hpp" + #if INCLUDE_JFR #include "gc/shenandoah/shenandoahJfrSupport.hpp" #endif @@ -159,9 +173,6 @@ jint ShenandoahHeap::initialize() { "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT, _num_regions, max_byte_size, reg_size_bytes); - // Now we know the number of regions, initialize the heuristics. - initialize_heuristics(); - size_t num_committed_regions = init_byte_size / reg_size_bytes; num_committed_regions = MIN2(num_committed_regions, _num_regions); assert(num_committed_regions <= _num_regions, "sanity"); @@ -177,6 +188,9 @@ jint ShenandoahHeap::initialize() { _committed = _initial_size; + // Now we know the number of regions and heap sizes, initialize the heuristics. + initialize_heuristics_generations(); + size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); size_t region_page_size = UseLargePages ? 
os::large_page_size() : os::vm_page_size(); @@ -192,6 +206,9 @@ jint ShenandoahHeap::initialize() { assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, "Misaligned heap: " PTR_FORMAT, p2i(base())); + os::trace_page_sizes_for_requested_size("Heap", + max_byte_size, heap_rs.page_size(), heap_alignment, + heap_rs.base(), heap_rs.size()); #if SHENANDOAH_OPTIMIZED_MARKTASK // The optimized ShenandoahMarkTask takes some bits away from the full object bits. @@ -211,12 +228,40 @@ jint ShenandoahHeap::initialize() { "Cannot commit heap memory"); } + BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region)); + + // + // After reserving the Java heap, create the card table, barriers, and workers, in dependency order + // + if (mode()->is_generational()) { + ShenandoahDirectCardMarkRememberedSet *rs; + ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table(); + size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize); + rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count); + _card_scan = new ShenandoahScanRemembered(rs); + + // Age census structure + _age_census = new ShenandoahAgeCensus(); + } + + _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers); + if (_workers == nullptr) { + vm_exit_during_initialization("Failed necessary allocation."); + } else { + _workers->initialize_workers(); + } + + if (ParallelGCThreads > 1) { + _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads); + _safepoint_workers->initialize_workers(); + } + // // Reserve and commit memory for bitmap(s) // - _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size()); - _bitmap_size = align_up(_bitmap_size, bitmap_page_size); + size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size()); + _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size); size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor(); @@ -242,6 +287,10 @@ jint ShenandoahHeap::initialize() { _bitmap_bytes_per_slice, bitmap_page_size); ReservedSpace bitmap(_bitmap_size, bitmap_page_size); + os::trace_page_sizes_for_requested_size("Mark Bitmap", + bitmap_size_orig, bitmap.page_size(), bitmap_page_size, + bitmap.base(), + bitmap.size()); MemTracker::record_virtual_memory_type(bitmap.base(), mtGC); _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize); _bitmap_region_special = bitmap.special(); @@ -254,10 +303,14 @@ jint ShenandoahHeap::initialize() { "Cannot commit bitmap memory"); } - _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers); + _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions); if (ShenandoahVerify) { ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size); + os::trace_page_sizes_for_requested_size("Verify Bitmap", + bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size, + verify_bitmap.base(), + verify_bitmap.size()); if (!verify_bitmap.special()) { os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false, "Cannot commit verification bitmap memory"); @@ -269,7 +322,19 @@ jint ShenandoahHeap::initialize() { } // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 
- ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); + size_t aux_bitmap_page_size = bitmap_page_size; +#ifdef LINUX + // In THP "advise" mode, we refrain from advising the system to use large pages + // since we know these commits will be short-lived, and there is no reason to trash + // the THP area with this bitmap. + if (UseTransparentHugePages) { + aux_bitmap_page_size = os::vm_page_size(); + } +#endif + ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size); + os::trace_page_sizes_for_requested_size("Aux Bitmap", + bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size, + aux_bitmap.base(), aux_bitmap.size()); MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); _aux_bitmap_region_special = aux_bitmap.special(); @@ -279,10 +344,14 @@ // Create regions and region sets // size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE); - size_t region_storage_size = align_up(region_align * _num_regions, region_page_size); - region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity()); + size_t region_storage_size_orig = region_align * _num_regions; + size_t region_storage_size = align_up(region_storage_size_orig, + MAX2(region_page_size, os::vm_allocation_granularity())); ReservedSpace region_storage(region_storage_size, region_page_size); + os::trace_page_sizes_for_requested_size("Region Storage", + region_storage_size_orig, region_storage.page_size(), region_page_size, + region_storage.base(), region_storage.size()); MemTracker::record_virtual_memory_type(region_storage.base(), mtGC); if (!region_storage.special()) { os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false, @@ -293,16 +362,18 @@ // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there. // If not successful, bite the bullet and allocate at whatever address.
{ - size_t cset_align = MAX2(os::vm_page_size(), os::vm_allocation_granularity()); - size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align); + const size_t cset_align = MAX2(os::vm_page_size(), os::vm_allocation_granularity()); + const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align); + const size_t cset_page_size = os::vm_page_size(); uintptr_t min = round_up_power_of_2(cset_align); uintptr_t max = (1u << 30u); + ReservedSpace cset_rs; for (uintptr_t addr = min; addr <= max; addr <<= 1u) { char* req_addr = (char*)addr; assert(is_aligned(req_addr, cset_align), "Should be aligned"); - ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr); + cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr); if (cset_rs.is_reserved()) { assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr); _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); @@ -311,17 +382,23 @@ jint ShenandoahHeap::initialize() { } if (_collection_set == nullptr) { - ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size()); + cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size()); _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); } + os::trace_page_sizes_for_requested_size("Collection Set", + cset_size, cset_rs.page_size(), cset_page_size, + cset_rs.base(), + cset_rs.size()); } _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC); + _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC); _free_set = new ShenandoahFreeSet(this, _num_regions); { ShenandoahHeapLocker locker(lock()); + for (size_t i = 0; i < _num_regions; i++) { HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i; bool is_committed = i < num_committed_regions; @@ -333,12 +410,18 @@ jint ShenandoahHeap::initialize() { _marking_context->initialize_top_at_mark_start(r); _regions[i] = r; assert(!collection_set()->is_in(i), "New region should not be in collection set"); + + _affiliations[i] = ShenandoahAffiliation::FREE; } // Initialize to complete _marking_context->mark_complete(); + size_t young_cset_regions, old_cset_regions; - _free_set->rebuild(); + // We are initializing free set. We ignore cset region tallies. 
+ size_t first_old, last_old, num_old; + _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + _free_set->rebuild(young_cset_regions, old_cset_regions); } if (AlwaysPreTouch) { @@ -400,13 +483,48 @@ jint ShenandoahHeap::initialize() { } _control_thread = new ShenandoahControlThread(); + _regulator_thread = new ShenandoahRegulatorThread(_control_thread); - ShenandoahInitLogger::print(); + print_init_logger(); return JNI_OK; } -void ShenandoahHeap::initialize_mode() { +void ShenandoahHeap::print_init_logger() const { + ShenandoahInitLogger::print(); +} + +size_t ShenandoahHeap::max_size_for(ShenandoahGeneration* generation) const { + switch (generation->type()) { + case YOUNG: + return _generation_sizer.max_young_size(); + case OLD: + return max_capacity() - _generation_sizer.min_young_size(); + case GLOBAL_GEN: + case GLOBAL_NON_GEN: + return max_capacity(); + default: + ShouldNotReachHere(); + return 0; + } +} + +size_t ShenandoahHeap::min_size_for(ShenandoahGeneration* generation) const { + switch (generation->type()) { + case YOUNG: + return _generation_sizer.min_young_size(); + case OLD: + return max_capacity() - _generation_sizer.max_young_size(); + case GLOBAL_GEN: + case GLOBAL_NON_GEN: + return min_capacity(); + default: + ShouldNotReachHere(); + return 0; + } +} + +void ShenandoahHeap::initialize_heuristics_generations() { if (ShenandoahGCMode != nullptr) { if (strcmp(ShenandoahGCMode, "satb") == 0) { _gc_mode = new ShenandoahSATBMode(); @@ -414,6 +532,8 @@ void ShenandoahHeap::initialize_mode() { _gc_mode = new ShenandoahIUMode(); } else if (strcmp(ShenandoahGCMode, "passive") == 0) { _gc_mode = new ShenandoahPassiveMode(); + } else if (strcmp(ShenandoahGCMode, "generational") == 0) { + _gc_mode = new ShenandoahGenerationalMode(); } else { vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option"); } @@ -431,22 +551,26 @@ void ShenandoahHeap::initialize_mode() { err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", _gc_mode->name())); } -} -void ShenandoahHeap::initialize_heuristics() { - assert(_gc_mode != nullptr, "Must be initialized"); - _heuristics = _gc_mode->initialize_heuristics(); + // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity + // for old would be total heap - minimum capacity of young. This means the sum of the maximum + // allowed for old and young could exceed the total heap size. It remains the case that the + // _actual_ capacity of young + old = total. 
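To make the capacity rule above concrete with illustrative numbers (not drawn from the patch): on a 1024 MB heap with a minimum young size of 256 MB and a maximum young size of 768 MB, old is allowed up to 1024 - 256 = 768 MB while young is also allowed up to 768 MB. The allowed maxima sum to 1536 MB, well beyond the heap, yet at any instant the actual young and old capacities still sum to exactly 1024 MB.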
+ _generation_sizer.heap_size_changed(max_capacity()); + size_t initial_capacity_young = _generation_sizer.max_young_size(); + size_t max_capacity_young = _generation_sizer.max_young_size(); + size_t initial_capacity_old = max_capacity() - max_capacity_young; + size_t max_capacity_old = max_capacity() - initial_capacity_young; - if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { - vm_exit_during_initialization( - err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", - _heuristics->name())); - } - if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { - vm_exit_during_initialization( - err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", - _heuristics->name())); + _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young); + _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old); + _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, max_capacity(), max_capacity()); + _global_generation->initialize_heuristics(_gc_mode); + if (mode()->is_generational()) { + _young_generation->initialize_heuristics(_gc_mode); + _old_generation->initialize_heuristics(_gc_mode); } + _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational()); } #ifdef _MSC_VER @@ -456,33 +580,48 @@ void ShenandoahHeap::initialize_heuristics() { ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : CollectedHeap(), + _gc_generation(nullptr), _initial_size(0), - _used(0), + _promotion_potential(0), _committed(0), - _bytes_allocated_since_gc_start(0), - _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)), + _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)), _workers(nullptr), _safepoint_workers(nullptr), _heap_region_special(false), _num_regions(0), _regions(nullptr), + _affiliations(nullptr), _update_refs_iterator(this), + _promoted_reserve(0), + _old_evac_reserve(0), + _young_evac_reserve(0), + _age_census(nullptr), + _has_evacuation_reserve_quantities(false), + _cancel_requested_time(0), + _young_generation(nullptr), + _global_generation(nullptr), + _old_generation(nullptr), _control_thread(nullptr), + _regulator_thread(nullptr), _shenandoah_policy(policy), - _gc_mode(nullptr), - _heuristics(nullptr), _free_set(nullptr), _pacer(nullptr), _verifier(nullptr), _phase_timings(nullptr), + _evac_tracker(nullptr), + _mmu_tracker(), + _generation_sizer(), _monitoring_support(nullptr), _memory_pool(nullptr), + _young_gen_memory_pool(nullptr), + _old_gen_memory_pool(nullptr), _stw_memory_manager("Shenandoah Pauses"), _cycle_memory_manager("Shenandoah Cycles"), _gc_timer(new ConcurrentGCTimer()), _soft_ref_policy(), _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes), - _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))), + _old_regions_surplus(0), + _old_regions_deficit(0), _marking_context(nullptr), _bitmap_size(0), _bitmap_regions_per_slice(0), @@ -490,60 +629,15 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : _bitmap_region_special(false), _aux_bitmap_region_special(false), _liveness_cache(nullptr), - _collection_set(nullptr) + _collection_set(nullptr), + _card_scan(nullptr) { - // Initialize GC mode early, so we can adjust barrier support - initialize_mode(); - BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this)); - - _max_workers = MAX2(_max_workers, 1U); - _workers = new 
ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers); - if (_workers == nullptr) { - vm_exit_during_initialization("Failed necessary allocation."); - } else { - _workers->initialize_workers(); - } - - if (ParallelGCThreads > 1) { - _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", - ParallelGCThreads); - _safepoint_workers->initialize_workers(); - } } #ifdef _MSC_VER #pragma warning( pop ) #endif -class ShenandoahResetBitmapTask : public WorkerTask { -private: - ShenandoahRegionIterator _regions; - -public: - ShenandoahResetBitmapTask() : - WorkerTask("Shenandoah Reset Bitmap") {} - - void work(uint worker_id) { - ShenandoahHeapRegion* region = _regions.next(); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahMarkingContext* const ctx = heap->marking_context(); - while (region != nullptr) { - if (heap->is_bitmap_slice_committed(region)) { - ctx->clear_bitmap(region); - } - region = _regions.next(); - } - } -}; - -void ShenandoahHeap::reset_mark_bitmap() { - assert_gc_workers(_workers->active_workers()); - mark_incomplete_marking_context(); - - ShenandoahResetBitmapTask task; - _workers->run_task(&task); -} - void ShenandoahHeap::print_on(outputStream* st) const { st->print_cr("Shenandoah Heap"); st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", @@ -558,7 +652,8 @@ void ShenandoahHeap::print_on(outputStream* st) const { st->print("Status: "); if (has_forwarded_objects()) st->print("has forwarded objects, "); - if (is_concurrent_mark_in_progress()) st->print("marking, "); + if (is_concurrent_old_mark_in_progress()) st->print("old marking, "); + if (is_concurrent_young_mark_in_progress()) st->print("young marking, "); if (is_evacuation_in_progress()) st->print("evacuating, "); if (is_update_refs_in_progress()) st->print("updating refs, "); if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); @@ -609,6 +704,8 @@ class ShenandoahInitWorkerGCLABClosure : public ThreadClosure { void ShenandoahHeap::post_initialize() { CollectedHeap::post_initialize(); + _mmu_tracker.initialize(); + MutexLocker ml(Threads_lock); ShenandoahInitWorkerGCLABClosure init_gclabs; @@ -622,13 +719,35 @@ void ShenandoahHeap::post_initialize() { _safepoint_workers->set_initialize_gclab(); } - _heuristics->initialize(); - JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers()); } +ShenandoahHeuristics* ShenandoahHeap::heuristics() { + return _global_generation->heuristics(); +} + +ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() { + return (ShenandoahOldHeuristics*) _old_generation->heuristics(); +} + +ShenandoahYoungHeuristics* ShenandoahHeap::young_heuristics() { + return (ShenandoahYoungHeuristics*) _young_generation->heuristics(); +} + +bool ShenandoahHeap::doing_mixed_evacuations() { + return _old_generation->state() == ShenandoahOldGeneration::EVACUATING; +} + +bool ShenandoahHeap::is_old_bitmap_stable() const { + return _old_generation->is_mark_complete(); +} + +bool ShenandoahHeap::is_gc_generation_young() const { + return _gc_generation != nullptr && _gc_generation->is_young(); +} + size_t ShenandoahHeap::used() const { - return Atomic::load(&_used); + return global_generation()->used(); } size_t ShenandoahHeap::committed() const { @@ -645,33 +764,84 @@ void ShenandoahHeap::decrease_committed(size_t bytes) { _committed -= bytes; } -void ShenandoahHeap::increase_used(size_t bytes) { - Atomic::add(&_used, bytes, memory_order_relaxed); +// For tracking usage based on 
allocations, it should be the case that: +// * The sum of regions::used == heap::used +// * The sum of a generation's regions::used == generation::used +// * The sum of a generation's humongous regions::free == generation::humongous_waste +// These invariants are checked by the verifier on GC safepoints. +// +// Additional notes: +// * When a mutator's allocation request causes a region to be retired, the +// free memory left in that region is considered waste. It does not contribute +// to the usage, but it _does_ contribute to allocation rate. +// * The bottom of a PLAB must be aligned on card size. In some cases this will +// require padding in front of the PLAB (a filler object). Because this padding +// is included in the region's used memory we include the padding in the usage +// accounting as waste. +// * Mutator allocations are used to compute an allocation rate. They are also +// sent to the Pacer for those purposes. +// * There are three sources of waste: +// 1. The padding used to align a PLAB on card size +// 2. Region's free is less than minimum TLAB size and is retired +// 3. The unused portion of memory in the last region of a humongous object +void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) { + size_t actual_bytes = req.actual_size() * HeapWordSize; + size_t wasted_bytes = req.waste() * HeapWordSize; + ShenandoahGeneration* generation = generation_for(req.affiliation()); + + if (req.is_gc_alloc()) { + assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste"); + increase_used(generation, actual_bytes + wasted_bytes); + } else { + assert(req.is_mutator_alloc(), "Expected mutator alloc here"); + // padding and actual size both count towards allocation counter + generation->increase_allocated(actual_bytes + wasted_bytes); + + // only actual size counts toward usage for mutator allocations + increase_used(generation, actual_bytes); + + // notify pacer of both actual size and waste + notify_mutator_alloc_words(req.actual_size(), req.waste()); + + if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) { + increase_humongous_waste(generation,wasted_bytes); + } + } } -void ShenandoahHeap::set_used(size_t bytes) { - Atomic::store(&_used, bytes); +void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { + generation->increase_humongous_waste(bytes); + if (!generation->is_global()) { + global_generation()->increase_humongous_waste(bytes); + } } -void ShenandoahHeap::decrease_used(size_t bytes) { - assert(used() >= bytes, "never decrease heap size by more than we've left"); - Atomic::sub(&_used, bytes, memory_order_relaxed); +void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { + generation->decrease_humongous_waste(bytes); + if (!generation->is_global()) { + global_generation()->decrease_humongous_waste(bytes); + } } -void ShenandoahHeap::increase_allocated(size_t bytes) { - Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); +void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) { + generation->increase_used(bytes); + if (!generation->is_global()) { + global_generation()->increase_used(bytes); + } } -void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { - size_t bytes = words * HeapWordSize; - if (!waste) { - increase_used(bytes); +void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) { + 
generation->decrease_used(bytes); + if (!generation->is_global()) { + global_generation()->decrease_used(bytes); } - increase_allocated(bytes); +} + +void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) { if (ShenandoahPacing) { control_thread()->pacing_notify_alloc(words); - if (waste) { - pacer()->claim_for_alloc(words, true); + if (waste > 0) { + pacer()->claim_for_alloc(waste, true); } } } @@ -707,12 +877,6 @@ size_t ShenandoahHeap::initial_capacity() const { return _initial_size; } -bool ShenandoahHeap::is_in(const void* p) const { - HeapWord* heap_base = (HeapWord*) base(); - HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); - return p >= heap_base && p < last_region_end; -} - void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) { assert (ShenandoahUncommit, "should be enabled"); @@ -740,6 +904,71 @@ void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) { if (count > 0) { control_thread()->notify_heap_changed(); + regulator_thread()->notify_heap_changed(); + } +} + +void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) { + // Only register the copy of the object that won the evacuation race. + card_scan()->register_object_without_lock(obj); + + // Mark the entire range of the evacuated object as dirty. At next remembered set scan, + // we will clear dirty bits that do not hold interesting pointers. It's more efficient to + // do this in batch, in a background GC thread than to try to carefully dirty only cards + // that hold interesting pointers right now. + card_scan()->mark_range_as_dirty(obj, words); + + if (promotion) { + // This evacuation was a promotion, track this as allocation against old gen + old_generation()->increase_allocated(words * HeapWordSize); + } +} + +void ShenandoahHeap::handle_old_evacuation_failure() { + if (_old_gen_oom_evac.try_set()) { + log_info(gc)("Old gen evac failure."); + } +} + +void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) { + // We squelch excessive reports to reduce noise in logs. + const size_t MaxReportsPerEpoch = 4; + static size_t last_report_epoch = 0; + static size_t epoch_report_count = 0; + + size_t promotion_reserve; + size_t promotion_expended; + + size_t gc_id = control_thread()->get_gc_id(); + + if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) { + { + // Promotion failures should be very rare. Invest in providing useful diagnostic info. + ShenandoahHeapLocker locker(lock()); + promotion_reserve = get_promoted_reserve(); + promotion_expended = get_promoted_expended(); + } + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining(); + const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled"; + ShenandoahGeneration* old_gen = old_generation(); + size_t old_capacity = old_gen->max_capacity(); + size_t old_usage = old_gen->used(); + size_t old_free_regions = old_gen->free_unaffiliated_regions(); + + log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT + ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT + ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT, + size * HeapWordSize, plab == nullptr? 
"no": "yes", + words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended, + old_capacity, old_usage, old_free_regions); + + if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) { + log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch"); + } else if (gc_id != last_report_epoch) { + last_report_epoch = gc_id; + epoch_report_count = 1; + } } } @@ -749,6 +978,14 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively. size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2; + + // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size. This enables more equitable distribution of + // available evacuation buidget between the many threads that are coordinating in the evacuation effort. + if (ShenandoahMaxEvacLABRatio > 0) { + log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio); + new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio); + } + new_size = MIN2(new_size, PLAB::max_size()); new_size = MAX2(new_size, PLAB::min_size()); @@ -760,6 +997,7 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) if (new_size < size) { // New size still does not fit the object. Fall back to shared allocation. // This avoids retiring perfectly good GCLABs, when we encounter a large object. + log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size); return nullptr; } @@ -792,11 +1030,257 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) return gclab->allocate(size); } +// Establish a new PLAB and allocate size HeapWords within it. +HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) { + // New object should fit the PLAB size + size_t min_size = MAX2(size, PLAB::min_size()); + + // Figure out size of new PLAB, looking back at heuristics. Expand aggressively. + size_t cur_size = ShenandoahThreadLocalData::plab_size(thread); + if (cur_size == 0) { + cur_size = PLAB::min_size(); + } + size_t future_size = cur_size * 2; + // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size. This enables more equitable distribution of + // available evacuation buidget between the many threads that are coordinating in the evacuation effort. + if (ShenandoahMaxEvacLABRatio > 0) { + future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio); + } + future_size = MIN2(future_size, PLAB::max_size()); + future_size = MAX2(future_size, PLAB::min_size()); + + size_t unalignment = future_size % CardTable::card_size_in_words(); + if (unalignment != 0) { + future_size = future_size - unalignment + CardTable::card_size_in_words(); + } + + // Record new heuristic value even if we take any shortcut. This captures + // the case when moderately-sized objects always take a shortcut. At some point, + // heuristics should catch up with them. Note that the requested cur_size may + // not be honored, but we remember that this is the preferred size. + ShenandoahThreadLocalData::set_plab_size(thread, future_size); + if (cur_size < size) { + // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation. + // This avoids retiring perfectly good PLABs in order to represent a single large object allocation. 
+ return nullptr; + } + + // Retire current PLAB, and allocate a new one. + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + if (plab->words_remaining() < PLAB::min_size()) { + // Retire current PLAB, and allocate a new one. + // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock. This + // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is + // aligned with the start of a card's memory range. + retire_plab(plab, thread); + + size_t actual_size = 0; + // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is + // less than the remaining evacuation need. It also adjusts plab_preallocated and expend_promoted if appropriate. + HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size); + if (plab_buf == nullptr) { + if (min_size == PLAB::min_size()) { + // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size. This allows us + // to fail faster on subsequent promotion attempts. + ShenandoahThreadLocalData::disable_plab_promotions(thread); + } + return nullptr; + } else { + ShenandoahThreadLocalData::enable_plab_retries(thread); + } + assert (size <= actual_size, "allocation should fit"); + if (ZeroTLAB) { + // ..and clear it. + Copy::zero_to_words(plab_buf, actual_size); + } else { + // ...and zap just allocated object. +#ifdef ASSERT + // Skip mangling the space corresponding to the object header to + // ensure that the returned space is not considered parsable by + // any concurrent GC thread. + size_t hdr_size = oopDesc::header_size(); + Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal); +#endif // ASSERT + } + plab->set_buf(plab_buf, actual_size); + if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) { + return nullptr; + } + return plab->allocate(size); + } else { + // If there's still at least min_size() words available within the current plab, don't retire it. Let's gnaw + // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request + // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we + // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs. + return nullptr; + } +} + +// TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within +// this plab at the time we retire the plab. A tight registration loop will run within both code and data caches. This change +// would allow smaller and faster in-line implementation of alloc_from_plab(). Since plabs are aligned on card-table boundaries, +// this object registration loop can be performed without acquiring a lock. +void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) { + // We don't enforce limits on plab_evacuated. We let it consume all available old-gen memory in order to reduce + // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion + // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any + // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle. + + // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions. + // 1. Some of the plab may have been dedicated to evacuations. + // 2. Some of the plab may have been abandoned due to waste (at the end of the plab). + size_t not_promoted = + ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread); + ShenandoahThreadLocalData::reset_plab_promoted(thread); + ShenandoahThreadLocalData::reset_plab_evacuated(thread); + ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0); + if (not_promoted > 0) { + unexpend_promoted(not_promoted); + } + size_t waste = plab->waste(); + HeapWord* top = plab->top(); + plab->retire(); + if (top != nullptr && plab->waste() > waste && is_in_old(top)) { + // If retiring the plab created a filler object, then we + // need to register it with our card scanner so it can + // safely walk the region backing the plab. + log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT, + plab->waste() - waste, p2i(top)); + card_scan()->register_object_without_lock(top); + } +} + +void ShenandoahHeap::retire_plab(PLAB* plab) { + Thread* thread = Thread::current(); + retire_plab(plab, thread); +} + +void ShenandoahHeap::cancel_old_gc() { + shenandoah_assert_safepoint(); + assert(_old_generation != nullptr, "Should only have mixed collections in generation mode."); + if (_old_generation->state() == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP) { + assert(!old_generation()->is_concurrent_mark_in_progress(), "Cannot be marking in IDLE"); + assert(!old_heuristics()->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE"); + assert(!old_heuristics()->unprocessed_old_collection_candidates(), "Cannot have mixed collection candidates in IDLE"); + assert(!young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE"); + } else { + log_info(gc)("Terminating old gc cycle."); + // Stop marking + old_generation()->cancel_marking(); + // Stop tracking old regions + old_heuristics()->abandon_collection_candidates(); + // Remove old generation access to young generation mark queues + young_generation()->set_old_gen_task_queues(nullptr); + // Transition to IDLE now. + _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); + } +} + +// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations +// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to +// xfer_limit, and any excess is transferred to the young generation. +// xfer_limit is the maximum we're able to transfer from young to old. +void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( + size_t xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { + + // We can limit the old reserve to the size of anticipated promotions: + // max_old_reserve is an upper bound on memory evacuated from old and promoted to old, + // clamped by the old generation space available. + // + // Here's the algebra. + // Let SOEP = ShenandoahOldEvacRatioPercent, + // OE = old evac, + // YE = young evac, and + // TE = total evac = OE + YE + // By definition: + // SOEP/100 = OE/TE + // = OE/(OE+YE) + // => SOEP/(100-SOEP) = OE/((OE+YE)-OE) // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c) + // = OE/YE + // => OE = YE*SOEP/(100-SOEP) + + // We have to be careful in the event that SOEP is set to 100 by the user. 
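+  // Worked example (the numbers are illustrative, not from any particular configuration): with
+  // SOEP = 20 and YE = 400 MB, OE = 400 * 20 / (100 - 20) = 100 MB, so OE / TE = 100 / 500 = 20%,
+  // matching the requested ratio.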
+ assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); + const size_t old_available = old_generation()->available(); + // The free set will reserve this amount of memory to hold young evacuations + const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? + old_available : MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + old_available); + + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + // Decide how much old space we should reserve for a mixed collection + size_t reserve_for_mixed = 0; + const size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates(); + const bool doing_mixed = (mixed_candidates > 0); + if (doing_mixed) { + // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we + // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation. + size_t max_evac_need = (size_t) + (old_heuristics()->unprocessed_old_collection_candidates_live_memory() * ShenandoahOldEvacWaste); + assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes, + "Unaffiliated available must be less than total available"); + size_t old_fragmented_available = + old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes; + reserve_for_mixed = max_evac_need + old_fragmented_available; + if (reserve_for_mixed > max_old_reserve) { + reserve_for_mixed = max_old_reserve; + } + } + + // Decide how much space we should reserve for promotions from young + size_t reserve_for_promo = 0; + const size_t promo_load = get_promotion_potential(); + const bool doing_promotions = promo_load > 0; + if (doing_promotions) { + // We're promoting and have a bound on the maximum amount that can be promoted + const size_t available_for_promotions = max_old_reserve - reserve_for_mixed; + reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions); + } + + // This is the total old we want to ideally reserve + const size_t old_reserve = reserve_for_mixed + reserve_for_promo; + assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); + + // We now check if the old generation is running a surplus or a deficit. + size_t old_region_deficit = 0; + size_t old_region_surplus = 0; + + const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes; + if (max_old_available >= old_reserve) { + // We are running a surplus, so the old region surplus can go to young + const size_t old_surplus = max_old_available - old_reserve; + old_region_surplus = old_surplus / region_size_bytes; + const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; + old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); + } else { + // We are running a deficit which we'd like to fill from young. + // Ignore that this will directly impact young_generation()->max_capacity(), + // indirectly impacting young_reserve and old_reserve. These computations are conservative. + const size_t old_need = old_reserve - max_old_available; + // The old region deficit (rounded up) will come from young + old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes; + + // Round down the regions we can transfer from young to old. 
If we're running short
+  // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
+  // curtailed if the budget is restricted.
+    const size_t max_old_region_xfer = xfer_limit / region_size_bytes;
+    old_region_deficit = MIN2(old_region_deficit, max_old_region_xfer);
+  }
+  assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
+
+  set_old_region_surplus(old_region_surplus);
+  set_old_region_deficit(old_region_deficit);
+}
+
+// Called from stubs in JIT code or interpreter
 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                             size_t requested_size,
                                             size_t* actual_size) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
-  HeapWord* res = allocate_memory(req);
+  HeapWord* res = allocate_memory(req, false);
   if (res != nullptr) {
     *actual_size = req.actual_size();
   } else {
@@ -809,7 +1293,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                              size_t word_size,
                                              size_t* actual_size) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
-  HeapWord* res = allocate_memory(req);
+  HeapWord* res = allocate_memory(req, false);
   if (res != nullptr) {
     *actual_size = req.actual_size();
   } else {
@@ -818,7 +1302,29 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
   return res;
 }
 
-HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
+HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
+                                            size_t word_size,
+                                            size_t* actual_size) {
+  // Align requested sizes to card-sized multiples
+  size_t words_in_card = CardTable::card_size_in_words();
+  size_t align_mask = ~(words_in_card - 1);
+  min_size = (min_size + words_in_card - 1) & align_mask;
+  word_size = (word_size + words_in_card - 1) & align_mask;
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
+  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
+  // if we are at risk of infringing on the old-gen evacuation budget.
+  HeapWord* res = allocate_memory(req, false);
+  if (res != nullptr) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  return res;
+}
+
+// is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
+// to old-gen. PLAB allocations are not known as such, since they may hold old-gen evacuations.
+HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
   intptr_t pacer_epoch = 0;
   bool in_new_region = false;
   HeapWord* result = nullptr;
@@ -830,7 +1336,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
   }
 
   if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
-    result = allocate_memory_under_lock(req, in_new_region);
+    result = allocate_memory_under_lock(req, in_new_region, is_promotion);
   }
 
   // Allocation failed, block until control thread reacted, then retry allocation.
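A minimal sketch of the card-boundary rounding that allocate_new_plab() above performs on its
requested sizes, assuming the common defaults of 512-byte cards and 8-byte heap words (64 words
per card); align_up_to_card is an illustrative helper name, not a HotSpot function:

#include <cassert>
#include <cstddef>

// Round a word count up to the next multiple of words_in_card, which must be a power of two.
// This mirrors the mask arithmetic in allocate_new_plab() above.
static std::size_t align_up_to_card(std::size_t words, std::size_t words_in_card) {
  const std::size_t align_mask = ~(words_in_card - 1);
  return (words + words_in_card - 1) & align_mask;
}

int main() {
  assert(align_up_to_card(100, 64) == 128);  // 100 words round up into the second card
  assert(align_up_to_card(128, 64) == 128);  // already-aligned sizes are unchanged
  return 0;
}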
@@ -844,19 +1350,29 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { while (result == nullptr && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) { control_thread()->handle_alloc_failure(req); - result = allocate_memory_under_lock(req, in_new_region); + result = allocate_memory_under_lock(req, in_new_region, is_promotion); } + } else { assert(req.is_gc_alloc(), "Can only accept GC allocs here"); - result = allocate_memory_under_lock(req, in_new_region); + result = allocate_memory_under_lock(req, in_new_region, is_promotion); // Do not call handle_alloc_failure() here, because we cannot block. // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac(). } if (in_new_region) { control_thread()->notify_heap_changed(); + regulator_thread()->notify_heap_changed(); } + if (result == nullptr) { + req.set_actual_size(0); + } + + // This is called regardless of the outcome of the allocation to account + // for any waste created by retiring regions with this request. + increase_used(req); + if (result != nullptr) { size_t requested = req.size(); size_t actual = req.actual_size(); @@ -866,31 +1382,206 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual); if (req.is_mutator_alloc()) { - notify_mutator_alloc_words(actual, false); - // If we requested more than we were granted, give the rest back to pacer. // This only matters if we are in the same pacing epoch: do not try to unpace // over the budget for the other phase. if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) { pacer()->unpace_for_alloc(pacer_epoch, requested - actual); } - } else { - increase_used(actual*HeapWordSize); } } return result; } -HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { - ShenandoahHeapLocker locker(lock()); - return _free_set->allocate(req, in_new_region); +HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) { + bool try_smaller_lab_size = false; + size_t smaller_lab_size; + { + // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions. + bool promotion_eligible = false; + bool allow_allocation = true; + bool plab_alloc = false; + size_t requested_bytes = req.size() * HeapWordSize; + HeapWord* result = nullptr; + ShenandoahHeapLocker locker(lock()); + Thread* thread = Thread::current(); + + if (mode()->is_generational()) { + if (req.affiliation() == YOUNG_GENERATION) { + if (req.is_mutator_alloc()) { + size_t young_words_available = young_generation()->available() / HeapWordSize; + if (req.is_lab_alloc() && (req.min_size() < young_words_available)) { + // Allow ourselves to try a smaller lab size even if requested_bytes <= young_available. We may need a smaller + // lab size because young memory has become too fragmented. + try_smaller_lab_size = true; + smaller_lab_size = (young_words_available < req.size())? young_words_available: req.size(); + } else if (req.size() > young_words_available) { + // Can't allocate because even min_size() is larger than remaining young_available + log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT + ", young words available: " SIZE_FORMAT, req.type_string(), + HeapWordSize * (req.is_lab_alloc()? 
req.min_size(): req.size()), young_words_available);
+          return nullptr;
+        }
+      }
+    } else {                    // req.affiliation() == OLD_GENERATION
+      assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
+      if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+        plab_alloc = true;
+        size_t promotion_avail = get_promoted_reserve();
+        size_t promotion_expended = get_promoted_expended();
+        if (promotion_expended + requested_bytes > promotion_avail) {
+          promotion_avail = 0;
+          if (get_old_evac_reserve() == 0) {
+            // There are no old-gen evacuations in this pass. There's no value in creating a plab that cannot
+            // be used for promotions.
+            allow_allocation = false;
+          }
+        } else {
+          promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
+          promotion_eligible = true;
+        }
+      } else if (is_promotion) {
+        // This is a shared alloc for promotion
+        size_t promotion_avail = get_promoted_reserve();
+        size_t promotion_expended = get_promoted_expended();
+        if (promotion_expended + requested_bytes > promotion_avail) {
+          promotion_avail = 0;
+        } else {
+          promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
+        }
+        if (promotion_avail == 0) {
+          // We need to reserve the remaining memory for evacuation. Reject this allocation. The object will be
+          // evacuated to young-gen memory and promoted during a future GC pass.
+          return nullptr;
+        }
+        // Else, we'll allow the allocation to proceed. (Since we hold heap lock, the tested condition remains true.)
+      } else {
+        // This is a shared allocation for evacuation. Memory has already been reserved for this purpose.
+      }
+    }
+  } // This ends the is_generational() block
+
+  // First try the original request. If TLAB request size is greater than available, allocate() will attempt to downsize
+  // request to fit within available memory.
+  result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
+  if (result != nullptr) {
+    if (req.is_old()) {
+      ShenandoahThreadLocalData::reset_plab_promoted(thread);
+      if (req.is_gc_alloc()) {
+        bool disable_plab_promotions = false;
+        if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+          if (promotion_eligible) {
+            size_t actual_size = req.actual_size() * HeapWordSize;
+            // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
+            // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
+            if (get_promoted_expended() + actual_size <= get_promoted_reserve()) {
+              // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach.
+              // When we retire this plab, we'll unexpend what we don't really use.
+              ShenandoahThreadLocalData::enable_plab_promotions(thread);
+              expend_promoted(actual_size);
+              assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
+              ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
+            } else {
+              disable_plab_promotions = true;
+            }
+          } else {
+            disable_plab_promotions = true;
+          }
+          if (disable_plab_promotions) {
+            // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
+            ShenandoahThreadLocalData::disable_plab_promotions(thread);
+            ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
+          }
+        } else if (is_promotion) {
+          // Shared promotion. Assume size is requested_bytes.
+          expend_promoted(requested_bytes);
+          assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
+        }
+      }
+
+      // Register the newly allocated object while we're holding the global lock since there's no synchronization
+      // built in to the implementation of register_object(). There are potential races when multiple independent
+      // threads are allocating objects, some of which might span the same card region. For example, consider
+      // a card table's memory region within which three objects are being allocated by three different threads:
+      //
+      // objects being "concurrently" allocated:
+      //    [-----a------][-----b-----][--------------c------------------]
+      //            [---- card table memory range --------------]
+      //
+      // Before any objects are allocated, this card's memory range holds no objects. Note that allocation of object a
+      // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
+      // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
+      // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
+      // card region.
+      //
+      // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
+      // last-start representing object b while first-start represents object c. This is why we need to require all
+      // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
+      ShenandoahHeap::heap()->card_scan()->register_object(result);
+    }
+  } else {
+    // The allocation failed. If this was a plab allocation, we've already retired it and no longer have a plab.
+    if (req.is_old() && req.is_gc_alloc() && (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
+      // We don't need to disable PLAB promotions because there is no PLAB. We leave promotions enabled because
+      // this allows the surrounding infrastructure to retry allocate_from_plab_slow() with a smaller PLAB size.
+      ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
+    }
+  }
+  if ((result != nullptr) || !try_smaller_lab_size) {
+    return result;
+  }
+  // else, fall through to try_smaller_lab_size
+  } // This closes the block that holds the heap lock, releasing the lock.
+
+  // We failed to allocate the originally requested lab size. Let's see if we can allocate a smaller lab size.
+  if (req.size() == smaller_lab_size) {
+    // If we were already trying to allocate min size, no value in attempting to repeat the same. End the recursion.
+    return nullptr;
+  }
+
+  // We arrive here if the tlab allocation request can be resized to fit within young_available
+  assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
+         (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
+
+  // By convention, ShenandoahAllocRequest is primarily read-only. The only mutable instance data is represented by
+  // actual_size(), which is overwritten with the size of the allocation when the allocation request is satisfied. We use a
+  // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocRequest argument.
+  // Mutation of the existing object might lead to astonishing results if calling contexts assume the content of immutable
+  // fields remain constant. The original TLAB allocation request was for memory that exceeded the current capacity.
We'll + // attempt to allocate a smaller TLAB. If this is successful, we'll update actual_size() of our incoming + // ShenandoahAllocRequest. If the recursive request fails, we'll simply return nullptr. + + // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive + // call reacquires the lock. If that happens, we will need another recursive call to further reduce the size of our request + // for each time another thread allocates young memory during the brief intervals that the heap lock is available to + // interfering threads. We expect this interference to be rare. The recursion bottoms out when young_available is + // smaller than req.min_size(). The inner-nested call to allocate_memory_under_lock() uses the same min_size() value + // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most + // recently saw as the memory currently available within the young generation. + + // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration. We need at most one + // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration + // of the loop required for each time the existing solution would recurse. An iterative solution would be more efficient + // in CPU time and stack memory utilization. The expectation is that it is very rare that we would recurse more than once + // so making this change is not currently seen as a high priority. + + ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size); + + // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to + // satisfy the allocation request. The reality is the actual TLAB size is likely to be even smaller, because it will + // depend on how much memory is available within mutator regions that are not yet fully used. + HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion); + if (result != nullptr) { + req.set_actual_size(smaller_req.actual_size()); + } + return result; } HeapWord* ShenandoahHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) { ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size); - return allocate_memory(req); + return allocate_memory(req, false); } MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, @@ -899,8 +1590,8 @@ MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo MetaWord* result; // Inform metaspace OOM to GC heuristics if class unloading is possible. - if (heuristics()->can_unload_classes()) { - ShenandoahHeuristics* h = heuristics(); + ShenandoahHeuristics* h = global_generation()->heuristics(); + if (h->can_unload_classes()) { h->record_metaspace_oom(); } @@ -979,11 +1670,102 @@ class ShenandoahEvacuationTask : public WorkerTask { ShenandoahHeapRegion* r; while ((r =_cs->claim_next()) != nullptr) { assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index()); + _sh->marked_object_iterate(r, &cl); if (ShenandoahPacing) { _sh->pacer()->report_evac(r->used() >> LogHeapWordSize); } + if (_sh->check_cancelled_gc_and_yield(_concurrent)) { + break; + } + } + } +}; + +// Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set. 
+// This is needed in order to promote humongous start regions if age() >= tenuring threshold.
+class ShenandoahGenerationalEvacuationTask : public WorkerTask {
+private:
+  ShenandoahHeap* const _sh;
+  ShenandoahRegionIterator* _regions;
+  bool _concurrent;
+  uint _tenuring_threshold;
+
+public:
+  ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
+                                       ShenandoahRegionIterator* iterator,
+                                       bool concurrent) :
+    WorkerTask("Shenandoah Evacuation"),
+    _sh(sh),
+    _regions(iterator),
+    _concurrent(concurrent),
+    _tenuring_threshold(0)
+  {
+    if (_sh->mode()->is_generational()) {
+      _tenuring_threshold = _sh->age_census()->tenuring_threshold();
+    }
+  }
+
+  void work(uint worker_id) {
+    if (_concurrent) {
+      ShenandoahConcurrentWorkerSession worker_session(worker_id);
+      ShenandoahSuspendibleThreadSetJoiner stsj;
+      ShenandoahEvacOOMScope oom_evac_scope;
+      do_work();
+    } else {
+      ShenandoahParallelWorkerSession worker_session(worker_id);
+      ShenandoahEvacOOMScope oom_evac_scope;
+      do_work();
+    }
+  }
+
+private:
+  void do_work() {
+    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
+    ShenandoahHeapRegion* r;
+    ShenandoahMarkingContext* const ctx = ShenandoahHeap::heap()->marking_context();
+    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+    size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
+    while ((r = _regions->next()) != nullptr) {
+      log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
+                    r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
+                    r->is_active()? "active": "inactive",
+                    r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
+                    r->is_cset()? "cset": "not-cset");
+
+      if (r->is_cset()) {
+        assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
+        _sh->marked_object_iterate(r, &cl);
+        if (ShenandoahPacing) {
+          _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
+        }
+      } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
+        HeapWord* tams = ctx->top_at_mark_start(r);
+        if (r->is_humongous_start()) {
+          // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
+          // doing this work during a safepoint. We cannot put humongous regions into the collection set because that
+          // triggers the load-reference barrier (LRB) to copy on reference fetch.
+          r->promote_humongous();
+        } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
+          assert(r->garbage_before_padded_for_promote() < old_garbage_threshold,
+                 "Region " SIZE_FORMAT " has too much garbage for promotion", r->index());
+          assert(r->get_top_before_promote() == tams,
+                 "Region " SIZE_FORMAT " has been used for allocations before promotion", r->index());
+          // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
+          // the LRB to copy on reference fetch.
+          r->promote_in_place();
+        }
+        // Aged humongous continuation regions are handled with their start region. If an aged regular region has
+        // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation. If there is room for evacuation
+        // in this cycle, the region will be in the collection set. If there is not room, the region will be promoted
+        // by evacuation in some future GC cycle.
+ + // If an aged regular region has received allocations during the current cycle, we do not promote because the + // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle. + } + // else, region is free, or OLD, or not in collection set, or humongous_continuation, + // or is young humongous_start that is too young to be promoted if (_sh->check_cancelled_gc_and_yield(_concurrent)) { break; @@ -993,8 +1775,14 @@ class ShenandoahEvacuationTask : public WorkerTask { }; void ShenandoahHeap::evacuate_collection_set(bool concurrent) { - ShenandoahEvacuationTask task(this, _collection_set, concurrent); - workers()->run_task(&task); + if (ShenandoahHeap::heap()->mode()->is_generational()) { + ShenandoahRegionIterator regions; + ShenandoahGenerationalEvacuationTask task(this, ®ions, concurrent); + workers()->run_task(&task); + } else { + ShenandoahEvacuationTask task(this, _collection_set, concurrent); + workers()->run_task(&task); + } } void ShenandoahHeap::trash_cset_regions() { @@ -1024,7 +1812,7 @@ void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { } } -void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { +size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { assert(start->is_humongous_start(), "reclaim regions starting with the first one"); oop humongous_obj = cast_to_oop(start->bottom()); @@ -1044,6 +1832,7 @@ void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { region->make_trash_immediate(); } + return required_regions; } class ShenandoahCheckCleanGCLABClosure : public ThreadClosure { @@ -1053,6 +1842,10 @@ class ShenandoahCheckCleanGCLABClosure : public ThreadClosure { PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name()); assert(gclab->words_remaining() == 0, "GCLAB should not need retirement"); + + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + assert(plab != nullptr, "PLAB should be initialized for %s", thread->name()); + assert(plab->words_remaining() == 0, "PLAB should not need retirement"); } }; @@ -1068,6 +1861,17 @@ class ShenandoahRetireGCLABClosure : public ThreadClosure { if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) { ShenandoahThreadLocalData::set_gclab_size(thread, 0); } + + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + assert(plab != nullptr, "PLAB should be initialized for %s", thread->name()); + + // There are two reasons to retire all plabs between old-gen evacuation passes. + // 1. We need to make the plab memory parsable by remembered-set scanning. + // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region + ShenandoahHeap::heap()->retire_plab(plab, thread); + if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) { + ShenandoahThreadLocalData::set_plab_size(thread, 0); + } } }; @@ -1127,9 +1931,13 @@ void ShenandoahHeap::gclabs_retire(bool resize) { // Returns size in bytes size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const { - // Return the max allowed size, and let the allocation path - // figure out the safe size for current allocation. - return ShenandoahHeapRegion::max_tlab_size_bytes(); + if (mode()->is_generational()) { + return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available()); + } else { + // Return the max allowed size, and let the allocation path + // figure out the safe size for current allocation. 
+ return ShenandoahHeapRegion::max_tlab_size_bytes(); + } } size_t ShenandoahHeap::max_tlab_size() const { @@ -1169,7 +1977,12 @@ void ShenandoahHeap::prepare_for_verify() { } void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { + if (_shenandoah_policy->is_at_shutdown()) { + return; + } + tcl->do_thread(_control_thread); + tcl->do_thread(_regulator_thread); workers()->threads_do(tcl); if (_safepoint_workers != nullptr) { _safepoint_workers->threads_do(tcl); @@ -1189,11 +2002,33 @@ void ShenandoahHeap::print_tracing_info() const { shenandoah_policy()->print_gc_stats(&ls); + ls.cr(); + + evac_tracker()->print_global_on(&ls); + ls.cr(); ls.cr(); } } +void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) { + set_gc_cause(cause); + set_gc_generation(generation); + + shenandoah_policy()->record_cycle_start(); + generation->heuristics()->record_cycle_start(); +} + +void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) { + generation->heuristics()->record_cycle_end(); + if (mode()->is_generational() && generation->is_global()) { + // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well + young_generation()->heuristics()->record_cycle_end(); + old_generation()->heuristics()->record_cycle_end(); + } + set_gc_cause(GCCause::_no_gc); +} + void ShenandoahHeap::verify(VerifyOption vo) { if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { if (ShenandoahVerify) { @@ -1515,29 +2350,6 @@ void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* b } } -class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { -private: - ShenandoahMarkingContext* const _ctx; -public: - ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} - - void heap_region_do(ShenandoahHeapRegion* r) { - assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); - if (r->is_active()) { - // Check if region needs updating its TAMS. We have updated it already during concurrent - // reset, so it is very likely we don't need to do another write here. - if (_ctx->top_at_mark_start(r) != r->top()) { - _ctx->capture_top_at_mark_start(r); - } - } else { - assert(_ctx->top_at_mark_start(r) == r->top(), - "Region " SIZE_FORMAT " should already have correct TAMS", r->index()); - } - } - - bool is_thread_safe() { return true; } -}; - class ShenandoahRendezvousClosure : public HandshakeClosure { public: inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {} @@ -1553,105 +2365,6 @@ void ShenandoahHeap::recycle_trash() { free_set()->recycle_trash(); } -class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { -private: - ShenandoahMarkingContext* const _ctx; -public: - ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} - - void heap_region_do(ShenandoahHeapRegion* r) { - if (r->is_active()) { - // Reset live data and set TAMS optimistically. We would recheck these under the pause - // anyway to capture any updates that happened since now. 
- r->clear_live_data(); - _ctx->capture_top_at_mark_start(r); - } - } - - bool is_thread_safe() { return true; } -}; - -void ShenandoahHeap::prepare_gc() { - reset_mark_bitmap(); - - ShenandoahResetUpdateRegionStateClosure cl; - parallel_heap_region_iterate(&cl); -} - -class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { -private: - ShenandoahMarkingContext* const _ctx; - ShenandoahHeapLock* const _lock; - -public: - ShenandoahFinalMarkUpdateRegionStateClosure() : - _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {} - - void heap_region_do(ShenandoahHeapRegion* r) { - if (r->is_active()) { - // All allocations past TAMS are implicitly live, adjust the region data. - // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. - HeapWord *tams = _ctx->top_at_mark_start(r); - HeapWord *top = r->top(); - if (top > tams) { - r->increase_live_data_alloc_words(pointer_delta(top, tams)); - } - - // We are about to select the collection set, make sure it knows about - // current pinning status. Also, this allows trashing more regions that - // now have their pinning status dropped. - if (r->is_pinned()) { - if (r->pin_count() == 0) { - ShenandoahHeapLocker locker(_lock); - r->make_unpinned(); - } - } else { - if (r->pin_count() > 0) { - ShenandoahHeapLocker locker(_lock); - r->make_pinned(); - } - } - - // Remember limit for updating refs. It's guaranteed that we get no - // from-space-refs written from here on. - r->set_update_watermark_at_safepoint(r->top()); - } else { - assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); - assert(_ctx->top_at_mark_start(r) == r->top(), - "Region " SIZE_FORMAT " should have correct TAMS", r->index()); - } - } - - bool is_thread_safe() { return true; } -}; - -void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) { - assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC"); - { - ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states : - ShenandoahPhaseTimings::degen_gc_final_update_region_states); - ShenandoahFinalMarkUpdateRegionStateClosure cl; - parallel_heap_region_iterate(&cl); - - assert_pinned_region_status(); - } - - { - ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset : - ShenandoahPhaseTimings::degen_gc_choose_cset); - ShenandoahHeapLocker locker(lock()); - _collection_set->clear(); - heuristics()->choose_collection_set(_collection_set); - } - - { - ShenandoahGCPhase phase(concurrent ? 
ShenandoahPhaseTimings::final_rebuild_freeset : - ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); - ShenandoahHeapLocker locker(lock()); - _free_set->rebuild(); - } -} - void ShenandoahHeap::do_class_unloading() { _unloader.unload(); } @@ -1662,7 +2375,7 @@ void ShenandoahHeap::stw_weak_refs(bool full_gc) { : ShenandoahPhaseTimings::degen_gc_weakrefs; ShenandoahTimingsTracker t(phase); ShenandoahGCWorkerPhase worker_phase(phase); - ref_processor()->process_references(phase, workers(), false /* concurrent */); + active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */); } void ShenandoahHeap::prepare_update_heap_references(bool concurrent) { @@ -1693,10 +2406,66 @@ void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) { set_gc_state_all_threads(_gc_state.raw_value()); } -void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) { - assert(!has_forwarded_objects(), "Not expected before/after mark phase"); - set_gc_state_mask(MARKING, in_progress); - ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); +void ShenandoahHeap::set_evacuation_reserve_quantities(bool is_valid) { + _has_evacuation_reserve_quantities = is_valid; +} + +void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) { + uint mask; + assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation"); + if (!in_progress && is_concurrent_old_mark_in_progress()) { + assert(mode()->is_generational(), "Only generational GC has old marking"); + assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING"); + // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on + mask = YOUNG_MARKING; + } else { + mask = MARKING | YOUNG_MARKING; + } + set_gc_state_mask(mask, in_progress); + manage_satb_barrier(in_progress); +} + +void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) { +#ifdef ASSERT + // has_forwarded_objects() iff UPDATEREFS or EVACUATION + bool has_forwarded = has_forwarded_objects(); + bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION); + bool evacuating = _gc_state.is_set(EVACUATION); + assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()), + "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding"); +#endif + if (!in_progress && is_concurrent_young_mark_in_progress()) { + // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on + assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING"); + set_gc_state_mask(OLD_MARKING, in_progress); + } else { + set_gc_state_mask(MARKING | OLD_MARKING, in_progress); + } + manage_satb_barrier(in_progress); +} + +bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const { + return old_generation()->state() == ShenandoahOldGeneration::FILLING; +} + +void ShenandoahHeap::set_aging_cycle(bool in_progress) { + _is_aging_cycle.set_cond(in_progress); +} + +void ShenandoahHeap::manage_satb_barrier(bool active) { + if (is_concurrent_mark_in_progress()) { + // Ignore request to deactivate barrier while concurrent mark is in progress. + // Do not attempt to re-activate the barrier if it is already active. 
+ if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) { + ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active); + } + } else { + // No concurrent marking is in progress so honor request to deactivate, + // but only if the barrier is already active. + if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) { + ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active); + } + } } void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) { @@ -1729,11 +2498,20 @@ bool ShenandoahHeap::try_cancel_gc() { return prev == CANCELLABLE; } +void ShenandoahHeap::cancel_concurrent_mark() { + _young_generation->cancel_marking(); + _old_generation->cancel_marking(); + _global_generation->cancel_marking(); + + ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking(); +} + void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { if (try_cancel_gc()) { FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); log_info(gc)("%s", msg.buffer()); Events::log(Thread::current(), "%s", msg.buffer()); + _cancel_requested_time = os::elapsedTime(); } } @@ -1744,18 +2522,21 @@ uint ShenandoahHeap::max_workers() { void ShenandoahHeap::stop() { // The shutdown sequence should be able to terminate when GC is running. - // Step 0. Notify policy to disable event recording. + // Step 1. Notify policy to disable event recording and prevent visiting gc threads during shutdown _shenandoah_policy->record_shutdown(); - // Step 1. Notify control thread that we are in shutdown. + // Step 2. Stop requesting collections. + regulator_thread()->stop(); + + // Step 3. Notify control thread that we are in shutdown. // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. control_thread()->prepare_for_graceful_shutdown(); - // Step 2. Notify GC workers that we are cancelling GC. + // Step 4. Notify GC workers that we are cancelling GC. cancel_gc(GCCause::_shenandoah_stop_vm); - // Step 3. Wait until GC worker exits normally. + // Step 5. Wait until GC worker exits normally. 
  control_thread()->stop();
 }
 
@@ -1849,12 +2630,13 @@ address ShenandoahHeap::in_cset_fast_test_addr() {
   return (address) heap->collection_set()->biased_map_address();
 }
 
-size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
-  return Atomic::load(&_bytes_allocated_since_gc_start);
-}
-
 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
-  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
+  if (mode()->is_generational()) {
+    young_generation()->reset_bytes_allocated_since_gc_start();
+    old_generation()->reset_bytes_allocated_since_gc_start();
+  }
+
+  global_generation()->reset_bytes_allocated_since_gc_start();
 }
 
 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
@@ -1918,8 +2700,10 @@ void ShenandoahHeap::sync_pinned_region_status() {
 void ShenandoahHeap::assert_pinned_region_status() {
   for (size_t i = 0; i < num_regions(); i++) {
     ShenandoahHeapRegion* r = get_region(i);
-    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
-           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
+    if (active_generation()->contains(r)) {
+      assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
+             "Region " SIZE_FORMAT " pinning status is inconsistent", i);
+    }
   }
 }
 #endif
@@ -1979,37 +2763,89 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
 private:
   ShenandoahHeap* _heap;
   ShenandoahRegionIterator* _regions;
+  ShenandoahRegionChunkIterator* _work_chunks;
+
 public:
-  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
+  explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
+                                        ShenandoahRegionChunkIterator* work_chunks) :
     WorkerTask("Shenandoah Update References"),
     _heap(ShenandoahHeap::heap()),
-    _regions(regions) {
+    _regions(regions),
+    _work_chunks(work_chunks)
+  {
+    log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(_heap->is_old_bitmap_stable()));
   }
 
   void work(uint worker_id) {
     if (CONCURRENT) {
       ShenandoahConcurrentWorkerSession worker_session(worker_id);
       ShenandoahSuspendibleThreadSetJoiner stsj;
-      do_work();
+      do_work(worker_id);
     } else {
       ShenandoahParallelWorkerSession worker_session(worker_id);
-      do_work();
+      do_work(worker_id);
     }
   }
 
 private:
   template <class T>
-  void do_work() {
+  void do_work(uint worker_id) {
     T cl;
+    if (CONCURRENT && (worker_id == 0)) {
+      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
+      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
+      size_t cset_regions = _heap->collection_set()->count();
+      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
+      // we need the reclaimed collection set regions to replenish the collector reserves
+      _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
+    }
+    // If !CONCURRENT, there's no value in expanding Mutator free set
+
     ShenandoahHeapRegion* r = _regions->next();
-    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+    // We update references for global, old, and young collections.
+    assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
+    ShenandoahMarkingContext* const ctx = _heap->marking_context();
+    bool is_mixed = _heap->collection_set()->has_old_regions();
     while (r != nullptr) {
       HeapWord* update_watermark = r->get_update_watermark();
       assert (update_watermark >= r->bottom(), "sanity");
+
+      log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
+      bool region_progress = false;
       if (r->is_active() && !r->is_cset()) {
-        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
+        if (!_heap->mode()->is_generational() || r->is_young()) {
+          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
+          region_progress = true;
+        } else if (r->is_old()) {
+          if (_heap->active_generation()->is_global()) {
+            // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles. This is because
+            // concurrent GC threads are parceled out entire heap regions of work at a time and there
+            // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
+            // and more fairly distributed across threads.
+
+            // TODO: Consider an improvement to load balance GLOBAL GC.
+            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
+            region_progress = true;
+          }
+          // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
+          // Don't bother to report pacing progress in this case.
+        } else {
+          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
+          // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
+          // active status may propagate at a different speed than the changing of the region's affiliation.
+
+          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
+          // by this thread before the region's affiliation() is seen by this thread.
+
+          // It's ok for this race to occur because the newly transformed region does not have any references to be
+          // updated.
+
+          assert(r->get_update_watermark() == r->bottom(),
+                 "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
+                 r->affiliation_name(), r->index());
+        }
       }
-      if (ShenandoahPacing) {
+      if (region_progress && ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
       }
       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
@@ -2017,33 +2853,175 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
       }
       r = _regions->next();
     }
+
+    if (_heap->mode()->is_generational() && !_heap->active_generation()->is_global()) {
+      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
+      // set processing if not in generational mode or in GLOBAL mode.
+
+      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
+      // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
+      // threads during this phase, allowing all threads to work more effectively in parallel.
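+      // Illustrative example (the chunk numbers here are hypothetical, not taken from this code):
+      // if a 4 MB old region were divided into eight 512 KB chunks, each chunk could be claimed
+      // independently, so a worker that finishes its share of regions early can still pick up
+      // remembered-set chunks from regions that other workers started.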
+      struct ShenandoahRegionChunk assignment;
+      RememberedScanner* scanner = _heap->card_scan();
+
+      while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
+        // Keep grabbing next work chunk to process until finished, or asked to yield
+        ShenandoahHeapRegion* r = assignment._r;
+        if (r->is_active() && !r->is_cset() && r->is_old()) {
+          HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
+          HeapWord* end_of_range = r->get_update_watermark();
+          if (end_of_range > start_of_range + assignment._chunk_size) {
+            end_of_range = start_of_range + assignment._chunk_size;
+          }
+
+          // Old region in a young cycle or mixed cycle.
+          if (is_mixed) {
+            // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
+            // within old-gen HeapRegions. This remembered set can be constructed by old-gen concurrent marking
+            // and augmented by card marking. For example, old-gen concurrent marking can remember for each old-gen
+            // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
+            // Update-references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
+            // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
+            // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
+            // old-gen heap regions.
+
+            if (r->is_humongous()) {
+              if (start_of_range < end_of_range) {
+                // Need to examine both dirty and clean cards during mixed evac.
+                r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
+              }
+            } else {
+              // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
+              // and filled. Use mark bits to find objects that need to be updated.
+              //
+              // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
+              // regions which are in the collection set for a particular mixed evacuation.
+              if (start_of_range < end_of_range) {
+                HeapWord* p = nullptr;
+                size_t card_index = scanner->card_index_for_addr(start_of_range);
+                // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
+                ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
+
+                // Any object that begins in a previous range is part of a different scanning assignment. Any object that
+                // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
+                // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
+
+                // Find the first object that begins in my range, if there is one.
+                p = start_of_range;
+                oop obj = cast_to_oop(p);
+                HeapWord* tams = ctx->top_at_mark_start(r);
+                if (p >= tams) {
+                  // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
+                  // we need to use the remembered set crossing map to advance p to the first object that starts
+                  // within the enclosing card.
+
+                  while (true) {
+                    HeapWord* first_object = scanner->first_object_in_card(card_index);
+                    if (first_object != nullptr) {
+                      p = first_object;
+                      break;
+                    } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
+                      card_index++;
+                    } else {
+                      // Force the loop that follows to immediately terminate.
+                      p = end_of_range;
+                      break;
+                    }
+                  }
+                  obj = cast_to_oop(p);
+                  // Note: p may be >= end_of_range
+                } else if (!ctx->is_marked(obj)) {
+                  p = ctx->get_next_marked_addr(p, tams);
+                  obj = cast_to_oop(p);
+                  // If there are no more marked objects before tams, this returns tams.
+                  // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
+                }
+                while (p < end_of_range) {
+                  // p is known to point to the beginning of marked object obj
+                  objs.do_object(obj);
+                  HeapWord* prev_p = p;
+                  p += obj->size();
+                  if (p < tams) {
+                    p = ctx->get_next_marked_addr(p, tams);
+                    // If there are no more marked objects before tams, this returns tams. Note that tams is
+                    // either >= end_of_range, or tams is the start of an object that is marked.
+                  }
+                  assert(p != prev_p, "Lack of forward progress");
+                  obj = cast_to_oop(p);
+                }
+              }
+            }
+          } else {
+            // This is a young evacuation.
+            if (start_of_range < end_of_range) {
+              size_t cluster_size =
+                CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
+              size_t clusters = assignment._chunk_size / cluster_size;
+              assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
+              scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
+            }
+          }
+          if (ShenandoahPacing && (start_of_range < end_of_range)) {
+            _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
+          }
+        }
+      }
+    }
   }
 };
 
 void ShenandoahHeap::update_heap_references(bool concurrent) {
   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
+  uint nworkers = workers()->active_workers();
+  ShenandoahRegionChunkIterator work_list(nworkers);
 
   if (concurrent) {
-    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
+    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
     workers()->run_task(&task);
   } else {
-    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
+    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
     workers()->run_task(&task);
   }
+  if (ShenandoahEnableCardStats && card_scan() != nullptr) { // generational check proxy
+    card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
+  }
 }
 
-
 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 private:
+  ShenandoahMarkingContext* _ctx;
   ShenandoahHeapLock* const _lock;
+  bool _is_generational;
 
 public:
-  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
+  ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
+    ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
+                                     _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
 
   void heap_region_do(ShenandoahHeapRegion* r) {
+
+    // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
+    // regions. We consult region age during the subsequent evacuation to determine whether certain objects need to
+    // be promoted.
+    if (_is_generational && r->is_young() && r->is_active()) {
+      HeapWord *tams = _ctx->top_at_mark_start(r);
+      HeapWord *top = r->top();
+
+      // Allocations move the watermark when top moves. However compacting
+      // objects will sometimes lower top beneath the watermark, after which,
+      // attempts to read the watermark will assert out (watermark should not be
+      // higher than top).
+      if (top > tams) {
+        // There have been allocations in this region since the start of the cycle.
+ // Any objects new to this region must not assimilate elevated age. + r->reset_age(); + } else if (ShenandoahHeap::heap()->is_aging_cycle()) { + r->increment_age(); + } + } + // Drop unnecessary "pinned" state from regions that does not have CP marks // anymore, as this would allow trashing them. - if (r->is_active()) { if (r->is_pinned()) { if (r->pin_count() == 0) { @@ -2070,7 +3048,7 @@ void ShenandoahHeap::update_heap_region_states(bool concurrent) { ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_refs_update_region_states : ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states); - ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl; + ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl (active_generation()->complete_marking_context()); parallel_heap_region_iterate(&cl); assert_pinned_region_status(); @@ -2085,12 +3063,90 @@ void ShenandoahHeap::update_heap_region_states(bool concurrent) { } void ShenandoahHeap::rebuild_free_set(bool concurrent) { - { - ShenandoahGCPhase phase(concurrent ? - ShenandoahPhaseTimings::final_update_refs_rebuild_freeset : - ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset); - ShenandoahHeapLocker locker(lock()); - _free_set->rebuild(); + ShenandoahGCPhase phase(concurrent ? + ShenandoahPhaseTimings::final_update_refs_rebuild_freeset : + ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + ShenandoahHeapLocker locker(lock()); + size_t young_cset_regions, old_cset_regions; + size_t first_old_region, last_old_region, old_region_count; + _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); + // If there are no old regions, first_old_region will be greater than last_old_region + assert((first_old_region > last_old_region) || + ((last_old_region + 1 - first_old_region >= old_region_count) && + get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()), + "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT, + old_region_count, first_old_region, last_old_region); + + if (mode()->is_generational()) { + assert(verify_generation_usage(true, old_generation()->used_regions(), + old_generation()->used(), old_generation()->get_humongous_waste(), + true, young_generation()->used_regions(), + young_generation()->used(), young_generation()->get_humongous_waste()), + "Generation accounts are inaccurate"); + + // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this + // available for transfer to old. Note that transfer of humongous regions does not impact available. + size_t allocation_runway = young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); + adjust_generation_sizes_for_next_cycle(allocation_runway, young_cset_regions, old_cset_regions); + + // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available + // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular + // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation + // will decrease as promote-by-copy consumes the available memory within these partially consumed regions. 
+ // + // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides + // within partially consumed regions of memory. + } + // Rebuild free set based on adjusted generation sizes. + _free_set->rebuild(young_cset_regions, old_cset_regions); + + if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) { + size_t old_region_span = (first_old_region <= last_old_region)? (last_old_region + 1 - first_old_region): 0; + size_t allowed_old_gen_span = num_regions() - (ShenandoahGenerationalHumongousReserve * num_regions() / 100); + + // Tolerate lower density if total span is small. Here's the implementation: + // if old_gen spans more than 100% and density < 75%, trigger old-defrag + // else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag + // else if old_gen spans more than 75% and density < 50%, trigger old-defrag + // else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag + // else if old_gen spans more than 50% and density < 25%, trigger old-defrag + // + // A previous implementation was more aggressive in triggering, resulting in degraded throughput when + // humongous allocation was not required. + + ShenandoahGeneration* old_gen = old_generation(); + size_t old_available = old_gen->available(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t old_unaffiliated_available = old_gen->free_unaffiliated_regions() * region_size_bytes; + assert(old_available >= old_unaffiliated_available, "sanity"); + size_t old_fragmented_available = old_available - old_unaffiliated_available; + + size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available; + size_t old_bytes_spanned = old_region_span * region_size_bytes; + double old_density = ((double) old_bytes_consumed) / old_bytes_spanned; + + uint eighths = 8; + for (uint i = 0; i < 5; i++) { + size_t span_threshold = eighths * allowed_old_gen_span / 8; + double density_threshold = (eighths - 2) / 8.0; + if ((old_region_span >= span_threshold) && (old_density < density_threshold)) { + old_heuristics()->trigger_old_is_fragmented(old_density, first_old_region, last_old_region); + break; + } + eighths--; + } + + size_t old_used = old_generation()->used() + old_generation()->get_humongous_waste(); + size_t trigger_threshold = old_generation()->usage_trigger_threshold(); + // Detects unsigned arithmetic underflow + assert(old_used <= capacity(), + "Old used (" SIZE_FORMAT ", " SIZE_FORMAT") must not be more than heap capacity (" SIZE_FORMAT ")", + old_generation()->used(), old_generation()->get_humongous_waste(), capacity()); + + if (old_used > trigger_threshold) { + old_heuristics()->trigger_old_has_grown(); + } } } @@ -2201,9 +3257,18 @@ bool ShenandoahHeap::should_inject_alloc_failure() { } void ShenandoahHeap::initialize_serviceability() { - _memory_pool = new ShenandoahMemoryPool(this); - _cycle_memory_manager.add_pool(_memory_pool); - _stw_memory_manager.add_pool(_memory_pool); + if (mode()->is_generational()) { + _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this); + _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this); + _cycle_memory_manager.add_pool(_young_gen_memory_pool); + _cycle_memory_manager.add_pool(_old_gen_memory_pool); + _stw_memory_manager.add_pool(_young_gen_memory_pool); + _stw_memory_manager.add_pool(_old_gen_memory_pool); + } else { + _memory_pool = new ShenandoahMemoryPool(this); + _cycle_memory_manager.add_pool(_memory_pool); + 
+    _stw_memory_manager.add_pool(_memory_pool);
+  }
 }

 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
@@ -2215,12 +3280,17 @@ GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {

 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
   GrowableArray<MemoryPool*> memory_pools(1);
-  memory_pools.append(_memory_pool);
+  if (mode()->is_generational()) {
+    memory_pools.append(_young_gen_memory_pool);
+    memory_pools.append(_old_gen_memory_pool);
+  } else {
+    memory_pools.append(_memory_pool);
+  }
   return memory_pools;
 }

 MemoryUsage ShenandoahHeap::memory_usage() {
-  return _memory_pool->get_memory_usage();
+  return MemoryUsage(_initial_size, used(), committed(), max_capacity());
 }

 ShenandoahRegionIterator::ShenandoahRegionIterator() :
@@ -2258,6 +3328,7 @@ void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
   assert(worker_id < _max_workers, "sanity");
   assert(_liveness_cache != nullptr, "sanity");
   ShenandoahLiveData* ld = _liveness_cache[worker_id];
+
   for (uint i = 0; i < num_regions(); i++) {
     ShenandoahLiveData live = ld[i];
     if (live > 0) {
@@ -2285,3 +3356,106 @@ bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {

   return false;
 }
+
+void ShenandoahHeap::transfer_old_pointers_from_satb() {
+  _old_generation->transfer_pointers_from_satb();
+}
+
+template<>
+void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
+  // Visit young regions
+  if (region->is_young()) {
+    _cl->heap_region_do(region);
+  }
+}
+
+template<>
+void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
+  // Visit old regions
+  if (region->is_old()) {
+    _cl->heap_region_do(region);
+  }
+}
+
+template<>
+void ShenandoahGenerationRegionClosure<GLOBAL>::heap_region_do(ShenandoahHeapRegion* region) {
+  _cl->heap_region_do(region);
+}
+
+template<>
+void ShenandoahGenerationRegionClosure<NON_GEN>::heap_region_do(ShenandoahHeapRegion* region) {
+  _cl->heap_region_do(region);
+}
+
+bool ShenandoahHeap::verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
+                                             bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste) {
+  size_t tally_old_regions = 0;
+  size_t tally_old_bytes = 0;
+  size_t tally_old_waste = 0;
+  size_t tally_young_regions = 0;
+  size_t tally_young_bytes = 0;
+  size_t tally_young_waste = 0;
+
+  shenandoah_assert_heaplocked_or_safepoint();
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    if (r->is_old()) {
+      tally_old_regions++;
+      tally_old_bytes += r->used();
+      if (r->is_humongous()) {
+        ShenandoahHeapRegion* start = r->humongous_start_region();
+        HeapWord* obj_addr = start->bottom();
+        oop obj = cast_to_oop(obj_addr);
+        size_t word_size = obj->size();
+        HeapWord* end_addr = obj_addr + word_size;
+        if (end_addr <= r->end()) {
+          tally_old_waste += (r->end() - end_addr) * HeapWordSize;
+        }
+      }
+    } else if (r->is_young()) {
+      tally_young_regions++;
+      tally_young_bytes += r->used();
+      if (r->is_humongous()) {
+        ShenandoahHeapRegion* start = r->humongous_start_region();
+        HeapWord* obj_addr = start->bottom();
+        oop obj = cast_to_oop(obj_addr);
+        size_t word_size = obj->size();
+        HeapWord* end_addr = obj_addr + word_size;
+        if (end_addr <= r->end()) {
+          tally_young_waste += (r->end() - end_addr) * HeapWordSize;
+        }
+      }
+    }
+  }
+  if (verify_young &&
+      ((young_regions != tally_young_regions) || (young_bytes != tally_young_bytes) || (young_waste != tally_young_waste))) {
+    return false;
+  } else if (verify_old &&
+             ((old_regions != tally_old_regions) || (old_bytes != tally_old_bytes) ||
+              (old_waste != tally_old_waste))) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
+ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
+  if (!mode()->is_generational()) {
+    return global_generation();
+  } else if (affiliation == YOUNG_GENERATION) {
+    return young_generation();
+  } else if (affiliation == OLD_GENERATION) {
+    return old_generation();
+  }
+
+  ShouldNotReachHere();
+  return nullptr;
+}
+
+void ShenandoahHeap::log_heap_status(const char* msg) const {
+  if (mode()->is_generational()) {
+    young_generation()->log_status(msg);
+    old_generation()->log_status(msg);
+  } else {
+    global_generation()->log_status(msg);
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 642faef807e..051f728882a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,14 +27,22 @@
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

+#include "gc/shared/ageTable.hpp"
 #include "gc/shared/markBitMap.hpp"
 #include "gc/shared/softRefPolicy.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shenandoah/shenandoahAgeCensus.hpp"
+#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
 #include "gc/shenandoah/shenandoahAsserts.hpp"
 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
 #include "gc/shenandoah/shenandoahLock.hpp"
 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+#include "gc/shenandoah/shenandoahEvacTracker.hpp"
+#include "gc/shenandoah/shenandoahGenerationType.hpp"
+#include "gc/shenandoah/shenandoahMmuTracker.hpp"
 #include "gc/shenandoah/shenandoahPadding.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
 #include "gc/shenandoah/shenandoahUnload.hpp"
 #include "memory/metaspace.hpp"
@@ -43,13 +52,19 @@
 class ConcurrentGCTimer;
 class ObjectIterateScanRootClosure;
+class PLAB;
 class ShenandoahCollectorPolicy;
 class ShenandoahControlThread;
+class ShenandoahRegulatorThread;
 class ShenandoahGCSession;
 class ShenandoahGCStateResetter;
+class ShenandoahGeneration;
+class ShenandoahYoungGeneration;
+class ShenandoahOldGeneration;
 class ShenandoahHeuristics;
+class ShenandoahOldHeuristics;
+class ShenandoahYoungHeuristics;
 class ShenandoahMarkingContext;
-class ShenandoahMode;
 class ShenandoahPhaseTimings;
 class ShenandoahHeap;
 class ShenandoahHeapRegion;
@@ -59,6 +74,7 @@ class ShenandoahFreeSet;
 class ShenandoahConcurrentMark;
 class ShenandoahFullGC;
 class ShenandoahMonitoringSupport;
+class ShenandoahMode;
 class ShenandoahPacer;
 class ShenandoahReferenceProcessor;
 class ShenandoahVerifier;
@@ -108,6 +124,16 @@ class ShenandoahHeapRegionClosure : public StackObj {
   virtual bool is_thread_safe() { return false; }
 };

+template<ShenandoahGenerationType GENERATION>
+class ShenandoahGenerationRegionClosure : public ShenandoahHeapRegionClosure {
+ public:
+  explicit ShenandoahGenerationRegionClosure(ShenandoahHeapRegionClosure* cl) : _cl(cl) {}
+  void heap_region_do(ShenandoahHeapRegion* r);
+  virtual bool is_thread_safe() { return _cl->is_thread_safe(); }
+ private:
+  ShenandoahHeapRegionClosure* _cl;
+};
+
 typedef ShenandoahLock    ShenandoahHeapLock;
 typedef ShenandoahLocker  ShenandoahHeapLocker;

 typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
@@ -125,6 +151,7 @@ class ShenandoahHeap : public CollectedHeap {
   friend class ShenandoahSafepoint;
   // Supported GC
   friend class ShenandoahConcurrentGC;
+  friend class ShenandoahOldGC;
   friend class ShenandoahDegenGC;
   friend class ShenandoahFullGC;
   friend class ShenandoahUnload;
@@ -133,12 +160,30 @@ class ShenandoahHeap : public CollectedHeap {
 //
 private:
   ShenandoahHeapLock _lock;
+  ShenandoahGeneration* _gc_generation;

 public:
   ShenandoahHeapLock* lock() {
     return &_lock;
   }

+  ShenandoahGeneration* active_generation() const {
+    // last or latest generation might be a better name here.
+    return _gc_generation;
+  }
+
+  void set_gc_generation(ShenandoahGeneration* generation) {
+    _gc_generation = generation;
+  }
+
+  ShenandoahHeuristics* heuristics();
+  ShenandoahOldHeuristics* old_heuristics();
+  ShenandoahYoungHeuristics* young_heuristics();
+
+  bool doing_mixed_evacuations();
+  bool is_old_bitmap_stable() const;
+  bool is_gc_generation_young() const;
+
 // ---------- Initialization, termination, identification, printing routines
 //
 public:
@@ -150,9 +195,8 @@ class ShenandoahHeap : public CollectedHeap {
   ShenandoahHeap(ShenandoahCollectorPolicy* policy);
   jint initialize() override;
   void post_initialize() override;
-  void initialize_mode();
-  void initialize_heuristics();
-
+  void initialize_heuristics_generations();
+  virtual void print_init_logger() const;
   void initialize_serviceability() override;

   void print_on(outputStream* st) const override;
@@ -165,6 +209,9 @@ class ShenandoahHeap : public CollectedHeap {
   void prepare_for_verify() override;
   void verify(VerifyOption vo) override;

+  bool verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
+                               bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste);
+
   // WhiteBox testing support.
bool supports_concurrent_gc_breakpoints() const override { return true; @@ -173,25 +220,29 @@ class ShenandoahHeap : public CollectedHeap { // ---------- Heap counters and metrics // private: - size_t _initial_size; - size_t _minimum_size; + size_t _initial_size; + size_t _minimum_size; + size_t _promotion_potential; + size_t _pad_for_promote_in_place; // bytes of filler + size_t _promotable_humongous_regions; + size_t _regular_regions_promoted_in_place; + volatile size_t _soft_max_size; shenandoah_padding(0); - volatile size_t _used; volatile size_t _committed; - volatile size_t _bytes_allocated_since_gc_start; shenandoah_padding(1); + void increase_used(const ShenandoahAllocRequest& req); + public: - void increase_used(size_t bytes); - void decrease_used(size_t bytes); - void set_used(size_t bytes); + void increase_used(ShenandoahGeneration* generation, size_t bytes); + void decrease_used(ShenandoahGeneration* generation, size_t bytes); + void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes); + void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes); void increase_committed(size_t bytes); void decrease_committed(size_t bytes); - void increase_allocated(size_t bytes); - size_t bytes_allocated_since_gc_start(); void reset_bytes_allocated_since_gc_start(); size_t min_capacity() const; @@ -227,6 +278,7 @@ class ShenandoahHeap : public CollectedHeap { bool _heap_region_special; size_t _num_regions; ShenandoahHeapRegion** _regions; + uint8_t* _affiliations; // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode ShenandoahRegionIterator _update_refs_iterator; public: @@ -236,14 +288,16 @@ class ShenandoahHeap : public CollectedHeap { inline size_t num_regions() const { return _num_regions; } inline bool is_heap_region_special() { return _heap_region_special; } - inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const; + inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const; inline size_t heap_region_index_containing(const void* addr) const; - inline ShenandoahHeapRegion* const get_region(size_t region_idx) const; + inline ShenandoahHeapRegion* get_region(size_t region_idx) const; void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; + inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; }; + // ---------- GC state machinery // // GC state describes the important parts of collector state, that may be @@ -259,6 +313,7 @@ class ShenandoahHeap : public CollectedHeap { HAS_FORWARDED_BITPOS = 0, // Heap is under marking: needs SATB barriers. + // For generational mode, it means either young or old marking, or both. MARKING_BITPOS = 1, // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED) @@ -269,6 +324,12 @@ class ShenandoahHeap : public CollectedHeap { // Heap is under weak-reference/roots processing: needs weak-LRB barriers. WEAK_ROOTS_BITPOS = 4, + + // Young regions are under marking, need SATB barriers. + YOUNG_MARKING_BITPOS = 5, + + // Old regions are under marking, need SATB barriers. 
+  OLD_MARKING_BITPOS = 6
 };

 enum GCState {
@@ -278,6 +339,8 @@ class ShenandoahHeap : public CollectedHeap {
   EVACUATION    = 1 << EVACUATION_BITPOS,
   UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
   WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
+  YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
+  OLD_MARKING   = 1 << OLD_MARKING_BITPOS
 };

 private:
@@ -288,13 +351,46 @@ class ShenandoahHeap : public CollectedHeap {
   ShenandoahSharedFlag   _progress_last_gc;
   ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

+  // TODO: Revisit the following comment. It may not accurately represent the true behavior when evacuations fail due to
+  // difficulty finding memory to hold evacuated objects.
+  //
+  // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
+  // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need. In the case that there is an excessive amount
+  // of waste, it may be that one thread fails to grab a new GCLAB; this does not necessarily doom the associated evacuation
+  // effort. If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
+  // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.
+
+  size_t _promoted_reserve;            // Bytes reserved within old-gen to hold the results of promotion
+  volatile size_t _promoted_expended;  // Bytes of old-gen memory expended on promotions
+
+  size_t _old_evac_reserve;            // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
+  size_t _young_evac_reserve;          // Bytes reserved within young-gen to hold evacuated objects from young-gen collection set
+
+  ShenandoahAgeCensus* _age_census;    // Age census used for adapting tenuring threshold in generational mode
+
+  // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
+  // hold the results of evacuating to young-gen and to old-gen. These quantities, stored in _promoted_reserve,
+  // _old_evac_reserve, and _young_evac_reserve, are consulted prior to rebuilding the free set (ShenandoahFreeSet)
+  // in preparation for evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the
+  // collector and old_collector sets to hold these reserves if _has_evacuation_reserve_quantities is true. The other
+  // time we rebuild the free set is at the end of GC, as we prepare to idle GC until the next trigger. In this case,
+  // _has_evacuation_reserve_quantities is false because we don't yet know how much memory will need to be evacuated
+  // in the next GC cycle. When _has_evacuation_reserve_quantities is false, the free set rebuild operation reserves
+  // memory for the collector and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve,
+  // ShenandoahOldEvacReserve, and ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for the
+  // old_collector set when _has_evacuation_reserve_quantities is false will be based in part on anticipated promotion,
+  // as determined by analysis of live data found during the previous GC pass at the age one less than the current
+  // tenure age.
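To make the reserve sizing described above concrete, here is a minimal sketch, assuming only what the comment states: reserves are ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need. The function and variable names are local stand-ins, not HotSpot symbols.

    #include <cstddef>
    #include <cstdio>

    // Size an evacuation reserve as waste-factor times the live bytes chosen
    // for evacuation, per the comment above. Illustrative only.
    static size_t evac_reserve_bytes(size_t live_bytes_in_cset, double evac_waste) {
      return static_cast<size_t>(live_bytes_in_cset * evac_waste);
    }

    int main() {
      const size_t live  = 100u * 1024 * 1024;  // 100 MB selected for evacuation
      const double waste = 1.2;                 // assumed ShenandoahEvacWaste value
      printf("reserve = %zu bytes\n", evac_reserve_bytes(live, waste));  // ~120 MB
    }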
+ bool _has_evacuation_reserve_quantities; + void set_gc_state_all_threads(char state); void set_gc_state_mask(uint mask, bool value); public: char gc_state() const; - void set_concurrent_mark_in_progress(bool in_progress); + void set_evacuation_reserve_quantities(bool is_valid); + void set_concurrent_young_mark_in_progress(bool in_progress); + void set_concurrent_old_mark_in_progress(bool in_progress); void set_evacuation_in_progress(bool in_progress); void set_update_refs_in_progress(bool in_progress); void set_degenerated_gc_in_progress(bool in_progress); @@ -304,9 +400,14 @@ class ShenandoahHeap : public CollectedHeap { void set_concurrent_strong_root_in_progress(bool cond); void set_concurrent_weak_root_in_progress(bool cond); + void set_aging_cycle(bool cond); + inline bool is_stable() const; inline bool is_idle() const; + inline bool has_evacuation_reserve_quantities() const; inline bool is_concurrent_mark_in_progress() const; + inline bool is_concurrent_young_mark_in_progress() const; + inline bool is_concurrent_old_mark_in_progress() const; inline bool is_update_refs_in_progress() const; inline bool is_evacuation_in_progress() const; inline bool is_degenerated_gc_in_progress() const; @@ -317,8 +418,47 @@ class ShenandoahHeap : public CollectedHeap { inline bool is_stw_gc_in_progress() const; inline bool is_concurrent_strong_root_in_progress() const; inline bool is_concurrent_weak_root_in_progress() const; + bool is_prepare_for_old_mark_in_progress() const; + inline bool is_aging_cycle() const; + + inline void clear_promotion_potential() { _promotion_potential = 0; }; + inline void set_promotion_potential(size_t val) { _promotion_potential = val; }; + inline size_t get_promotion_potential() { return _promotion_potential; }; + + inline void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; } + inline size_t get_pad_for_promote_in_place() { return _pad_for_promote_in_place; } + + inline void reserve_promotable_humongous_regions(size_t region_count) { _promotable_humongous_regions = region_count; } + inline void reserve_promotable_regular_regions(size_t region_count) { _regular_regions_promoted_in_place = region_count; } + + inline size_t get_promotable_humongous_regions() { return _promotable_humongous_regions; } + inline size_t get_regular_regions_promoted_in_place() { return _regular_regions_promoted_in_place; } + + // Returns previous value + inline size_t set_promoted_reserve(size_t new_val); + inline size_t get_promoted_reserve() const; + inline void augment_promo_reserve(size_t increment); + + inline void reset_promoted_expended(); + inline size_t expend_promoted(size_t increment); + inline size_t unexpend_promoted(size_t decrement); + inline size_t get_promoted_expended(); + + // Returns previous value + inline size_t set_old_evac_reserve(size_t new_val); + inline size_t get_old_evac_reserve() const; + inline void augment_old_evac_reserve(size_t increment); + + // Returns previous value + inline size_t set_young_evac_reserve(size_t new_val); + inline size_t get_young_evac_reserve() const; + + // Return the age census object for young gen (in generational mode) + inline ShenandoahAgeCensus* age_census() const; private: + void manage_satb_barrier(bool active); + enum CancelState { // Normal state. GC has not been cancelled and is open for cancellation. // Worker threads can suspend for safepoint. 
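A compact model of the cancellation handshake these CancelState values support: many threads may request cancellation, but only the first one wins. This is a hedged sketch, not HotSpot code; std::atomic stands in for ShenandoahSharedEnumFlag, and the real try_cancel_gc() additionally cooperates with safepoints.

    #include <atomic>
    #include <cstdio>

    enum CancelState { CANCELLABLE, CANCELLED };

    static std::atomic<CancelState> g_cancelled{CANCELLABLE};

    // Returns true only for the thread whose CAS flips the flag, mirroring
    // the try_cancel_gc() contract documented in the next hunk.
    bool try_cancel() {
      CancelState expected = CANCELLABLE;
      return g_cancelled.compare_exchange_strong(expected, CANCELLED);
    }

    int main() {
      printf("%d\n", try_cancel());  // 1: this caller initiated cancellation
      printf("%d\n", try_cancel());  // 0: someone else already cancelled
    }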
@@ -329,16 +469,22 @@ class ShenandoahHeap : public CollectedHeap { CANCELLED }; + double _cancel_requested_time; ShenandoahSharedEnumFlag _cancelled_gc; + + // Returns true if cancel request was successfully communicated. + // Returns false if some other thread already communicated cancel + // request. A true return value does not mean GC has been + // cancelled, only that the process of cancelling GC has begun. bool try_cancel_gc(); public: - inline bool cancelled_gc() const; inline bool check_cancelled_gc_and_yield(bool sts_active = true); - inline void clear_cancelled_gc(); + inline void clear_cancelled_gc(bool clear_oom_handler = true); + void cancel_concurrent_mark(); void cancel_gc(GCCause::Cause cause); public: @@ -348,9 +494,6 @@ class ShenandoahHeap : public CollectedHeap { private: // GC support - // Reset bitmap, prepare regions for new GC cycle - void prepare_gc(); - void prepare_regions_and_collection_set(bool concurrent); // Evacuation void evacuate_collection_set(bool concurrent); // Concurrent root processing @@ -363,37 +506,57 @@ class ShenandoahHeap : public CollectedHeap { void update_heap_references(bool concurrent); // Final update region states void update_heap_region_states(bool concurrent); - void rebuild_free_set(bool concurrent); void rendezvous_threads(); void recycle_trash(); public: + void rebuild_free_set(bool concurrent); void notify_gc_progress() { _progress_last_gc.set(); } void notify_gc_no_progress() { _progress_last_gc.unset(); } // // Mark support private: + ShenandoahYoungGeneration* _young_generation; + ShenandoahGeneration* _global_generation; + ShenandoahOldGeneration* _old_generation; + ShenandoahControlThread* _control_thread; + ShenandoahRegulatorThread* _regulator_thread; ShenandoahCollectorPolicy* _shenandoah_policy; ShenandoahMode* _gc_mode; - ShenandoahHeuristics* _heuristics; ShenandoahFreeSet* _free_set; ShenandoahPacer* _pacer; ShenandoahVerifier* _verifier; - ShenandoahPhaseTimings* _phase_timings; + ShenandoahPhaseTimings* _phase_timings; + ShenandoahEvacuationTracker* _evac_tracker; + ShenandoahMmuTracker _mmu_tracker; + ShenandoahGenerationSizer _generation_sizer; - ShenandoahControlThread* control_thread() { return _control_thread; } + ShenandoahRegulatorThread* regulator_thread() { return _regulator_thread; } public: + ShenandoahControlThread* control_thread() { return _control_thread; } + ShenandoahYoungGeneration* young_generation() const { return _young_generation; } + ShenandoahGeneration* global_generation() const { return _global_generation; } + ShenandoahOldGeneration* old_generation() const { return _old_generation; } + ShenandoahGeneration* generation_for(ShenandoahAffiliation affiliation) const; + const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; } + + size_t max_size_for(ShenandoahGeneration* generation) const; + size_t min_size_for(ShenandoahGeneration* generation) const; + ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; } ShenandoahMode* mode() const { return _gc_mode; } - ShenandoahHeuristics* heuristics() const { return _heuristics; } ShenandoahFreeSet* free_set() const { return _free_set; } ShenandoahPacer* pacer() const { return _pacer; } - ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; } + ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; } + ShenandoahEvacuationTracker* evac_tracker() const { return _evac_tracker; } + + void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation); + 
void on_cycle_end(ShenandoahGeneration* generation); ShenandoahVerifier* verifier(); @@ -402,6 +565,9 @@ class ShenandoahHeap : public CollectedHeap { private: ShenandoahMonitoringSupport* _monitoring_support; MemoryPool* _memory_pool; + MemoryPool* _young_gen_memory_pool; + MemoryPool* _old_gen_memory_pool; + GCMemoryManager _stw_memory_manager; GCMemoryManager _cycle_memory_manager; ConcurrentGCTimer* _gc_timer; @@ -410,7 +576,7 @@ class ShenandoahHeap : public CollectedHeap { // For exporting to SA int _log_min_obj_alignment_in_bytes; public: - ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; } + ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; } GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; } GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; } SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; } @@ -421,17 +587,10 @@ class ShenandoahHeap : public CollectedHeap { GCTracer* tracer(); ConcurrentGCTimer* gc_timer() const; -// ---------- Reference processing -// -private: - ShenandoahReferenceProcessor* const _ref_processor; - -public: - ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; } - // ---------- Class Unloading // private: + ShenandoahSharedFlag _is_aging_cycle; ShenandoahSharedFlag _unload_classes; ShenandoahUnload _unloader; @@ -447,6 +606,9 @@ class ShenandoahHeap : public CollectedHeap { void stw_process_weak_roots(bool full_gc); void stw_weak_refs(bool full_gc); + inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation, + ShenandoahAffiliation new_affiliation); + // Heap iteration support void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops); bool prepare_aux_bitmap_for_iteration(); @@ -460,7 +622,17 @@ class ShenandoahHeap : public CollectedHeap { public: bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false); - bool is_in(const void* p) const override; + inline bool is_in(const void* p) const override; + + inline bool is_in_active_generation(oop obj) const; + inline bool is_in_young(const void* p) const; + inline bool is_in_old(const void* p) const; + inline bool is_old(oop pobj) const; + + inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r); + inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation); + + inline ShenandoahAffiliation region_affiliation(size_t index); bool requires_barriers(stackChunkOop obj) const override; @@ -514,19 +686,28 @@ class ShenandoahHeap : public CollectedHeap { // ---------- Allocation support // private: - HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region); + // How many bytes to transfer between old and young after we have finished recycling collection set regions? 
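The surplus/deficit fields declared just below suggest a simple region-transfer step once collection-set regions have been recycled. The following sketch is an assumption-laden illustration (GenSizes and apply_transfer are invented; the actual transfer policy lives elsewhere in the patch): a surplus moves regions from old to young, a deficit moves them the other way.

    #include <cstddef>
    #include <cstdio>

    struct GenSizes {
      size_t old_regions;
      size_t young_regions;
    };

    // Apply at most one of surplus or deficit, keeping the total constant.
    GenSizes apply_transfer(GenSizes s, size_t old_surplus, size_t old_deficit) {
      if (old_surplus > 0) {
        s.old_regions   -= old_surplus;   // give regions back to young
        s.young_regions += old_surplus;
      } else if (old_deficit > 0) {
        s.old_regions   += old_deficit;   // borrow regions from young
        s.young_regions -= old_deficit;
      }
      return s;
    }

    int main() {
      GenSizes s = apply_transfer({10, 90}, 2, 0);
      printf("%zu %zu\n", s.old_regions, s.young_regions);  // 8 92
    }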
+ size_t _old_regions_surplus; + size_t _old_regions_deficit; + + HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion); + inline HeapWord* allocate_from_gclab(Thread* thread, size_t size); HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size); HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size); + inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion); + HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion); + HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size); + public: - HeapWord* allocate_memory(ShenandoahAllocRequest& request); + HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion); HeapWord* mem_allocate(size_t size, bool* what) override; MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, size_t size, Metaspace::MetadataType mdtype) override; - void notify_mutator_alloc_words(size_t words, bool waste); + void notify_mutator_alloc_words(size_t words, size_t waste); HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override; size_t tlab_capacity(Thread *thr) const override; @@ -540,6 +721,12 @@ class ShenandoahHeap : public CollectedHeap { void tlabs_retire(bool resize); void gclabs_retire(bool resize); + inline void set_old_region_surplus(size_t surplus) { _old_regions_surplus = surplus; }; + inline void set_old_region_deficit(size_t deficit) { _old_regions_deficit = deficit; }; + + inline size_t get_old_region_surplus() { return _old_regions_surplus; }; + inline size_t get_old_region_deficit() { return _old_regions_deficit; }; + // ---------- Marking support // private: @@ -564,8 +751,6 @@ class ShenandoahHeap : public CollectedHeap { public: inline ShenandoahMarkingContext* complete_marking_context() const; inline ShenandoahMarkingContext* marking_context() const; - inline void mark_complete_marking_context(); - inline void mark_incomplete_marking_context(); template inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl); @@ -576,8 +761,6 @@ class ShenandoahHeap : public CollectedHeap { template inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); - void reset_mark_bitmap(); - // SATB barriers hooks inline bool requires_marking(const void* entry) const; @@ -597,8 +780,15 @@ class ShenandoahHeap : public CollectedHeap { private: ShenandoahCollectionSet* _collection_set; ShenandoahEvacOOMHandler _oom_evac_handler; + ShenandoahSharedFlag _old_gen_oom_evac; + + inline oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen); + void handle_old_evacuation(HeapWord* obj, size_t words, bool promotion); + void handle_old_evacuation_failure(); public: + void report_promotion_failure(Thread* thread, size_t size); + static address in_cset_fast_test_addr(); ShenandoahCollectionSet* collection_set() const { return _collection_set; } @@ -609,7 +799,7 @@ class ShenandoahHeap : public CollectedHeap { // Checks if location is in the collection set. Can be interior pointer, not the oop itself. inline bool in_collection_set_loc(void* loc) const; - // Evacuates object src. Returns the evacuated object, either evacuated + // Evacuates or promotes object src. Returns the evacuated object, either evacuated // by this thread, or by some other thread. 
inline oop evacuate_object(oop src, Thread* thread); @@ -617,6 +807,23 @@ class ShenandoahHeap : public CollectedHeap { inline void enter_evacuation(Thread* t); inline void leave_evacuation(Thread* t); + inline bool clear_old_evacuation_failure(); + +// ---------- Generational support +// +private: + RememberedScanner* _card_scan; + +public: + inline RememberedScanner* card_scan() { return _card_scan; } + void clear_cards_for(ShenandoahHeapRegion* region); + void mark_card_as_dirty(void* location); + void retire_plab(PLAB* plab); + void retire_plab(PLAB* plab, Thread* thread); + void cancel_old_gc(); + + void adjust_generation_sizes_for_next_cycle(size_t old_xfer_limit, size_t young_cset_regions, size_t old_cset_regions); + // ---------- Helper functions // public: @@ -638,7 +845,18 @@ class ShenandoahHeap : public CollectedHeap { static inline void atomic_clear_oop(narrowOop* addr, oop compare); static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare); - void trash_humongous_region_at(ShenandoahHeapRegion *r); + size_t trash_humongous_region_at(ShenandoahHeapRegion *r); + + static inline void increase_object_age(oop obj, uint additional_age); + + // Return the object's age, or a sentinel value when the age can't + // necessarily be determined because of concurrent locking by the + // mutator + static inline uint get_object_age(oop obj); + + void transfer_old_pointers_from_satb(); + + void log_heap_status(const char *msg) const; private: void trash_cset_regions(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 226190822a1..14d20a60064 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,12 +43,16 @@
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 #include "gc/shenandoah/shenandoahControlThread.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "gc/shenandoah/mode/shenandoahMode.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/prefetch.inline.hpp"
+#include "runtime/objectMonitor.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"

@@ -80,7 +85,7 @@ inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) con
   return index;
 }

-inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
+inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
   size_t index = heap_region_index_containing(addr);
   ShenandoahHeapRegion* const result = get_region(index);
   assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
@@ -252,9 +257,17 @@ inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
   return cancelled_gc();
 }

-inline void ShenandoahHeap::clear_cancelled_gc() {
+inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
   _cancelled_gc.set(CANCELLABLE);
-  _oom_evac_handler.clear();
+  if (_cancel_requested_time > 0) {
+    double cancel_time = os::elapsedTime() - _cancel_requested_time;
+    log_info(gc)("GC cancellation took %.3fs", cancel_time);
+    _cancel_requested_time = 0;
+  }
+
+  if (clear_oom_handler) {
+    _oom_evac_handler.clear();
+  }
 }

 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
@@ -271,12 +284,50 @@ inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size
   if (obj != nullptr) {
     return obj;
   }
-  // Otherwise...
return allocate_from_gclab_slow(thread, size); } +inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) { + assert(UseTLAB, "TLABs should be enabled"); + + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + HeapWord* obj; + + if (plab == nullptr) { + assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name()); + // No PLABs in this thread, fallback to shared allocation + return nullptr; + } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) { + return nullptr; + } + // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy + obj = plab->allocate(size); + if ((obj == nullptr) && (plab->words_remaining() < PLAB::min_size())) { + // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations + obj = allocate_from_plab_slow(thread, size, is_promotion); + } + // if plab->words_remaining() >= PLAB::min_size(), just return nullptr so we can use a shared allocation + if (obj == nullptr) { + return nullptr; + } + + if (is_promotion) { + ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize); + } else { + ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize); + } + return obj; +} + +inline ShenandoahAgeCensus* ShenandoahHeap::age_census() const { + assert(mode()->is_generational(), "Only in generational mode"); + assert(_age_census != nullptr, "Error: not initialized"); + return _age_census; +} + inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { - if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) { + assert(thread == Thread::current(), "Expected thread parameter to be current thread."); + if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) { // This thread went through the OOM during evac protocol and it is safe to return // the forward pointer. It must not attempt to evacuate any more. return ShenandoahBarrierSet::resolve_forwarded(p); @@ -284,12 +335,40 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope"); - size_t size = p->size(); + ShenandoahHeapRegion* r = heap_region_containing(p); + assert(!r->is_humongous(), "never evacuate humongous objects"); - assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects"); + ShenandoahAffiliation target_gen = r->affiliation(); + if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() && + target_gen == YOUNG_GENERATION) { + markWord mark = p->mark(); + if (mark.is_marked()) { + // Already forwarded. + return ShenandoahBarrierSet::resolve_forwarded(p); + } + if (mark.has_displaced_mark_helper()) { + // We don't want to deal with MT here just to ensure we read the right mark word. + // Skip the potential promotion attempt for this one. + } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) { + oop result = try_evacuate_object(p, thread, r, OLD_GENERATION); + if (result != nullptr) { + return result; + } + // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen. + } + } + return try_evacuate_object(p, thread, r, target_gen); +} - bool alloc_from_gclab = true; +// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating +// to OLD_GENERATION. 
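Before try_evacuate_object is defined, the promotion test that evacuate_object applies above can be seen in isolation: an object is a promotion candidate when the sum of its region's age and its mark-word age reaches the census tenuring threshold. A standalone model follows; should_promote is a made-up name.

    #include <cstdio>

    // Mirrors the check `r->age() + mark.age() >= age_census()->tenuring_threshold()`
    // from evacuate_object above.
    bool should_promote(unsigned region_age, unsigned mark_age, unsigned tenuring_threshold) {
      return region_age + mark_age >= tenuring_threshold;
    }

    int main() {
      // With a threshold of 7, an age-3 object in an age-4 region is promoted.
      printf("%d\n", should_promote(4, 3, 7));  // 1
      printf("%d\n", should_promote(4, 1, 7));  // 0
    }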
+inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region, + ShenandoahAffiliation target_gen) { + bool alloc_from_lab = true; + bool has_plab = false; HeapWord* copy = nullptr; + size_t size = p->size(); + bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young(); #ifdef ASSERT if (ShenandoahOOMDuringEvacALot && @@ -298,18 +377,83 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { } else { #endif if (UseTLAB) { - copy = allocate_from_gclab(thread, size); + switch (target_gen) { + case YOUNG_GENERATION: { + copy = allocate_from_gclab(thread, size); + if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) { + // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting + // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations. + ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size()); + copy = allocate_from_gclab(thread, size); + // If we still get nullptr, we'll try a shared allocation below. + } + break; + } + case OLD_GENERATION: { + PLAB* plab = ShenandoahThreadLocalData::plab(thread); + if (plab != nullptr) { + has_plab = true; + } + copy = allocate_from_plab(thread, size, is_promotion); + if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) && + ShenandoahThreadLocalData::plab_retries_enabled(thread)) { + // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because + // the requested object does not fit within the current plab but the plab still has an "abundance" of memory, + // where abundance is defined as >= PLAB::min_size(). In the former case, we try resetting the desired + // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations. + + // In this situation, PLAB memory is precious. We'll try to preserve our existing PLAB by forcing + // this particular allocation to be shared. + if (plab->words_remaining() < PLAB::min_size()) { + ShenandoahThreadLocalData::set_plab_size(thread, PLAB::min_size()); + copy = allocate_from_plab(thread, size, is_promotion); + // If we still get nullptr, we'll try a shared allocation below. + if (copy == nullptr) { + // If retry fails, don't continue to retry until we have success (probably in next GC pass) + ShenandoahThreadLocalData::disable_plab_retries(thread); + } + } + // else, copy still equals nullptr. this causes shared allocation below, preserving this plab for future needs. + } + break; + } + default: { + ShouldNotReachHere(); + break; + } + } } + if (copy == nullptr) { - ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size); - copy = allocate_memory(req); - alloc_from_gclab = false; + // If we failed to allocate in LAB, we'll try a shared allocation. + if (!is_promotion || !has_plab || (size > PLAB::min_size())) { + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen); + copy = allocate_memory(req, is_promotion); + alloc_from_lab = false; + } + // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate. + // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too + // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future + // evacuation pass. 
This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size()) } #ifdef ASSERT } #endif if (copy == nullptr) { + if (target_gen == OLD_GENERATION) { + assert(mode()->is_generational(), "Should only be here in generational mode."); + if (from_region->is_young()) { + // Signal that promotion failed. Will evacuate this old object somewhere in young gen. + report_promotion_failure(thread, size); + return nullptr; + } else { + // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this + // after the evacuation threads have finished. + handle_old_evacuation_failure(); + } + } + control_thread()->handle_alloc_failure_evac(size); _oom_evac_handler.handle_out_of_memory_during_evacuation(); @@ -318,15 +462,36 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { } // Copy the object: + _evac_tracker->begin_evacuation(thread, size * HeapWordSize); Copy::aligned_disjoint_words(cast_from_oop(p), copy, size); - // Try to install the new forwarding pointer. oop copy_val = cast_to_oop(copy); + + if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) { + ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1); + } + + // Try to install the new forwarding pointer. ContinuationGCSupport::relativize_stack_chunk(copy_val); oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); if (result == copy_val) { // Successfully evacuated. Our copy is now the public one! + _evac_tracker->end_evacuation(thread, size * HeapWordSize); + if (mode()->is_generational()) { + if (target_gen == OLD_GENERATION) { + handle_old_evacuation(copy, size, from_region->is_young()); + } else { + // When copying to the old generation above, we don't care + // about recording object age in the census stats. + assert(target_gen == YOUNG_GENERATION, "Error"); + // We record this census only when simulating pre-adaptive tenuring behavior, or + // when we have been asked to record the census at evacuation rather than at mark + if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) { + _evac_tracker->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val)); + } + } + } shenandoah_assert_correct(nullptr, copy_val); return copy_val; } else { @@ -335,23 +500,183 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { // But if it happens to contain references to evacuated regions, those references would // not get updated for this stale copy during this cycle, and we will crash while scanning // it the next cycle. - // - // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next - // object will overwrite this stale copy, or the filler object on LAB retirement will - // do this. For non-GCLAB allocations, we have no way to retract the allocation, and - // have to explicitly overwrite the copy with the filler object. With that overwrite, - // we have to keep the fwdptr initialized and pointing to our (stale) copy. - if (alloc_from_gclab) { - ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size); + if (alloc_from_lab) { + // For LAB allocations, it is enough to rollback the allocation ptr. Either the next + // object will overwrite this stale copy, or the filler object on LAB retirement will + // do this. 
+      switch (target_gen) {
+        case YOUNG_GENERATION: {
+          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
+          break;
+        }
+        case OLD_GENERATION: {
+          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
+          if (is_promotion) {
+            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
+          } else {
+            ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
+          }
+          break;
+        }
+        default: {
+          ShouldNotReachHere();
+          break;
+        }
+      }
     } else {
+      // For non-LAB allocations, we have no way to retract the allocation, and
+      // have to explicitly overwrite the copy with the filler object. With that overwrite,
+      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
+      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
       fill_with_object(copy, size);
       shenandoah_assert_correct(nullptr, copy_val);
+      // For non-LAB allocations, the object has already been registered
     }
     shenandoah_assert_correct(nullptr, result);
     return result;
   }
 }

+void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
+  // This operates on a new copy of an object. This means that the object's mark-word
+  // is thread-local and therefore safe to access. However, when the mark is
+  // displaced (i.e. stack-locked or monitor-locked), then it must be considered
+  // a shared memory location. It can be accessed by other threads.
+  // In particular, a competing evacuating thread can succeed in installing its copy
+  // as the forwardee and continue to unlock the object, at which point 'our'
+  // write to the foreign stack-location would potentially overwrite random
+  // information on that stack. Writing to a monitor is less problematic,
+  // but still not safe: while the ObjectMonitor would not randomly disappear,
+  // the other thread would also write to the same displaced header location,
+  // possibly increasing the age twice.
+  // For all these reasons, we take the conservative approach and do not attempt
+  // to increase the age when the header is displaced.
+  markWord w = obj->mark();
+  // The mark-word has been copied from the original object. It cannot be
+  // inflating, because inflation cannot be interrupted by a safepoint,
+  // and after a safepoint, a Java thread would first have to successfully
+  // evacuate the object before it could inflate the monitor.
+  assert(!w.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT, "must not inflate monitor before evacuation of object succeeds");
+  // It is possible that we have copied the object after another thread has
+  // already successfully completed evacuation. While harmless (we would never
+  // publish our copy), don't even attempt to modify the age when that
+  // happens.
+  if (!w.has_displaced_mark_helper() && !w.is_marked()) {
+    w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
+    obj->set_mark(w);
+  }
+}
+
+// Return the object's age, or a sentinel value when the age can't
+// necessarily be determined because of concurrent locking by the
+// mutator
+uint ShenandoahHeap::get_object_age(oop obj) {
+  // This is impossible to do unless we "freeze" ABA-type oscillations.
+  // With Lilliput, we can do this more easily.
+ markWord w = obj->mark(); + assert(!w.is_marked(), "must not be forwarded"); + if (w.has_monitor()) { + w = w.monitor()->header(); + } else if (w.is_being_inflated() || w.has_displaced_mark_helper()) { + // Informs caller that we aren't able to determine the age + return markWord::max_age + 1; // sentinel + } + assert(w.age() <= markWord::max_age, "Impossible!"); + return w.age(); +} + +inline bool ShenandoahHeap::clear_old_evacuation_failure() { + return _old_gen_oom_evac.try_unset(); +} + +bool ShenandoahHeap::is_in(const void* p) const { + HeapWord* heap_base = (HeapWord*) base(); + HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); + return p >= heap_base && p < last_region_end; +} + +inline bool ShenandoahHeap::is_in_active_generation(oop obj) const { + if (!mode()->is_generational()) { + // everything is the same single generation + return true; + } + + if (active_generation() == nullptr) { + // no collection is happening, only expect this to be called + // when concurrent processing is active, but that could change + return false; + } + + assert(is_in(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj)); + assert((active_generation() == (ShenandoahGeneration*) old_generation()) || + (active_generation() == (ShenandoahGeneration*) young_generation()) || + (active_generation() == global_generation()), "Active generation must be old, young, or global"); + + size_t index = heap_region_containing(obj)->index(); + switch (_affiliations[index]) { + case ShenandoahAffiliation::FREE: + // Free regions are in Old, Young, Global + return true; + case ShenandoahAffiliation::YOUNG_GENERATION: + // Young regions are in young_generation and global_generation, not in old_generation + return (active_generation() != (ShenandoahGeneration*) old_generation()); + case ShenandoahAffiliation::OLD_GENERATION: + // Old regions are in old_generation and global_generation, not in young_generation + return (active_generation() != (ShenandoahGeneration*) young_generation()); + default: + assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, _affiliations[index], index); + return false; + } +} + +inline bool ShenandoahHeap::is_in_young(const void* p) const { + return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::YOUNG_GENERATION); +} + +inline bool ShenandoahHeap::is_in_old(const void* p) const { + return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::OLD_GENERATION); +} + +inline bool ShenandoahHeap::is_old(oop obj) const { + return is_gc_generation_young() && is_in_old(obj); +} + +inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) { + return (ShenandoahAffiliation) _affiliations[r->index()]; +} + +inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation, + ShenandoahAffiliation new_affiliation) { + // A lock is required when changing from FREE to NON-FREE. Though it may be possible to elide the lock when + // transitioning from in-use to FREE, the current implementation uses a lock for this transition. A lock is + // not required to change from YOUNG to OLD (i.e. when promoting humongous region). 
+  //
+  // new_affiliation is:     FREE   YOUNG   OLD
+  //  orig_affiliation is:
+  //                  FREE:   X       L      L
+  //                 YOUNG:   L       X
+  //                   OLD:   L       X      X
+  //
+  // X means state transition won't happen (so don't care)
+  // L means lock should be held
+  // Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
+  //
+  // Note: during full GC, all transitions between states are possible. During Full GC, we should be in a safepoint.
+
+  if ((orig_affiliation == ShenandoahAffiliation::FREE) || (new_affiliation == ShenandoahAffiliation::FREE)) {
+    shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  }
+}
+
+inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation) {
+#ifdef ASSERT
+  assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
+#endif
+  _affiliations[r->index()] = (uint8_t) new_affiliation;
+}
+
+inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) {
+  return (ShenandoahAffiliation) _affiliations[index];
+}
+
 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
   oop obj = cast_to_oop(entry);
   return !_marking_context->is_marked_strong(obj);
@@ -367,10 +692,15 @@ inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
   return collection_set()->is_in_loc(p);
 }

+
 inline bool ShenandoahHeap::is_stable() const {
   return _gc_state.is_clear();
 }

+inline bool ShenandoahHeap::has_evacuation_reserve_quantities() const {
+  return _has_evacuation_reserve_quantities;
+}
+
 inline bool ShenandoahHeap::is_idle() const {
   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
 }
@@ -379,6 +709,14 @@ inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
   return _gc_state.is_set(MARKING);
 }

+inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
+  return _gc_state.is_set(YOUNG_MARKING);
+}
+
+inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
+  return _gc_state.is_set(OLD_MARKING);
+}
+
 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
   return _gc_state.is_set(EVACUATION);
 }
@@ -411,6 +749,64 @@ inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
   return _gc_state.is_set(WEAK_ROOTS);
 }

+inline bool ShenandoahHeap::is_aging_cycle() const {
+  return _is_aging_cycle.is_set();
+}
+
+inline size_t ShenandoahHeap::set_promoted_reserve(size_t new_val) {
+  size_t orig = _promoted_reserve;
+  _promoted_reserve = new_val;
+  return orig;
+}
+
+inline size_t ShenandoahHeap::get_promoted_reserve() const {
+  return _promoted_reserve;
+}
+
+inline size_t ShenandoahHeap::set_old_evac_reserve(size_t new_val) {
+  size_t orig = _old_evac_reserve;
+  _old_evac_reserve = new_val;
+  return orig;
+}
+
+inline size_t ShenandoahHeap::get_old_evac_reserve() const {
+  return _old_evac_reserve;
+}
+
+inline void ShenandoahHeap::augment_old_evac_reserve(size_t increment) {
+  _old_evac_reserve += increment;
+}
+
+inline void ShenandoahHeap::augment_promo_reserve(size_t increment) {
+  _promoted_reserve += increment;
+}
+
+inline void ShenandoahHeap::reset_promoted_expended() {
+  Atomic::store(&_promoted_expended, (size_t) 0);
+}
+
+inline size_t ShenandoahHeap::expend_promoted(size_t increment) {
+  return Atomic::add(&_promoted_expended, increment);
+}
+
+inline size_t ShenandoahHeap::unexpend_promoted(size_t decrement) {
+  return Atomic::sub(&_promoted_expended, decrement);
+}
+
+inline size_t ShenandoahHeap::get_promoted_expended() {
+  return Atomic::load(&_promoted_expended);
+}
+
+inline size_t
ShenandoahHeap::set_young_evac_reserve(size_t new_val) {
+  size_t orig = _young_evac_reserve;
+  _young_evac_reserve = new_val;
+  return orig;
+}
+
+inline size_t ShenandoahHeap::get_young_evac_reserve() const {
+  return _young_evac_reserve;
+}
+
 template <class T>
 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
   marked_object_iterate(region, cl, region->top());
@@ -420,8 +816,7 @@ template <class T>
 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
-  ShenandoahMarkingContext* const ctx = complete_marking_context();
-  assert(ctx->is_complete(), "sanity");
+  ShenandoahMarkingContext* const ctx = marking_context();
   HeapWord* tams = ctx->top_at_mark_start(region);
@@ -544,7 +939,7 @@ inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* regi
   }
 }
-inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
+inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
   if (region_idx < _num_regions) {
     return _regions[region_idx];
   } else {
@@ -552,14 +947,6 @@ inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx)
   }
 }
-inline void ShenandoahHeap::mark_complete_marking_context() {
-  _marking_context->mark_complete();
-}
-
-inline void ShenandoahHeap::mark_incomplete_marking_context() {
-  _marking_context->mark_incomplete();
-}
-
 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
   assert (_marking_context->is_complete()," sanity");
   return _marking_context;
@@ -569,4 +956,16 @@ inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
   return _marking_context;
 }
+inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
+  if (mode()->is_generational()) {
+    _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
+  }
+}
+
+inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
+  if (mode()->is_generational()) {
+    _card_scan->mark_card_as_dirty((HeapWord*)location);
+  }
+}
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index a46c7edc348..a300ff5eae6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -24,12 +25,19 @@ */ #include "precompiled.hpp" +#include "gc/shared/cardTable.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/tlab_globals.hpp" +#include "gc/shenandoah/shenandoahCardTable.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" #include "jfr/jfrEvents.hpp" #include "memory/allocation.hpp" #include "memory/iterator.inline.hpp" @@ -44,6 +52,7 @@ #include "runtime/safepoint.hpp" #include "utilities/powerOfTwo.hpp" + size_t ShenandoahHeapRegion::RegionCount = 0; size_t ShenandoahHeapRegion::RegionSizeBytes = 0; size_t ShenandoahHeapRegion::RegionSizeWords = 0; @@ -62,13 +71,20 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c _end(start + RegionSizeWords), _new_top(nullptr), _empty_time(os::elapsedTime()), + _top_before_promoted(nullptr), _state(committed ? _empty_committed : _empty_uncommitted), _top(start), _tlab_allocs(0), _gclab_allocs(0), + _plab_allocs(0), _live_data(0), _critical_pins(0), - _update_watermark(start) { + _update_watermark(start), + _age(0) +#ifdef SHENANDOAH_CENSUS_NOISE + , _youth(0) +#endif // SHENANDOAH_CENSUS_NOISE + { assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end), "invalid space boundaries"); @@ -84,13 +100,14 @@ void ShenandoahHeapRegion::report_illegal_transition(const char *method) { fatal("%s", ss.freeze()); } -void ShenandoahHeapRegion::make_regular_allocation() { +void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) { shenandoah_assert_heaplocked(); - + reset_age(); switch (_state) { case _empty_uncommitted: do_commit(); case _empty_committed: + assert(this->affiliation() == affiliation, "Region affiliation should already be established"); set_state(_regular); case _regular: case _pinned: @@ -100,11 +117,38 @@ void ShenandoahHeapRegion::make_regular_allocation() { } } +// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned. This implements +// behavior previously performed as a side effect of make_regular_bypass(). 
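+// (Editor's sketch, elaborating the comment above. Every affiliation change must keep the per-generation
+//  affiliated-region counters balanced. Assuming the region was previously OLD, the transition performed by
+//  make_young_maybe() below is effectively:
+//
+//      heap->old_generation()->decrement_affiliated_region_count();    // leave old
+//      set_affiliation(YOUNG_GENERATION);
+//      heap->young_generation()->increment_affiliated_region_count();  // join young
+//
+//  so the sum of the per-generation counts always equals the number of affiliated (non-FREE) regions.)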
+void ShenandoahHeapRegion::make_young_maybe() { + shenandoah_assert_heaplocked(); + switch (_state) { + case _empty_uncommitted: + case _empty_committed: + case _cset: + case _humongous_start: + case _humongous_cont: + if (affiliation() != YOUNG_GENERATION) { + if (is_old()) { + ShenandoahHeap::heap()->old_generation()->decrement_affiliated_region_count(); + } + set_affiliation(YOUNG_GENERATION); + ShenandoahHeap::heap()->young_generation()->increment_affiliated_region_count(); + } + return; + case _pinned_cset: + case _regular: + case _pinned: + return; + default: + assert(false, "Unexpected _state in make_young_maybe"); + } +} + void ShenandoahHeapRegion::make_regular_bypass() { shenandoah_assert_heaplocked(); assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(), "only for full or degen GC"); - + reset_age(); switch (_state) { case _empty_uncommitted: do_commit(); @@ -127,6 +171,7 @@ void ShenandoahHeapRegion::make_regular_bypass() { void ShenandoahHeapRegion::make_humongous_start() { shenandoah_assert_heaplocked(); + reset_age(); switch (_state) { case _empty_uncommitted: do_commit(); @@ -138,10 +183,12 @@ void ShenandoahHeapRegion::make_humongous_start() { } } -void ShenandoahHeapRegion::make_humongous_start_bypass() { +void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) { shenandoah_assert_heaplocked(); assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC"); - + // Don't bother to account for affiliated regions during Full GC. We recompute totals at end. + set_affiliation(affiliation); + reset_age(); switch (_state) { case _empty_committed: case _regular: @@ -156,6 +203,7 @@ void ShenandoahHeapRegion::make_humongous_start_bypass() { void ShenandoahHeapRegion::make_humongous_cont() { shenandoah_assert_heaplocked(); + reset_age(); switch (_state) { case _empty_uncommitted: do_commit(); @@ -167,10 +215,12 @@ void ShenandoahHeapRegion::make_humongous_cont() { } } -void ShenandoahHeapRegion::make_humongous_cont_bypass() { +void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) { shenandoah_assert_heaplocked(); assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC"); - + set_affiliation(affiliation); + // Don't bother to account for affiliated regions during Full GC. We recompute totals at end. + reset_age(); switch (_state) { case _empty_committed: case _regular: @@ -211,6 +261,7 @@ void ShenandoahHeapRegion::make_unpinned() { switch (_state) { case _pinned: + assert(is_affiliated(), "Pinned region should be affiliated"); set_state(_regular); return; case _regular: @@ -229,6 +280,7 @@ void ShenandoahHeapRegion::make_unpinned() { void ShenandoahHeapRegion::make_cset() { shenandoah_assert_heaplocked(); + // Leave age untouched. We need to consult the age when we are deciding whether to promote evacuated objects. switch (_state) { case _regular: set_state(_cset); @@ -241,12 +293,17 @@ void ShenandoahHeapRegion::make_cset() { void ShenandoahHeapRegion::make_trash() { shenandoah_assert_heaplocked(); + reset_age(); switch (_state) { - case _cset: - // Reclaiming cset regions case _humongous_start: case _humongous_cont: - // Reclaiming humongous regions + { + // Reclaiming humongous regions and reclaim humongous waste. When this region is eventually recycled, we'll reclaim + // its used memory. At recycle time, we no longer recognize this as a humongous region. 
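+      // (Editor's note: after the waste adjustment below, control intentionally falls through to the
+      //  _cset and _regular cases, so a reclaimed humongous region takes the same set_state(_trash)
+      //  path as any other immediately reclaimed region.)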
+ decrement_humongous_waste(); + } + case _cset: + // Reclaiming cset regions case _regular: // Immediate region reclaim set_state(_trash); @@ -261,11 +318,14 @@ void ShenandoahHeapRegion::make_trash_immediate() { // On this path, we know there are no marked objects in the region, // tell marking context about it to bypass bitmap resets. - ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this); + assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here."); + ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this); } void ShenandoahHeapRegion::make_empty() { shenandoah_assert_heaplocked(); + reset_age(); + CENSUS_NOISE(clear_youth();) switch (_state) { case _trash: set_state(_empty_committed); @@ -305,10 +365,11 @@ void ShenandoahHeapRegion::make_committed_bypass() { void ShenandoahHeapRegion::reset_alloc_metadata() { _tlab_allocs = 0; _gclab_allocs = 0; + _plab_allocs = 0; } size_t ShenandoahHeapRegion::get_shared_allocs() const { - return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize; + return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize; } size_t ShenandoahHeapRegion::get_tlab_allocs() const { @@ -319,6 +380,10 @@ size_t ShenandoahHeapRegion::get_gclab_allocs() const { return _gclab_allocs * HeapWordSize; } +size_t ShenandoahHeapRegion::get_plab_allocs() const { + return _plab_allocs * HeapWordSize; +} + void ShenandoahHeapRegion::set_live_data(size_t s) { assert(Thread::current()->is_VM_thread(), "by VM thread"); _live_data = (s >> LogHeapWordSize); @@ -363,6 +428,8 @@ void ShenandoahHeapRegion::print_on(outputStream* st) const { ShouldNotReachHere(); } + st->print("|%s", shenandoah_affiliation_code(affiliation())); + #define SHR_PTR_FORMAT "%12" PRIxPTR st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT, @@ -374,6 +441,9 @@ void ShenandoahHeapRegion::print_on(outputStream* st) const { st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs())); st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs())); + if (ShenandoahHeap::heap()->mode()->is_generational()) { + st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs())); + } st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs())); st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes())); st->print("|CP " SIZE_FORMAT_W(3), pin_count()); @@ -382,26 +452,201 @@ void ShenandoahHeapRegion::print_on(outputStream* st) const { #undef SHR_PTR_FORMAT } -void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) { +// oop_iterate without closure and without cancellation. always return true. 
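+// (Editor's sketch of what coalescing and filling produces. Given a span of dead objects between two live
+//  ones, ShenandoahHeap::fill_with_object(addr, words) overwrites the whole span with a single filler
+//  object, so later remembered-set scans can walk the region by object size alone:
+//
+//      [ live A ][ dead x ][ dead y ][ live B ]   -->   [ live A ][ filler of size x+y ][ live B ]
+//
+//  card_scan()->coalesce_objects() then unregisters the subsumed object starts, leaving only the filler's
+//  start registered with the remembered set.)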
+bool ShenandoahHeapRegion::oop_fill_and_coalesce_without_cancel() { + HeapWord* obj_addr = resume_coalesce_and_fill(); + + assert(!is_humongous(), "No need to fill or coalesce humongous regions"); + if (!is_active()) { + end_preemptible_coalesce_and_fill(); + return true; + } + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* marking_context = heap->marking_context(); + // All objects above TAMS are considered live even though their mark bits will not be set. Note that young- + // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen + // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS + // and will be treated as live during the current old-gen marking pass, even though they will not be + // explicitly marked. + HeapWord* t = marking_context->top_at_mark_start(this); + + // Expect marking to be completed before these threads invoke this service. + assert(heap->active_generation()->is_mark_complete(), "sanity"); + while (obj_addr < t) { + oop obj = cast_to_oop(obj_addr); + if (marking_context->is_marked(obj)) { + assert(obj->klass() != nullptr, "klass should not be nullptr"); + obj_addr += obj->size(); + } else { + // Object is not marked. Coalesce and fill dead object with dead neighbors. + HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t); + assert(next_marked_obj <= t, "next marked object cannot exceed top"); + size_t fill_size = next_marked_obj - obj_addr; + assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size"); + ShenandoahHeap::fill_with_object(obj_addr, fill_size); + heap->card_scan()->coalesce_objects(obj_addr, fill_size); + obj_addr = next_marked_obj; + } + } + // Mark that this region has been coalesced and filled + end_preemptible_coalesce_and_fill(); + return true; +} + +// oop_iterate without closure, return true if completed without cancellation +bool ShenandoahHeapRegion::oop_fill_and_coalesce() { + HeapWord* obj_addr = resume_coalesce_and_fill(); + // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free). + const size_t preemption_stride = 128; + + assert(!is_humongous(), "No need to fill or coalesce humongous regions"); + if (!is_active()) { + end_preemptible_coalesce_and_fill(); + return true; + } + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* marking_context = heap->marking_context(); + // All objects above TAMS are considered live even though their mark bits will not be set. Note that young- + // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen + // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS + // and will be treated as live during the current old-gen marking pass, even though they will not be + // explicitly marked. + HeapWord* t = marking_context->top_at_mark_start(this); + + // Expect marking to be completed before these threads invoke this service. + assert(heap->active_generation()->is_mark_complete(), "sanity"); + + size_t ops_before_preempt_check = preemption_stride; + while (obj_addr < t) { + oop obj = cast_to_oop(obj_addr); + if (marking_context->is_marked(obj)) { + assert(obj->klass() != nullptr, "klass should not be nullptr"); + obj_addr += obj->size(); + } else { + // Object is not marked. Coalesce and fill dead object with dead neighbors. 
+ HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t); + assert(next_marked_obj <= t, "next marked object cannot exceed top"); + size_t fill_size = next_marked_obj - obj_addr; + assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size"); + ShenandoahHeap::fill_with_object(obj_addr, fill_size); + heap->card_scan()->coalesce_objects(obj_addr, fill_size); + obj_addr = next_marked_obj; + } + if (ops_before_preempt_check-- == 0) { + if (heap->cancelled_gc()) { + suspend_coalesce_and_fill(obj_addr); + return false; + } + ops_before_preempt_check = preemption_stride; + } + } + // Mark that this region has been coalesced and filled + end_preemptible_coalesce_and_fill(); + return true; +} + +void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) { if (!is_active()) return; if (is_humongous()) { + // No need to fill dead within humongous regions. Either the entire region is dead, or the entire region is + // unchanged. A humongous region holds no more than one humongous object. oop_iterate_humongous(blk); } else { - oop_iterate_objects(blk); + global_oop_iterate_objects_and_fill_dead(blk); } } -void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) { - assert(! is_humongous(), "no humongous region here"); +void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) { + assert(!is_humongous(), "no humongous region here"); HeapWord* obj_addr = bottom(); - HeapWord* t = top(); - // Could call objects iterate, but this is easier. + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* marking_context = heap->marking_context(); + RememberedScanner* rem_set_scanner = heap->card_scan(); + // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts. + HeapWord* t = marking_context->top_at_mark_start(this); + + assert(heap->active_generation()->is_mark_complete(), "sanity"); + + while (obj_addr < t) { + oop obj = cast_to_oop(obj_addr); + if (marking_context->is_marked(obj)) { + assert(obj->klass() != nullptr, "klass should not be nullptr"); + // when promoting an entire region, we have to register the marked objects as well + obj_addr += obj->oop_iterate_size(blk); + } else { + // Object is not marked. Coalesce and fill dead object with dead neighbors. + HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t); + assert(next_marked_obj <= t, "next marked object cannot exceed top"); + size_t fill_size = next_marked_obj - obj_addr; + assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size"); + ShenandoahHeap::fill_with_object(obj_addr, fill_size); + // coalesce_objects() unregisters all but first object subsumed within coalesced range. + rem_set_scanner->coalesce_objects(obj_addr, fill_size); + obj_addr = next_marked_obj; + } + } + + // Any object above TAMS and below top() is considered live. + t = top(); while (obj_addr < t) { oop obj = cast_to_oop(obj_addr); obj_addr += obj->oop_iterate_size(blk); } } +// DO NOT CANCEL. If this worker thread has accepted responsibility for scanning a particular range of addresses, it +// must finish the work before it can be cancelled. 
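+// (Editor's aside: contrast this with oop_fill_and_coalesce() above, which is cancellable. A hypothetical
+//  old-gen driver would use its _coalesce_and_fill_boundary protocol roughly as follows:
+//
+//      r->begin_preemptible_coalesce_and_fill();   // boundary := bottom()
+//      while (!r->oop_fill_and_coalesce()) {       // false: a cancellation was observed and
+//                                                  // suspend_coalesce_and_fill() saved the cursor
+//        // back off, let the interrupting collection finish, then retry; the next call resumes
+//        // from resume_coalesce_and_fill() instead of rescanning from bottom()
+//      }                                           // on success, boundary == end()
+//
+//  The humongous slice iteration below, by contrast, must run to completion once a slice is claimed.)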
+void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only, + HeapWord* start, size_t words, bool write_table) { + assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards"); + assert(is_humongous(), "only humongous region here"); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + // Find head. + ShenandoahHeapRegion* r = humongous_start_region(); + assert(r->is_humongous_start(), "need humongous head here"); + assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words, + "slice must be integral number of cards"); + + oop obj = cast_to_oop(r->bottom()); + RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan(); + size_t card_index = scanner->card_index_for_addr(start); + size_t num_cards = words / CardTable::card_size_in_words(); + + if (dirty_only) { + if (write_table) { + while (num_cards-- > 0) { + if (scanner->is_write_card_dirty(card_index++)) { + obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words())); + } + start += CardTable::card_size_in_words(); + } + } else { + while (num_cards-- > 0) { + if (scanner->is_card_dirty(card_index++)) { + obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words())); + } + start += CardTable::card_size_in_words(); + } + } + } else { + // Scan all data, regardless of whether cards are dirty + obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words())); + } +} + +void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) { + assert(is_humongous(), "only humongous region here"); + // Find head. + ShenandoahHeapRegion* r = humongous_start_region(); + assert(r->is_humongous_start(), "need humongous head here"); + oop obj = cast_to_oop(r->bottom()); + obj->oop_iterate(blk, MemRegion(start, start + words)); +} + void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) { assert(is_humongous(), "only humongous region here"); // Find head. @@ -427,16 +672,22 @@ ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const { } void ShenandoahHeapRegion::recycle() { + shenandoah_assert_heaplocked(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGeneration* generation = heap->generation_for(affiliation()); + heap->decrease_used(generation, used()); + set_top(bottom()); clear_live_data(); reset_alloc_metadata(); - ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this); + heap->marking_context()->reset_top_at_mark_start(this); set_update_watermark(bottom()); make_empty(); - + ShenandoahHeap::heap()->generation_for(affiliation())->decrement_affiliated_region_count(); + set_affiliation(FREE); if (ZapUnusedHeapArea) { SpaceMangler::mangle_region(MemRegion(bottom(), end())); } @@ -480,6 +731,11 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE); } + // Generational Shenandoah needs this alignment for card tables. 
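+  // (Editor's note: my understanding is that ct_max_alignment_constraint() is the card size multiplied by
+  //  the OS page size, so rounding max_heap_size up to it ensures the card table spanning the heap can be
+  //  committed on whole pages. For example, with 512-byte cards and 4K pages, the heap size is rounded up
+  //  to a 2M boundary.)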
+  if (strcmp(ShenandoahGCMode, "generational") == 0) {
+    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
+  }
+
   size_t region_size;
   if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
     if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
@@ -668,3 +924,230 @@ void ShenandoahHeapRegion::record_unpin() {
 size_t ShenandoahHeapRegion::pin_count() const {
   return Atomic::load(&_critical_pins);
 }
+
+void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
+  {
+    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
+                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
+                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
+                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
+  }
+
+#ifdef ASSERT
+  {
+    // During full GC, heap->complete_marking_context() is not valid and may equal nullptr.
+    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+    size_t idx = this->index();
+    HeapWord* top_bitmap = ctx->top_bitmap(this);
+
+    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
+           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
+           p2i(top_bitmap), p2i(_end));
+  }
+#endif
+
+  if (region_affiliation == new_affiliation) {
+    return;
+  }
+
+  if (!heap->mode()->is_generational()) {
+    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
+                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
+    heap->set_affiliation(this, new_affiliation);
+    return;
+  }
+
+  switch (new_affiliation) {
+    case FREE:
+      assert(!has_live(), "Free region should not have live data");
+      break;
+    case YOUNG_GENERATION:
+      reset_age();
+      break;
+    case OLD_GENERATION:
+      // TODO: should we reset_age() for OLD as well? Examine invocations of set_affiliation(). Some contexts redundantly
+      // invoke reset_age().
+      break;
+    default:
+      ShouldNotReachHere();
+      return;
+  }
+  heap->set_affiliation(this, new_affiliation);
+}
+
+// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
+// set scans of this region's content. The region will be coalesced and filled prior to the next old-gen marking effort.
+// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
+// contained herein.
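+// (Editor's elaboration of the comment above: the precise alternative would dirty only the cards that
+//  actually hold interesting pointers, e.g.
+//
+//      for each card c in [bottom(), top_before_promote()):   // requires walking every live object now
+//        if (card c spans an old-to-young pointer) card_scan()->mark_card_as_dirty(start of c);
+//
+//  which scans the region once here and again during the next remembered-set scan. Dirtying the whole
+//  range with mark_range_as_dirty(), as promote_in_place() does below, accepts some redundant card
+//  scanning later in exchange for skipping the extra full pass over the region now.)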
+void ShenandoahHeapRegion::promote_in_place() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahMarkingContext* marking_context = heap->marking_context(); + HeapWord* tams = marking_context->top_at_mark_start(this); + assert(heap->active_generation()->is_mark_complete(), "sanity"); + assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking"); + assert(is_young(), "Only young regions can be promoted"); + assert(is_regular(), "Use different service to promote humongous regions"); + assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged"); + + ShenandoahOldGeneration* old_gen = heap->old_generation(); + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + assert(get_top_before_promote() == tams, "Cannot promote regions in place if top has advanced beyond TAMS"); + + // Rebuild the remembered set information and mark the entire range as DIRTY. We do NOT scan the content of this + // range to determine which cards need to be DIRTY. That would force us to scan the region twice, once now, and + // once during the subsequent remembered set scan. Instead, we blindly (conservatively) mark everything as DIRTY + // now and then sort out the CLEAN pages during the next remembered set scan. + // + // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here, + // then registering every live object and every coalesced range of free objects in the loop that follows. + heap->card_scan()->reset_object_range(bottom(), end()); + heap->card_scan()->mark_range_as_dirty(bottom(), get_top_before_promote() - bottom()); + + // TODO: use an existing coalesce-and-fill function rather than replicating the code here. + HeapWord* obj_addr = bottom(); + while (obj_addr < tams) { + oop obj = cast_to_oop(obj_addr); + if (marking_context->is_marked(obj)) { + assert(obj->klass() != nullptr, "klass should not be NULL"); + // This thread is responsible for registering all objects in this region. No need for lock. + heap->card_scan()->register_object_without_lock(obj_addr); + obj_addr += obj->size(); + } else { + HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams); + assert(next_marked_obj <= tams, "next marked object cannot exceed tams"); + size_t fill_size = next_marked_obj - obj_addr; + assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size"); + ShenandoahHeap::fill_with_object(obj_addr, fill_size); + heap->card_scan()->register_object_without_lock(obj_addr); + obj_addr = next_marked_obj; + } + } + // We do not need to scan above TAMS because restored top equals tams + assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams"); + + { + ShenandoahHeapLocker locker(heap->lock()); + + HeapWord* update_watermark = get_update_watermark(); + + // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the + // is_collector_free range. + restore_top_before_promote(); + + size_t region_capacity = free(); + size_t region_used = used(); + + // The update_watermark was likely established while we had the artificially high value of top. Make it sane now. 
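+    // (Editor's note: while this region awaited promotion, top() was artificially advanced so that no
+    //  further young allocations could land here; restore_top_before_promote() above has just undone
+    //  that, which is why the preserved update_watermark may legitimately exceed the restored top().)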
+    assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
+    set_update_watermark(top());
+
+    // Unconditionally transfer one region from young to old to represent the newly promoted region.
+    // This expands old and shrinks young by the size of one region. Strictly, we do not "need" to expand old
+    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
+    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
+    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
+    // we would be trading a fully empty region for a partially used region.
+
+    young_gen->decrease_used(region_used);
+    young_gen->decrement_affiliated_region_count();
+
+    // transfer_to_old() increases capacity of old and decreases capacity of young
+    heap->generation_sizer()->force_transfer_to_old(1);
+    set_affiliation(OLD_GENERATION);
+
+    old_gen->increment_affiliated_region_count();
+    old_gen->increase_used(region_used);
+
+    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds PLAB::min_size()
+    heap->free_set()->add_old_collector_free_region(this);
+  }
+}
+
+void ShenandoahHeapRegion::promote_humongous() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahMarkingContext* marking_context = heap->marking_context();
+  assert(heap->active_generation()->is_mark_complete(), "sanity");
+  assert(is_young(), "Only young regions can be promoted");
+  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
+  assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
+
+  ShenandoahGeneration* old_generation = heap->old_generation();
+  ShenandoahGeneration* young_generation = heap->young_generation();
+
+  oop obj = cast_to_oop(bottom());
+  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
+
+  // TODO: Consider not promoting humongous objects that represent primitive arrays. Leaving a primitive array
+  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
+  // scanned. Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
+  // it becomes garbage. Better to not make this change until sizes of young-gen and old-gen are completely
+  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
+  // who has carefully analyzed the required sizes of an application's young-gen and old-gen.
+  size_t used_bytes = obj->size() * HeapWordSize;
+  size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
+  size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
+  size_t index_limit = index() + spanned_regions;
+  {
+    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
+    // young to old.
+    ShenandoahHeapLocker locker(heap->lock());
+
+    // We promote humongous objects unconditionally, without checking for availability. We adjust
+    // usage totals, including humongous waste, after evacuation is done.
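+    // (Editor's sketch of the accounting invariant that both promotion paths preserve, for N promoted
+    //  regions holding `bytes` of used memory:
+    //
+    //      young->decrease_used(bytes);                   old->increase_used(bytes);
+    //      young affiliated-region count -= N;            old affiliated-region count += N;
+    //      generation_sizer()->force_transfer_to_old(N);  // moves capacity from young to old
+    //
+    //  Because each generation computes available space as max_capacity - used, transferring capacity
+    //  along with the used bytes keeps both generations' availability figures meaningful.)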
+ log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions); + + young_generation->decrease_used(used_bytes); + young_generation->decrease_humongous_waste(humongous_waste); + young_generation->decrease_affiliated_region_count(spanned_regions); + + // transfer_to_old() increases capacity of old and decreases capacity of young + heap->generation_sizer()->force_transfer_to_old(spanned_regions); + + // For this region and each humongous continuation region spanned by this humongous object, change + // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory + // in the last humongous region that is not spanned by obj is currently not used. + for (size_t i = index(); i < index_limit; i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT, + r->index(), p2i(r->bottom()), p2i(r->top())); + // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here + r->set_affiliation(OLD_GENERATION); + } + + old_generation->increase_affiliated_region_count(spanned_regions); + old_generation->increase_used(used_bytes); + old_generation->increase_humongous_waste(humongous_waste); + } + + // Since this region may have served previously as OLD, it may hold obsolete object range info. + heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words()); + // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation. + heap->card_scan()->register_object_without_lock(bottom()); + + if (obj->is_typeArray()) { + // Primitive arrays don't need to be scanned. + log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT, + index(), p2i(bottom()), p2i(bottom() + obj->size())); + heap->card_scan()->mark_range_as_clean(bottom(), obj->size()); + } else { + log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT, + index(), p2i(bottom()), p2i(bottom() + obj->size())); + heap->card_scan()->mark_range_as_dirty(bottom(), obj->size()); + } +} + +void ShenandoahHeapRegion::decrement_humongous_waste() const { + assert(is_humongous(), "Should only use this for humongous regions"); + size_t waste_bytes = free(); + if (waste_bytes > 0) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGeneration* generation = heap->generation_for(affiliation()); + heap->decrease_humongous_waste(generation, waste_bytes); + } +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 755e2cc1c9a..9577b6bab3d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +28,7 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/spaceDecorator.hpp" +#include "gc/shenandoah/shenandoahAffiliation.hpp" #include "gc/shenandoah/shenandoahAllocRequest.hpp" #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" @@ -163,17 +165,18 @@ class ShenandoahHeapRegion { void report_illegal_transition(const char* method); public: - static const int region_states_num() { + static int region_states_num() { return _REGION_STATES_NUM; } // Allowed transitions from the outside code: - void make_regular_allocation(); + void make_regular_allocation(ShenandoahAffiliation affiliation); + void make_young_maybe(); void make_regular_bypass(); void make_humongous_start(); void make_humongous_cont(); - void make_humongous_start_bypass(); - void make_humongous_cont_bypass(); + void make_humongous_start_bypass(ShenandoahAffiliation affiliation); + void make_humongous_cont_bypass(ShenandoahAffiliation affiliation); void make_pinned(); void make_unpinned(); void make_cset(); @@ -198,6 +201,9 @@ class ShenandoahHeapRegion { bool is_committed() const { return !is_empty_uncommitted(); } bool is_cset() const { return _state == _cset || _state == _pinned_cset; } bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; } + inline bool is_young() const; + inline bool is_old() const; + inline bool is_affiliated() const; // Macro-properties: bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; } @@ -232,20 +238,27 @@ class ShenandoahHeapRegion { HeapWord* _new_top; double _empty_time; + HeapWord* _top_before_promoted; + // Seldom updated fields RegionState _state; + HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates. 
// Frequently updated fields HeapWord* _top; size_t _tlab_allocs; size_t _gclab_allocs; + size_t _plab_allocs; volatile size_t _live_data; volatile size_t _critical_pins; HeapWord* volatile _update_watermark; + uint _age; + CENSUS_NOISE(uint _youth;) // tracks epochs of retrograde ageing (rejuvenation) + public: ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed); @@ -334,8 +347,16 @@ class ShenandoahHeapRegion { return _index; } - // Allocation (return null if full) - inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type); + inline void save_top_before_promote(); + inline HeapWord* get_top_before_promote() const { return _top_before_promoted; } + inline void restore_top_before_promote(); + inline size_t garbage_before_padded_for_promote() const; + + // Allocation (return nullptr if full) + inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_words); + + // Allocation (return nullptr if full) + inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req); inline void clear_live_data(); void set_live_data(size_t s); @@ -356,7 +377,41 @@ class ShenandoahHeapRegion { void recycle(); - void oop_iterate(OopIterateClosure* cl); + inline void begin_preemptible_coalesce_and_fill() { + _coalesce_and_fill_boundary = _bottom; + } + + inline void end_preemptible_coalesce_and_fill() { + _coalesce_and_fill_boundary = _end; + } + + inline void suspend_coalesce_and_fill(HeapWord* next_focus) { + _coalesce_and_fill_boundary = next_focus; + } + + inline HeapWord* resume_coalesce_and_fill() { + return _coalesce_and_fill_boundary; + } + + // Coalesce contiguous spans of garbage objects by filling header and reregistering start locations with remembered set. + // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable. Return true iff + // region is completely coalesced and filled. Returns false if cancelled before task is complete. + bool oop_fill_and_coalesce(); + + // Like oop_fill_and_coalesce(), but without honoring cancellation requests. + bool oop_fill_and_coalesce_without_cancel(); + + // During global collections, this service iterates through an old-gen heap region that is not part of collection + // set to fill and register ranges of dead memory. Note that live objects were previously registered. Some dead objects + // that are subsumed into coalesced ranges of dead memory need to be "unregistered". + void global_oop_iterate_and_fill_dead(OopIterateClosure* cl); + void oop_iterate_humongous(OopIterateClosure* cl); + void oop_iterate_humongous(OopIterateClosure* cl, HeapWord* start, size_t words); + + // Invoke closure on every reference contained within the humongous object that spans this humongous + // region if the reference is contained within a DIRTY card and the reference is no more than words following + // start within the humongous object. + void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table); HeapWord* block_start(const void* p) const; size_t block_size(const HeapWord* p) const; @@ -376,24 +431,61 @@ class ShenandoahHeapRegion { size_t capacity() const { return byte_size(bottom(), end()); } size_t used() const { return byte_size(bottom(), top()); } + size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); } size_t free() const { return byte_size(top(), end()); } + // Does this region contain this address? 
+ bool contains(HeapWord* p) const { + return (bottom() <= p) && (p < top()); + } + inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t); void reset_alloc_metadata(); size_t get_shared_allocs() const; size_t get_tlab_allocs() const; size_t get_gclab_allocs() const; + size_t get_plab_allocs() const; inline HeapWord* get_update_watermark() const; inline void set_update_watermark(HeapWord* w); inline void set_update_watermark_at_safepoint(HeapWord* w); + inline ShenandoahAffiliation affiliation() const; + inline const char* affiliation_name() const; + + void set_affiliation(ShenandoahAffiliation new_affiliation); + + // Region ageing and rejuvenation + uint age() { return _age; } + CENSUS_NOISE(uint youth() { return _youth; }) + + void increment_age() { + const uint max_age = markWord::max_age; + assert(_age <= max_age, "Error"); + if (_age++ >= max_age) { + _age = max_age; // clamp + } + } + + void reset_age() { + CENSUS_NOISE(_youth += _age;) + _age = 0; + } + + CENSUS_NOISE(void clear_youth() { _youth = 0; }) + + // Register all objects. Set all remembered set cards to dirty. + void promote_humongous(); + void promote_in_place(); + private: + void decrement_humongous_waste() const; void do_commit(); void do_uncommit(); - void oop_iterate_objects(OopIterateClosure* cl); - void oop_iterate_humongous(OopIterateClosure* cl); + // This is an old-region that was not part of the collection set during a GLOBAL collection. We coalesce the dead + // objects, but do not need to register the live objects as they are already registered. + void global_oop_iterate_objects_and_fill_dead(OopIterateClosure* cl); inline void internal_increase_live_data(size_t s); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index 0435333fe1e..04d2a3e1408 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,14 +32,85 @@ #include "gc/shenandoah/shenandoahPacer.inline.hpp" #include "runtime/atomic.hpp" -HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) { +// If next available memory is not aligned on address that is multiple of alignment, fill the empty space +// so that returned object is aligned on an address that is a multiple of alignment_in_words. Requested +// size is in words. It is assumed that this->is_old(). A pad object is allocated, filled, and registered +// if necessary to assure the new allocation is properly aligned. +HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) { + shenandoah_assert_heaplocked_or_safepoint(); + assert(req.is_lab_alloc(), "allocate_aligned() only applies to LAB allocations"); + assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size); + assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs"); + + HeapWord* orig_top = top(); + size_t addr_as_int = (uintptr_t) orig_top; + + // unalignment_bytes is the amount by which current top() exceeds the desired alignment point. 
We subtract this amount + // from alignment_in_bytes to determine padding required to next alignment point. + + // top is HeapWord-aligned so unalignment_bytes is a multiple of HeapWordSize + size_t unalignment_bytes = addr_as_int % alignment_in_bytes; + size_t unalignment_words = unalignment_bytes / HeapWordSize; + + size_t pad_words; + HeapWord* aligned_obj; + if (unalignment_words > 0) { + pad_words = (alignment_in_bytes / HeapWordSize) - unalignment_words; + if (pad_words < ShenandoahHeap::min_fill_size()) { + pad_words += (alignment_in_bytes / HeapWordSize); + } + aligned_obj = orig_top + pad_words; + } else { + pad_words = 0; + aligned_obj = orig_top; + } + + if (pointer_delta(end(), aligned_obj) < size) { + size = pointer_delta(end(), aligned_obj); + // Force size to align on multiple of alignment_in_bytes + size_t byte_size = size * HeapWordSize; + size_t excess_bytes = byte_size % alignment_in_bytes; + // Note: excess_bytes is a multiple of HeapWordSize because it is the difference of HeapWord-aligned end + // and proposed HeapWord-aligned object address. + if (excess_bytes > 0) { + size -= excess_bytes / HeapWordSize; + } + } + + // Both originally requested size and adjusted size must be properly aligned + assert ((size * HeapWordSize) % alignment_in_bytes == 0, "Size must be multiple of alignment constraint"); + if (size >= req.min_size()) { + // Even if req.min_size() is not a multiple of card size, we know that size is. + if (pad_words > 0) { + assert(pad_words >= ShenandoahHeap::min_fill_size(), "pad_words expanded above to meet size constraint"); + ShenandoahHeap::fill_with_object(orig_top, pad_words); + ShenandoahHeap::heap()->card_scan()->register_object(orig_top); + } + + make_regular_allocation(req.affiliation()); + adjust_alloc_metadata(req.type(), size); + + HeapWord* new_top = aligned_obj + size; + assert(new_top <= end(), "PLAB cannot span end of heap region"); + set_top(new_top); + req.set_actual_size(size); + req.set_waste(pad_words); + assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top)); + assert(is_aligned(aligned_obj, alignment_in_bytes), "obj is not aligned: " PTR_FORMAT, p2i(aligned_obj)); + return aligned_obj; + } else { + return nullptr; + } +} + +HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest req) { shenandoah_assert_heaplocked_or_safepoint(); assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size); HeapWord* obj = top(); if (pointer_delta(end(), obj) >= size) { - make_regular_allocation(); - adjust_alloc_metadata(type, size); + make_regular_allocation(req.affiliation()); + adjust_alloc_metadata(req.type(), size); HeapWord* new_top = obj + size; set_top(new_top); @@ -64,6 +136,9 @@ inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest:: case ShenandoahAllocRequest::_alloc_gclab: _gclab_allocs += size; break; + case ShenandoahAllocRequest::_alloc_plab: + _plab_allocs += size; + break; default: ShouldNotReachHere(); } @@ -82,12 +157,6 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) { inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed); -#ifdef ASSERT - size_t live_bytes = new_live_data * HeapWordSize; - size_t used_bytes = used(); - assert(live_bytes <= used_bytes, - "can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes); -#endif } inline void ShenandoahHeapRegion::clear_live_data() 
{ @@ -115,6 +184,17 @@ inline size_t ShenandoahHeapRegion::garbage() const { return result; } +inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const { + assert(get_top_before_promote() != nullptr, "top before promote should not equal null"); + size_t used_before_promote = byte_size(bottom(), get_top_before_promote()); + assert(used_before_promote >= get_live_data_bytes(), + "Live Data must be a subset of used before promotion live: " SIZE_FORMAT " used: " SIZE_FORMAT, + get_live_data_bytes(), used_before_promote); + size_t result = used_before_promote - get_live_data_bytes(); + return result; + +} + inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const { HeapWord* watermark = Atomic::load_acquire(&_update_watermark); assert(bottom() <= watermark && watermark <= top(), "within bounds"); @@ -133,4 +213,34 @@ inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) _update_watermark = w; } +inline ShenandoahAffiliation ShenandoahHeapRegion::affiliation() const { + return ShenandoahHeap::heap()->region_affiliation(this); +} + +inline const char* ShenandoahHeapRegion::affiliation_name() const { + return shenandoah_affiliation_name(affiliation()); +} + +inline bool ShenandoahHeapRegion::is_young() const { + return affiliation() == YOUNG_GENERATION; +} + +inline bool ShenandoahHeapRegion::is_old() const { + return affiliation() == OLD_GENERATION; +} + +inline bool ShenandoahHeapRegion::is_affiliated() const { + return affiliation() != FREE; +} + +inline void ShenandoahHeapRegion::save_top_before_promote() { + _top_before_promoted = _top; +} + +inline void ShenandoahHeapRegion::restore_top_before_promote() { + _top = _top_before_promoted; + _top_before_promoted = nullptr; + } + + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp index 3fb6b329f2c..66b6ce2bfaf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,15 +22,18 @@ * questions. 
* */ - #include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahHeapRegionCounters.hpp" +#include "logging/logStream.hpp" #include "memory/resourceArea.hpp" #include "runtime/atomic.hpp" #include "runtime/perfData.inline.hpp" +#include "utilities/defaultStream.hpp" ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : _last_sample_millis(0) @@ -41,7 +45,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : size_t num_regions = heap->num_regions(); const char* cns = PerfDataManager::name_space("shenandoah", "regions"); _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); - strcpy(_name_space, cns); + strcpy(_name_space, cns); // copy cns into _name_space const char* cname = PerfDataManager::counter_name(_name_space, "timestamp"); _timestamp = PerfDataManager::create_long_variable(SUN_GC, cname, PerfData::U_None, CHECK); @@ -49,6 +53,9 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : cname = PerfDataManager::counter_name(_name_space, "max_regions"); PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, num_regions, CHECK); + cname = PerfDataManager::counter_name(_name_space, "protocol_version"); //creating new protocol_version + PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, VERSION_NUMBER, CHECK); + cname = PerfDataManager::counter_name(_name_space, "region_size"); PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, ShenandoahHeapRegion::region_size_bytes() >> 10, CHECK); @@ -57,6 +64,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : PerfData::U_None, CHECK); _regions_data = NEW_C_HEAP_ARRAY(PerfVariable*, num_regions, mtGC); + // Initializing performance data resources for each region for (uint i = 0; i < num_regions; i++) { const char* reg_name = PerfDataManager::name_space(_name_space, "region", i); const char* data_name = PerfDataManager::counter_name(reg_name, "data"); @@ -66,6 +74,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : _regions_data[i] = PerfDataManager::create_long_variable(SUN_GC, data_name, PerfData::U_None, CHECK); } + } } @@ -73,27 +82,42 @@ ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() { if (_name_space != nullptr) FREE_C_HEAP_ARRAY(char, _name_space); } +void ShenandoahHeapRegionCounters::write_snapshot(PerfLongVariable** regions, + PerfLongVariable* ts, + PerfLongVariable* status, + size_t num_regions, + size_t region_size, size_t protocol_version) { + LogTarget(Trace, gc, region) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(lt); + + ls.print_cr(JLONG_FORMAT " " JLONG_FORMAT " " SIZE_FORMAT " " SIZE_FORMAT " " SIZE_FORMAT, + ts->get_value(), status->get_value(), num_regions, region_size, protocol_version); + if (num_regions > 0) { + ls.print(JLONG_FORMAT, regions[0]->get_value()); + } + for (uint i = 1; i < num_regions; ++i) { + ls.print(" " JLONG_FORMAT, regions[i]->get_value()); + } + ls.cr(); + } +} + void ShenandoahHeapRegionCounters::update() { if (ShenandoahRegionSampling) { jlong current = nanos_to_millis(os::javaTimeNanos()); jlong last = _last_sample_millis; - if (current - last > ShenandoahRegionSamplingRate && - Atomic::cmpxchg(&_last_sample_millis, last, current) == last) { + if (current - last > ShenandoahRegionSamplingRate && Atomic::cmpxchg(&_last_sample_millis, last, current) == 
last) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - jlong status = 0; - if (heap->is_concurrent_mark_in_progress()) status |= 1 << 0; - if (heap->is_evacuation_in_progress()) status |= 1 << 1; - if (heap->is_update_refs_in_progress()) status |= 1 << 2; - _status->set_value(status); - + _status->set_value(encode_heap_status(heap)); _timestamp->set_value(os::elapsed_counter()); - size_t num_regions = heap->num_regions(); - { ShenandoahHeapLocker locker(heap->lock()); size_t rs = ShenandoahHeapRegion::region_size_bytes(); + size_t num_regions = heap->num_regions(); for (uint i = 0; i < num_regions; i++) { ShenandoahHeapRegion* r = heap->get_region(i); jlong data = 0; @@ -101,12 +125,79 @@ void ShenandoahHeapRegionCounters::update() { data |= ((100 * r->get_live_data_bytes() / rs) & PERCENT_MASK) << LIVE_SHIFT; data |= ((100 * r->get_tlab_allocs() / rs) & PERCENT_MASK) << TLAB_SHIFT; data |= ((100 * r->get_gclab_allocs() / rs) & PERCENT_MASK) << GCLAB_SHIFT; + data |= ((100 * r->get_plab_allocs() / rs) & PERCENT_MASK) << PLAB_SHIFT; data |= ((100 * r->get_shared_allocs() / rs) & PERCENT_MASK) << SHARED_SHIFT; + + data |= (r->age() & AGE_MASK) << AGE_SHIFT; + data |= (r->affiliation() & AFFILIATION_MASK) << AFFILIATION_SHIFT; data |= (r->state_ordinal() & STATUS_MASK) << STATUS_SHIFT; _regions_data[i]->set_value(data); } + + // If logging enabled, dump current region snapshot to log file + write_snapshot(_regions_data, _timestamp, _status, num_regions, rs >> 10, VERSION_NUMBER); } + } + } +} + +static int encode_phase(ShenandoahHeap* heap) { + if (heap->is_evacuation_in_progress() || heap->is_full_gc_move_in_progress()) { + return 2; + } + if (heap->is_update_refs_in_progress() || heap->is_full_gc_move_in_progress()) { + return 3; + } + if (heap->is_concurrent_mark_in_progress() || heap->is_full_gc_in_progress()) { + return 1; + } + assert(heap->is_idle(), "What is it doing?"); + return 0; +} + +static int get_generation_shift(ShenandoahGeneration* generation) { + switch (generation->type()) { + case GLOBAL_NON_GEN: + case GLOBAL_GEN: + return 0; + case OLD: + return 2; + case YOUNG: + return 4; + default: + ShouldNotReachHere(); + return -1; + } +} + +jlong ShenandoahHeapRegionCounters::encode_heap_status(ShenandoahHeap* heap) { + + if (heap->is_idle() && !heap->is_full_gc_in_progress()) { + return 0; + } + jlong status = 0; + if (!heap->mode()->is_generational()) { + status = encode_phase(heap); + } else { + int phase = encode_phase(heap); + ShenandoahGeneration* generation = heap->active_generation(); + assert(generation != nullptr, "Expected active generation in this mode."); + int shift = get_generation_shift(generation); + status |= ((phase & 0x3) << shift); + if (heap->is_concurrent_old_mark_in_progress()) { + status |= (1 << 2); } + log_develop_trace(gc)("%s, phase=%u, old_mark=%s, status=" JLONG_FORMAT, + generation->name(), phase, BOOL_TO_STR(heap->is_concurrent_old_mark_in_progress()), status); } + + if (heap->is_degenerated_gc_in_progress()) { + status |= (1 << 6); + } + if (heap->is_full_gc_in_progress()) { + status |= (1 << 7); + } + + return status; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp index f0d4c2ad38f..5d56b9c3c09 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved. 
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +27,7 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP #include "memory/allocation.hpp" +#include "logging/logFileStreamOutput.hpp" /** * This provides the following in JVMStat: @@ -37,9 +39,14 @@ * * variables: * - sun.gc.shenandoah.regions.status current GC status: - * - bit 0 set when marking in progress - * - bit 1 set when evacuation in progress - * - bit 2 set when update refs in progress + * | global | old | young | mode | + * | 0..1 | 2..3 | 4..5 | 6..7 | + * + * For each generation: + * 0 = idle, 1 = marking, 2 = evacuating, 3 = updating refs + * + * For mode: + * 0 = concurrent, 1 = degenerated, 2 = full * * two variable counters per region, with $max_regions (see above) counters: * - sun.gc.shenandoah.regions.region.$i.data @@ -51,24 +58,31 @@ * - bits 14-20 tlab allocated memory in percent * - bits 21-27 gclab allocated memory in percent * - bits 28-34 shared allocated memory in percent - * - bits 35-41 + * - bits 35-41 plab allocated memory in percent * - bits 42-50 - * - bits 51-57 + * - bits 51-55 age + * - bits 56-57 affiliation: 0 = free, young = 1, old = 2 * - bits 58-63 status * - bits describe the state as recorded in ShenandoahHeapRegion */ class ShenandoahHeapRegionCounters : public CHeapObj { private: - static const jlong PERCENT_MASK = 0x7f; - static const jlong STATUS_MASK = 0x3f; + static const jlong PERCENT_MASK = 0x7f; + static const jlong AGE_MASK = 0x1f; + static const jlong AFFILIATION_MASK = 0x03; + static const jlong STATUS_MASK = 0x3f; - static const jlong USED_SHIFT = 0; - static const jlong LIVE_SHIFT = 7; - static const jlong TLAB_SHIFT = 14; - static const jlong GCLAB_SHIFT = 21; - static const jlong SHARED_SHIFT = 28; + static const jlong USED_SHIFT = 0; + static const jlong LIVE_SHIFT = 7; + static const jlong TLAB_SHIFT = 14; + static const jlong GCLAB_SHIFT = 21; + static const jlong SHARED_SHIFT = 28; + static const jlong PLAB_SHIFT = 35; + static const jlong AGE_SHIFT = 51; + static const jlong AFFILIATION_SHIFT = 56; + static const jlong STATUS_SHIFT = 58; - static const jlong STATUS_SHIFT = 58; + static const jlong VERSION_NUMBER = 2; char* _name_space; PerfLongVariable** _regions_data; @@ -76,10 +90,20 @@ class ShenandoahHeapRegionCounters : public CHeapObj { PerfLongVariable* _status; volatile jlong _last_sample_millis; + void write_snapshot(PerfLongVariable** regions, + PerfLongVariable* ts, + PerfLongVariable* status, + size_t num_regions, + size_t region_size, size_t protocolVersion); + + uint _count = 0; public: ShenandoahHeapRegionCounters(); ~ShenandoahHeapRegionCounters(); void update(); + +private: + static jlong encode_heap_status(ShenandoahHeap* heap) ; }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp index 9f0233dd08c..24f98322490 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,37 +30,26 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "gc/shenandoah/mode/shenandoahMode.hpp" #include "logging/log.hpp" -#include "runtime/globals.hpp" #include "utilities/globalDefinitions.hpp" +void ShenandoahInitLogger::print() { + ShenandoahInitLogger init_log; + init_log.print_all(); +} + void ShenandoahInitLogger::print_heap() { GCInitLogger::print_heap(); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - - log_info(gc, init)("Mode: %s", - heap->mode()->name()); - - log_info(gc, init)("Heuristics: %s", - heap->heuristics()->name()); - - log_info(gc, init)("Heap Region Count: " SIZE_FORMAT, - ShenandoahHeapRegion::region_count()); - - log_info(gc, init)("Heap Region Size: " SIZE_FORMAT "%s", - byte_size_in_exact_unit(ShenandoahHeapRegion::region_size_bytes()), - exact_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); - - log_info(gc, init)("TLAB Size Max: " SIZE_FORMAT "%s", - byte_size_in_exact_unit(ShenandoahHeapRegion::max_tlab_size_bytes()), - exact_unit_for_byte_size(ShenandoahHeapRegion::max_tlab_size_bytes())); - - log_info(gc, init)("Humongous Object Threshold: " SIZE_FORMAT "%s", - byte_size_in_exact_unit(ShenandoahHeapRegion::humongous_threshold_bytes()), - exact_unit_for_byte_size(ShenandoahHeapRegion::humongous_threshold_bytes())); + log_info(gc, init)("Heap Region Count: " SIZE_FORMAT, ShenandoahHeapRegion::region_count()); + log_info(gc, init)("Heap Region Size: " PROPERFMT, PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes())); + log_info(gc, init)("TLAB Size Max: " PROPERFMT, PROPERFMTARGS(ShenandoahHeapRegion::max_tlab_size_bytes())); + log_info(gc, init)("Humongous Object Threshold: " PROPERFMT, PROPERFMTARGS(ShenandoahHeapRegion::humongous_threshold_bytes())); } -void ShenandoahInitLogger::print() { - ShenandoahInitLogger init_log; - init_log.print_all(); +void ShenandoahInitLogger::print_gc_specific() { + GCInitLogger::print_gc_specific(); + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + log_info(gc, init)("Mode: %s", heap->mode()->name()); + log_info(gc, init)("Heuristics: %s", heap->heuristics()->name()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp index 98c918c58f7..8c0da413399 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp @@ -29,7 +29,8 @@ class ShenandoahInitLogger : public GCInitLogger { protected: - virtual void print_heap(); + void print_heap() override; + void print_gc_specific() override; public: static void print(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp index 4725b8c3dfa..4e34e7d9707 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
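The `PROPERFMT`/`PROPERFMTARGS` pair adopted in the init logger above comes from HotSpot's globalDefinitions.hpp and, roughly, prints a byte count scaled to a human-readable unit, replacing the manual exact-unit calls. A standalone analogue of the effect — an approximation, not the actual macro bodies:

```cpp
#include <cstdio>
#include <cstddef>

// Approximates printing a size with a scaled unit, as the PROPERFMT-based
// log lines above do. Rounding behavior of the real macros may differ.
void print_size(const char* label, size_t bytes) {
  const char* units[] = {"B", "K", "M", "G"};
  int u = 0;
  size_t v = bytes;
  while (v >= 1024 && u < 3) { v /= 1024; u++; }
  printf("%s: %zu%s\n", label, v, units[u]);
}
```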
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +28,7 @@ #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahClosures.inline.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" @@ -34,17 +36,6 @@ #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" -ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) : - MetadataVisitingOopIterateClosure(rp), - _queue(q), - _mark_context(ShenandoahHeap::heap()->marking_context()), - _weak(false) -{ } - -ShenandoahMark::ShenandoahMark() : - _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) { -} - void ShenandoahMark::start_mark() { if (!CodeCache::is_gc_marking_cycle_active()) { CodeCache::on_gc_marking_cycle_start(); @@ -54,70 +45,101 @@ void ShenandoahMark::start_mark() { void ShenandoahMark::end_mark() { // Unlike other GCs, we do not arm the nmethods // when marking terminates. - CodeCache::on_gc_marking_cycle_finish(); + if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) { + CodeCache::on_gc_marking_cycle_finish(); + } } -void ShenandoahMark::clear() { - // Clean up marking stacks. - ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues(); - queues->clear(); +ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) : + MetadataVisitingOopIterateClosure(rp), + _queue(q), + _old_queue(old_q), + _mark_context(ShenandoahHeap::heap()->marking_context()), + _weak(false) +{ } - // Cancel SATB buffers. - ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking(); +ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) : + _generation(generation), + _task_queues(generation->task_queues()), + _old_gen_task_queues(generation->old_gen_task_queues()) { } -template -void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) { +template +void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) { ShenandoahObjToScanQueue* q = get_queue(w); + ShenandoahObjToScanQueue* old_q = get_old_queue(w); ShenandoahHeap* const heap = ShenandoahHeap::heap(); ShenandoahLiveData* ld = heap->get_liveness_cache(w); // TODO: We can clean up this if we figure out how to do templated oop closures that // play nice with specialized_oop_iterators. 
- if (heap->has_forwarded_objects()) { - using Closure = ShenandoahMarkUpdateRefsClosure; - Closure cl(q, rp); - mark_loop_work(&cl, ld, w, t, req); + if (update_refs) { + using Closure = ShenandoahMarkUpdateRefsClosure; + Closure cl(q, rp, old_q); + mark_loop_work(&cl, ld, w, t, req); } else { - using Closure = ShenandoahMarkRefsClosure; - Closure cl(q, rp); - mark_loop_work(&cl, ld, w, t, req); + using Closure = ShenandoahMarkRefsClosure; + Closure cl(q, rp, old_q); + mark_loop_work(&cl, ld, w, t, req); } heap->flush_liveness_cache(w); } -void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, - bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) { +template +void ShenandoahMark::mark_loop(ShenandoahGenerationType generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) { + bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects(); + switch (generation) { + case YOUNG: + mark_loop_prework(worker_id, terminator, rp, req, update_refs); + break; + case OLD: + // Old generation collection only performs marking, it should not update references. + mark_loop_prework(worker_id, terminator, rp, req, false); + break; + case GLOBAL_GEN: + mark_loop_prework(worker_id, terminator, rp, req, update_refs); + break; + case GLOBAL_NON_GEN: + mark_loop_prework(worker_id, terminator, rp, req, update_refs); + break; + default: + ShouldNotReachHere(); + break; + } +} + +void ShenandoahMark::mark_loop(ShenandoahGenerationType generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, + bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) { if (cancellable) { switch(dedup_mode) { case NO_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; case ENQUEUE_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; case ALWAYS_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; } } else { switch(dedup_mode) { case NO_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; case ENQUEUE_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; case ALWAYS_DEDUP: - mark_loop_prework(worker_id, terminator, rp, req); + mark_loop(generation, worker_id, terminator, rp, req); break; } } } -template +template void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) { uintx stride = ShenandoahMarkLoopStride; @@ -126,7 +148,8 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w ShenandoahObjToScanQueue* q; ShenandoahMarkTask t; - heap->ref_processor()->set_mark_closure(worker_id, cl); + assert(heap->active_generation()->type() == GENERATION, "Sanity"); + heap->active_generation()->ref_processor()->set_mark_closure(worker_id, cl); /* * Process outstanding queues, if any. 
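The `mark_loop` switch above is the standard trick for turning a runtime generation type into a compile-time template argument, so the hot marking loop specializes per generation instead of re-testing the mode on every reference. A minimal standalone sketch of the pattern, with hypothetical names — not the patch's actual signatures:

```cpp
enum ShenGenType { YOUNG_T, OLD_T, GLOBAL_T };

// Each instantiation resolves GEN at compile time, so generation-specific
// branches inside the loop body fold away in that instantiation.
template <ShenGenType GEN>
void mark_loop_impl(/* queues, terminator, ... */) {
  if (GEN == OLD_T) {
    // old-generation-only work
  }
  // common marking work
}

// Runtime value -> compile-time parameter, as the switch above does.
void mark_loop(ShenGenType gen) {
  switch (gen) {
    case YOUNG_T:  mark_loop_impl<YOUNG_T>();  break;
    case OLD_T:    mark_loop_impl<OLD_T>();    break;
    case GLOBAL_T: mark_loop_impl<GLOBAL_T>(); break;
  }
}
```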
@@ -146,7 +169,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w for (uint i = 0; i < stride; i++) { if (q->pop(t)) { - do_task(q, cl, live_data, req, &t); + do_task(q, cl, live_data, req, &t, worker_id); } else { assert(q->is_empty(), "Must be empty"); q = queues->claim_next(); @@ -155,8 +178,9 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w } } q = get_queue(worker_id); + ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id); - ShenandoahSATBBufferClosure drain_satb(q); + ShenandoahSATBBufferClosure drain_satb(q, old_q); SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); /* @@ -166,7 +190,6 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) { return; } - while (satb_mq_set.completed_buffers_num() > 0) { satb_mq_set.apply_closure_to_completed_buffer(&drain_satb); } @@ -175,7 +198,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w for (uint i = 0; i < stride; i++) { if (q->pop(t) || queues->steal(worker_id, t)) { - do_task(q, cl, live_data, req, &t); + do_task(q, cl, live_data, req, &t, worker_id); work++; } else { break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp index 078b89a4ce7..98e13dc74dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +26,7 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP #define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP +#include "gc/shared/ageTable.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/taskTerminator.hpp" #include "gc/shenandoah/shenandoahOopClosures.hpp" @@ -35,16 +37,16 @@ // maintained by task queues, mark bitmap and SATB buffers (concurrent mark) class ShenandoahMark: public StackObj { protected: + ShenandoahGeneration* const _generation; ShenandoahObjToScanQueueSet* const _task_queues; + ShenandoahObjToScanQueueSet* const _old_gen_task_queues; protected: - ShenandoahMark(); + ShenandoahMark(ShenandoahGeneration* generation); public: - template - static inline void mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak); - - static void clear(); + template + static inline void mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak); // Loom support void start_mark(); @@ -52,12 +54,20 @@ class ShenandoahMark: public StackObj { // Helpers inline ShenandoahObjToScanQueueSet* task_queues() const; + ShenandoahObjToScanQueueSet* old_task_queues() { + return _old_gen_task_queues; + } + inline ShenandoahObjToScanQueue* get_queue(uint index) const; + inline ShenandoahObjToScanQueue* get_old_queue(uint index) const; + + inline ShenandoahGeneration* generation() { return _generation; }; -// ---------- Marking loop and tasks private: - template - inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task); +// ---------- Marking loop and tasks + + template 
+ inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id); template inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array, bool weak); @@ -65,20 +75,31 @@ class ShenandoahMark: public StackObj { template inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow, bool weak); - inline void count_liveness(ShenandoahLiveData* live_data, oop obj); + template + inline void count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id); - template + template void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t, StringDedup::Requests* const req); - template - void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req); + template + void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs); + + template + static bool in_generation(ShenandoahHeap* const heap, oop obj); + + static void mark_ref(ShenandoahObjToScanQueue* q, + ShenandoahMarkingContext* const mark_context, + bool weak, oop obj); template inline void dedup_string(oop obj, StringDedup::Requests* const req); protected: - void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, + template + void mark_loop(ShenandoahGenerationType generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, + StringDedup::Requests* const req); + + void mark_loop(ShenandoahGenerationType generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP - diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp index db0b629f94e..36c72f4920e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,10 +57,14 @@ void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) { } } -template -void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) { +template +void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) { oop obj = task->obj(); + // TODO: This will push array chunks into the mark queue with no regard for + // generations. I don't think it will break anything, but the young generation + // scan might end up processing some old generation array chunks. 
+ shenandoah_assert_not_forwarded(nullptr, obj); shenandoah_assert_marked(nullptr, obj); shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc()); @@ -94,7 +99,7 @@ void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveD // Avoid double-counting objects that are visited twice due to upgrade // from final- to strong mark. if (task->count_liveness()) { - count_liveness(live_data, obj); + count_liveness(live_data, obj, worker_id); } } else { // Case 4: Array chunk, has sensible chunk id. Process it. @@ -102,14 +107,27 @@ void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveD } } -inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) { - ShenandoahHeap* const heap = ShenandoahHeap::heap(); - size_t region_idx = heap->heap_region_index_containing(obj); - ShenandoahHeapRegion* region = heap->get_region(region_idx); - size_t size = obj->size(); +template +inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) { + const ShenandoahHeap* const heap = ShenandoahHeap::heap(); + const size_t region_idx = heap->heap_region_index_containing(obj); + ShenandoahHeapRegion* const region = heap->get_region(region_idx); + const size_t size = obj->size(); + + // Age census for objects in the young generation + if (GENERATION == YOUNG || (GENERATION == GLOBAL_GEN && region->is_young())) { + assert(heap->mode()->is_generational(), "Only if generational"); + if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) { + assert(region->is_young(), "Only for young objects"); + uint age = ShenandoahHeap::get_object_age(obj); + CENSUS_NOISE(heap->age_census()->add(age, region->age(), region->youth(), size, worker_id);) + NO_CENSUS_NOISE(heap->age_census()->add(age, region->age(), size, worker_id);) + } + } if (!region->is_humongous_start()) { assert(!region->is_humongous(), "Cannot have continuations here"); + assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx); ShenandoahLiveData cur = live_data[region_idx]; size_t new_val = size + cur; if (new_val >= SHENANDOAH_LIVEDATA_MAX) { @@ -124,9 +142,11 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob shenandoah_assert_in_correct_region(nullptr, obj); size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx); for (size_t i = region_idx; i < region_idx + num_regions; i++) { ShenandoahHeapRegion* chain_reg = heap->get_region(i); assert(chain_reg->is_humongous(), "Expecting a humongous region"); + assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i); chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize); } } @@ -229,50 +249,101 @@ inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, array->oop_iterate_range(cl, from, to); } +template class ShenandoahSATBBufferClosure : public SATBBufferClosure { private: ShenandoahObjToScanQueue* _queue; + ShenandoahObjToScanQueue* _old_queue; ShenandoahHeap* _heap; ShenandoahMarkingContext* const _mark_context; public: - ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) : + ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) : _queue(q), + _old_queue(old_q), 
_heap(ShenandoahHeap::heap()), _mark_context(_heap->marking_context()) { } void do_buffer(void **buffer, size_t size) { - assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here"); + assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here"); for (size_t i = 0; i < size; ++i) { oop *p = (oop *) &buffer[i]; - ShenandoahMark::mark_through_ref(p, _queue, _mark_context, false); + ShenandoahMark::mark_through_ref(p, _queue, _old_queue, _mark_context, false); } } }; -template -inline void ShenandoahMark::mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) { +template +bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) { + // Each in-line expansion of in_generation() resolves GENERATION at compile time. + if (GENERATION == YOUNG) { + return heap->is_in_young(obj); + } else if (GENERATION == OLD) { + return heap->is_in_old(obj); + } else if (GENERATION == GLOBAL_GEN || GENERATION == GLOBAL_NON_GEN) { + return true; + } else { + return false; + } +} + +template +inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) { + // Note: This is a very hot code path, so the code should be conditional on GENERATION template + // parameter where possible, in order to generate the most efficient code. + T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); + ShenandoahHeap* heap = ShenandoahHeap::heap(); shenandoah_assert_not_forwarded(p, obj); - shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc()); - - bool skip_live = false; - bool marked; - if (weak) { - marked = mark_context->mark_weak(obj); - } else { - marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live); - } - if (marked) { - bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak)); - assert(pushed, "overflow queue should always succeed pushing"); + shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc()); + if (in_generation(heap, obj)) { + mark_ref(q, mark_context, weak, obj); + shenandoah_assert_marked(p, obj); + // TODO: As implemented herein, GLOBAL_GEN collections reconstruct the card table during GLOBAL_GEN concurrent + // marking. Note that the card table is cleaned at init_mark time so it needs to be reconstructed to support + // future young-gen collections. It might be better to reconstruct card table in + // ShenandoahHeapRegion::global_oop_iterate_and_fill_dead. We could either mark all live memory as dirty, or could + // use the GLOBAL update-refs scanning of pointers to determine precisely which cards to flag as dirty. + if (GENERATION == YOUNG && heap->is_in_old(p)) { + // Mark card as dirty because remembered set scanning still finds interesting pointer. + heap->mark_card_as_dirty((HeapWord*)p); + } else if (GENERATION == GLOBAL_GEN && heap->is_in_old(p) && heap->is_in_young(obj)) { + // Mark card as dirty because GLOBAL marking finds interesting pointer. + heap->mark_card_as_dirty((HeapWord*)p); + } + } else if (old_q != nullptr) { + // Young mark, bootstrapping old_q or concurrent with old_q marking. + mark_ref(old_q, mark_context, weak, obj); + shenandoah_assert_marked(p, obj); + } else if (GENERATION == OLD) { + // Old mark, found a young pointer. 
+ // TODO: Rethink this: may be redundant with dirtying of cards identified during young-gen remembered set scanning + // and by mutator write barriers. Assert + if (heap->is_in(p)) { + assert(heap->is_in_young(obj), "Expected young object."); + heap->mark_card_as_dirty(p); + } } + } +} - shenandoah_assert_marked(p, obj); +inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q, + ShenandoahMarkingContext* const mark_context, + bool weak, oop obj) { + bool skip_live = false; + bool marked; + if (weak) { + marked = mark_context->mark_weak(obj); + } else { + marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live); + } + if (marked) { + bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak)); + assert(pushed, "overflow queue should always succeed pushing"); } } @@ -283,4 +354,12 @@ ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const { ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const { return _task_queues->queue(index); } + +ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const { + if (_old_gen_task_queues != nullptr) { + return _old_gen_task_queues->queue(index); + } + return nullptr; +} + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp index 30389b4e95c..28eb2a9a515 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. and/or its affiliates. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,9 +44,32 @@ size_t ShenandoahMarkBitMap::mark_distance() { return MinObjAlignmentInBytes * BitsPerByte / 2; } +bool ShenandoahMarkBitMap::is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const { + // Similar to get_next_marked_addr(), without assertion. + // Round addr up to a possible object boundary to be safe. + if (start == end) { + return true; + } + size_t const addr_offset = address_to_index(align_up(start, HeapWordSize << LogMinObjAlignment)); + size_t const limit_offset = address_to_index(end); + size_t const next_offset = get_next_one_offset(addr_offset, limit_offset); + HeapWord* result = index_to_address(next_offset); + return (result == end); +} + + HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const { +#ifdef ASSERT + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapRegion* r = heap->heap_region_containing(addr); + ShenandoahMarkingContext* ctx = heap->marking_context(); + HeapWord* tams = ctx->top_at_mark_start(r); assert(limit != nullptr, "limit must not be null"); + assert(limit <= r->top(), "limit must be less than top"); + assert(addr <= tams, "addr must be less than TAMS"); +#endif + // Round addr up to a possible object boundary to be safe. 
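`mark_through_ref` above maintains the remembered-set invariant: whenever marking discovers an interesting pointer whose slot lives in old memory (old-to-young, or an old slot found during a young or global cycle), it dirties the covering card so later remembered-set scans revisit that slot. A minimal card-marking sketch under common card-table assumptions (512-byte cards, dirty encoded as zero — both assumptions here, not quoted from the patch):

```cpp
#include <cstdint>

const int     kCardShift = 9;  // 512-byte cards (assumption)
const uint8_t kDirtyCard = 0;  // dirty value (assumption)

// One byte of the card table covers one card; dirty the card covering 'slot'.
inline void mark_card_as_dirty(uint8_t* card_table_base, const void* slot) {
  card_table_base[reinterpret_cast<uintptr_t>(slot) >> kCardShift] = kDirtyCard;
}
```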
size_t const addr_offset = address_to_index(align_up(addr, HeapWordSize << LogMinObjAlignment)); size_t const limit_offset = address_to_index(limit); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp index 40f48bae6f5..e262429edbf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. and/or its affiliates. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -159,6 +160,8 @@ class ShenandoahMarkBitMap { inline bool is_marked_strong(HeapWord* w) const; inline bool is_marked_weak(HeapWord* addr) const; + bool is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const; + // Return the address corresponding to the next marked bit at or after // "addr", and before "limit", if "limit" is non-null. If there is no // such bit, returns "limit" if that is non-null, or else "endWord()". diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.cpp new file mode 100644 index 00000000000..e63e418a241 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahMarkClosures.hpp" +#include "gc/shenandoah/shenandoahMarkingContext.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" + + +ShenandoahFinalMarkUpdateRegionStateClosure::ShenandoahFinalMarkUpdateRegionStateClosure( + ShenandoahMarkingContext *ctx) : + _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()) {} + +void ShenandoahFinalMarkUpdateRegionStateClosure::heap_region_do(ShenandoahHeapRegion* r) { + if (r->is_active()) { + if (_ctx != nullptr) { + // _ctx may be null when this closure is used to sync only the pin status + // update the watermark of old regions. For old regions we cannot reset + // the TAMS because we rely on that to keep promoted objects alive after + // old marking is complete. 
+ + // All allocations past TAMS are implicitly live, adjust the region data. + // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. + HeapWord *tams = _ctx->top_at_mark_start(r); + HeapWord *top = r->top(); + if (top > tams) { + r->increase_live_data_alloc_words(pointer_delta(top, tams)); + } + } + + // We are about to select the collection set, make sure it knows about + // current pinning status. Also, this allows trashing more regions that + // now have their pinning status dropped. + if (r->is_pinned()) { + if (r->pin_count() == 0) { + ShenandoahHeapLocker locker(_lock); + r->make_unpinned(); + } + } else { + if (r->pin_count() > 0) { + ShenandoahHeapLocker locker(_lock); + r->make_pinned(); + } + } + + // Remember limit for updating refs. It's guaranteed that we get no + // from-space-refs written from here on. + r->set_update_watermark_at_safepoint(r->top()); + } else { + assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); + assert(_ctx == nullptr || _ctx->top_at_mark_start(r) == r->top(), + "Region " SIZE_FORMAT " should have correct TAMS", r->index()); + } +} + + +ShenandoahUpdateCensusZeroCohortClosure::ShenandoahUpdateCensusZeroCohortClosure( + ShenandoahMarkingContext *ctx) : + _ctx(ctx), _pop(0) {} + +void ShenandoahUpdateCensusZeroCohortClosure::heap_region_do(ShenandoahHeapRegion* r) { + if (_ctx != nullptr && r->is_active()) { + assert(r->is_young(), "Young regions only"); + HeapWord* tams = _ctx->top_at_mark_start(r); + HeapWord* top = r->top(); + if (top > tams) { + _pop += pointer_delta(top, tams); + } + } +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.hpp new file mode 100644 index 00000000000..3a8df3a46e3 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkClosures.hpp @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
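The region-state closure above leans on the TAMS rule: objects allocated after top-at-mark-start carry no mark bit yet are live by definition, so the words between TAMS and top are added to the region's live data. A sketch of that accounting, simplified to word indices — illustrative only:

```cpp
#include <cstddef>

struct RegionView {
  size_t bottom, tams, top;   // word indices, bottom <= tams <= top
  size_t marked_live_words;   // accumulated from the mark bitmap
};

// Total live words: bitmap-tallied liveness plus everything allocated
// after mark start, which is implicitly live.
size_t live_words(const RegionView& r) {
  return r.marked_live_words + (r.top - r.tams);
}
```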
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARKCLOSURES_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHMARKCLOSURES_HPP + +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahAgeCensus.hpp" + +class ShenandoahMarkingContext; +class ShenandoahHeapRegion; + +class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahMarkingContext* const _ctx; + ShenandoahHeapLock* const _lock; +public: + explicit ShenandoahFinalMarkUpdateRegionStateClosure(ShenandoahMarkingContext* ctx); + + void heap_region_do(ShenandoahHeapRegion* r); + + bool is_thread_safe() { return true; } +}; + +// Add [TAMS, top) volume over young regions. Used to correct age 0 cohort census +// for adaptive tenuring when census is taken during marking. +class ShenandoahUpdateCensusZeroCohortClosure : public ShenandoahHeapRegionClosure { +private: + ShenandoahMarkingContext* const _ctx; + size_t _pop; // running tally of population +public: + ShenandoahUpdateCensusZeroCohortClosure(ShenandoahMarkingContext* ctx); + + void heap_region_do(ShenandoahHeapRegion* r); + + size_t get_population() { return _pop; } +}; +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARKCLOSURES_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp index eaed74ceeb5..e031cc7c82c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,31 +26,14 @@ #include "precompiled.hpp" #include "gc/shared/markBitMap.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" -#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahMarkingContext.hpp" -#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" -#include "utilities/stack.inline.hpp" -ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions, uint max_queues) : +ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) : _mark_bit_map(heap_region, bitmap_region), _top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), _top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), _top_at_mark_starts(_top_at_mark_starts_base - - ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())), - _task_queues(new ShenandoahObjToScanQueueSet(max_queues)) { - assert(max_queues > 0, "At least one queue"); - for (uint i = 0; i < max_queues; ++i) { - ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); - _task_queues->register_queue(i, task_queue); - } -} - -ShenandoahMarkingContext::~ShenandoahMarkingContext() { - for (uint i = 0; i < _task_queues->size(); ++i) { - ShenandoahObjToScanQueue* q = _task_queues->queue(i); - delete q; - } - delete _task_queues; + ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) { } bool ShenandoahMarkingContext::is_bitmap_clear() const { @@ -57,31 +41,65 @@ bool ShenandoahMarkingContext::is_bitmap_clear() const { size_t num_regions = heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { 
ShenandoahHeapRegion* r = heap->get_region(idx); - if (heap->is_bitmap_slice_committed(r) && !is_bitmap_clear_range(r->bottom(), r->end())) { + if (r->is_affiliated() && heap->is_bitmap_slice_committed(r) && !is_bitmap_clear_range(r->bottom(), r->end())) { return false; } } return true; } -bool ShenandoahMarkingContext::is_bitmap_clear_range(HeapWord* start, HeapWord* end) const { - return _mark_bit_map.get_next_marked_addr(start, end) == end; +bool ShenandoahMarkingContext::is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const { + if (start < end) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t start_idx = heap->heap_region_index_containing(start); + size_t end_idx = heap->heap_region_index_containing(end - 1); + while (start_idx <= end_idx) { + ShenandoahHeapRegion* r = heap->get_region(start_idx); + if (!heap->is_bitmap_slice_committed(r)) { + return true; + } + start_idx++; + } + } + return _mark_bit_map.is_bitmap_clear_range(start, end); } void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion* r) { size_t idx = r->index(); HeapWord *bottom = r->bottom(); + _top_at_mark_starts_base[idx] = bottom; _top_bitmaps[idx] = bottom; + + log_debug(gc)("SMC:initialize_top_at_mark_start for Region " SIZE_FORMAT ", TAMS: " PTR_FORMAT ", TopOfBitMap: " PTR_FORMAT, + r->index(), p2i(bottom), p2i(r->end())); +} + +HeapWord* ShenandoahMarkingContext::top_bitmap(ShenandoahHeapRegion* r) { + return _top_bitmaps[r->index()]; } void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) { + if (!r->is_affiliated()) { + // Heap iterators include FREE regions, which don't need to be cleared. + // TODO: would be better for certain iterators to not include FREE regions. + return; + } + HeapWord* bottom = r->bottom(); HeapWord* top_bitmap = _top_bitmaps[r->index()]; + + log_debug(gc)("SMC:clear_bitmap for %s Region " SIZE_FORMAT ", top_bitmap: " PTR_FORMAT, + r->affiliation_name(), r->index(), p2i(top_bitmap)); + if (top_bitmap > bottom) { _mark_bit_map.clear_range_large(MemRegion(bottom, top_bitmap)); _top_bitmaps[r->index()] = bottom; } + + // TODO: Why is clear_live_data here? + r->clear_live_data(); + assert(is_bitmap_clear_range(bottom, r->end()), "Region " SIZE_FORMAT " should have no marks in bitmap", r->index()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp index d58117c02e2..1a77a0beb00 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,12 +48,8 @@ class ShenandoahMarkingContext : public CHeapObj { ShenandoahSharedFlag _is_complete; - // Marking task queues - ShenandoahObjToScanQueueSet* _task_queues; - public: - ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions, uint max_queues); - ~ShenandoahMarkingContext(); + ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions); /* * Marks the object. 
Returns true if the object has not been marked before and has @@ -63,32 +60,34 @@ class ShenandoahMarkingContext : public CHeapObj { inline bool mark_weak(oop obj); // Simple versions of marking accessors, to be used outside of marking (e.g. no possible concurrent updates) - inline bool is_marked(oop) const; - inline bool is_marked_strong(oop obj) const; - inline bool is_marked_weak(oop obj) const; + // TODO: Do these really need to be const? + inline bool is_marked(const oop) const; + inline bool is_marked_strong(const oop obj) const; + inline bool is_marked_weak(const oop obj) const; + inline bool is_marked_or_old(const oop obj) const; + inline bool is_marked_strong_or_old(const oop obj) const; - inline HeapWord* get_next_marked_addr(HeapWord* addr, HeapWord* limit) const; + inline HeapWord* get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const; - inline bool allocated_after_mark_start(oop obj) const; - inline bool allocated_after_mark_start(HeapWord* addr) const; + inline bool allocated_after_mark_start(const oop obj) const; + inline bool allocated_after_mark_start(const HeapWord* addr) const; - inline HeapWord* top_at_mark_start(ShenandoahHeapRegion* r) const; + inline HeapWord* top_at_mark_start(const ShenandoahHeapRegion* r) const; inline void capture_top_at_mark_start(ShenandoahHeapRegion* r); inline void reset_top_at_mark_start(ShenandoahHeapRegion* r); void initialize_top_at_mark_start(ShenandoahHeapRegion* r); + HeapWord* top_bitmap(ShenandoahHeapRegion* r); + inline void reset_top_bitmap(ShenandoahHeapRegion *r); void clear_bitmap(ShenandoahHeapRegion *r); bool is_bitmap_clear() const; - bool is_bitmap_clear_range(HeapWord* start, HeapWord* end) const; + bool is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const; bool is_complete(); void mark_complete(); void mark_incomplete(); - - // Task queues - ShenandoahObjToScanQueueSet* task_queues() const { return _task_queues; } }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp index 34b8288f476..5f039535404 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,8 +27,8 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP #include "gc/shenandoah/shenandoahMarkingContext.hpp" - #include "gc/shenandoah/shenandoahMarkBitMap.inline.hpp" +#include "logging/log.hpp" inline bool ShenandoahMarkingContext::mark_strong(oop obj, bool& was_upgraded) { return !allocated_after_mark_start(obj) && _mark_bit_map.mark_strong(cast_from_oop(obj), was_upgraded); @@ -37,35 +38,48 @@ inline bool ShenandoahMarkingContext::mark_weak(oop obj) { return !allocated_after_mark_start(obj) && _mark_bit_map.mark_weak(cast_from_oop(obj)); } -inline bool ShenandoahMarkingContext::is_marked(oop obj) const { +inline bool ShenandoahMarkingContext::is_marked(const oop obj) const { return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(cast_from_oop(obj)); } -inline bool ShenandoahMarkingContext::is_marked_strong(oop obj) const { +inline bool ShenandoahMarkingContext::is_marked_strong(const oop obj) const { return allocated_after_mark_start(obj) || _mark_bit_map.is_marked_strong(cast_from_oop(obj)); } -inline bool ShenandoahMarkingContext::is_marked_weak(oop obj) const { +inline bool ShenandoahMarkingContext::is_marked_weak(const oop obj) const { return allocated_after_mark_start(obj) || _mark_bit_map.is_marked_weak(cast_from_oop(obj)); } -inline HeapWord* ShenandoahMarkingContext::get_next_marked_addr(HeapWord* start, HeapWord* limit) const { +inline bool ShenandoahMarkingContext::is_marked_or_old(const oop obj) const { + return is_marked(obj) || ShenandoahHeap::heap()->is_old(obj); +} + +inline bool ShenandoahMarkingContext::is_marked_strong_or_old(const oop obj) const { + return is_marked_strong(obj) || ShenandoahHeap::heap()->is_old(obj); +} + +inline HeapWord* ShenandoahMarkingContext::get_next_marked_addr(const HeapWord* start, const HeapWord* limit) const { return _mark_bit_map.get_next_marked_addr(start, limit); } -inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const { - HeapWord* addr = cast_from_oop(obj); +inline bool ShenandoahMarkingContext::allocated_after_mark_start(const oop obj) const { + const HeapWord* addr = cast_from_oop(obj); return allocated_after_mark_start(addr); } -inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const { +inline bool ShenandoahMarkingContext::allocated_after_mark_start(const HeapWord* addr) const { uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift(); HeapWord* top_at_mark_start = _top_at_mark_starts[index]; - bool alloc_after_mark_start = addr >= top_at_mark_start; + const bool alloc_after_mark_start = addr >= top_at_mark_start; return alloc_after_mark_start; } inline void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRegion *r) { + if (!r->is_affiliated()) { + // Non-affiliated regions do not need their TAMS updated + return; + } + size_t idx = r->index(); HeapWord* old_tams = _top_at_mark_starts_base[idx]; HeapWord* new_tams = r->top(); @@ -73,19 +87,36 @@ inline void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRe assert(new_tams >= old_tams, "Region " SIZE_FORMAT", TAMS updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, idx, p2i(old_tams), p2i(new_tams)); - assert(is_bitmap_clear_range(old_tams, new_tams), + assert((new_tams == r->bottom()) || (old_tams == r->bottom()) || (new_tams >= _top_bitmaps[idx]), + "Region " SIZE_FORMAT", top_bitmaps updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, + 
idx, p2i(_top_bitmaps[idx]), p2i(new_tams)); + assert(old_tams == r->bottom() || is_bitmap_clear_range(old_tams, new_tams), "Region " SIZE_FORMAT ", bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT, idx, p2i(old_tams), p2i(new_tams)); + log_debug(gc)("Capturing TAMS for %s Region " SIZE_FORMAT ", was: " PTR_FORMAT ", now: " PTR_FORMAT, + r->affiliation_name(), idx, p2i(old_tams), p2i(new_tams)); + + if ((old_tams == r->bottom()) && (new_tams > old_tams)) { + log_debug(gc)("Clearing mark bitmap for %s Region " SIZE_FORMAT " while capturing TAMS", + r->affiliation_name(), idx); + // TODO: Do we really need to do bitmap clears here? + // This could take a while, and we would instead like to clear bitmaps outside the pause. + clear_bitmap(r); + } + _top_at_mark_starts_base[idx] = new_tams; - _top_bitmaps[idx] = new_tams; + if (new_tams > r->bottom()) { + // In this case, new_tams is greater than old _top_bitmaps[idx] + _top_bitmaps[idx] = new_tams; + } } inline void ShenandoahMarkingContext::reset_top_at_mark_start(ShenandoahHeapRegion* r) { _top_at_mark_starts_base[r->index()] = r->bottom(); } -inline HeapWord* ShenandoahMarkingContext::top_at_mark_start(ShenandoahHeapRegion* r) const { +inline HeapWord* ShenandoahMarkingContext::top_at_mark_start(const ShenandoahHeapRegion* r) const { return _top_at_mark_starts_base[r->index()]; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp index 339446e12e9..79d3d7c2e8a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
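`allocated_after_mark_start` above resolves an address to its region with a single shift by the log2 region size, then compares against that region's TAMS; capture/reset merely move the per-region TAMS pointer. A standalone sketch, ignoring the base-address bias the real code folds into its table pointer (region size constant is hypothetical):

```cpp
#include <cstdint>
#include <cstddef>

const int    kRegionShift = 21;   // e.g. 2 MB regions (assumption)
const size_t kMaxRegions  = 4096;
const char*  tams[kMaxRegions];   // per-region TAMS, set at mark start

bool allocated_after_mark_start(const void* addr) {
  size_t idx = reinterpret_cast<uintptr_t>(addr) >> kRegionShift;
  return reinterpret_cast<const char*>(addr) >= tams[idx];
}
```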
* * This code is free software; you can redistribute it and/or modify it @@ -24,14 +25,28 @@ #include "precompiled.hpp" #include "gc/shenandoah/shenandoahMemoryPool.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" -ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap) : - CollectedMemoryPool("Shenandoah", +ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap, + const char* name) : + CollectedMemoryPool(name, heap->initial_capacity(), heap->max_capacity(), true /* support_usage_threshold */), _heap(heap) {} +ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap, + const char* name, + size_t initial_capacity, + size_t max_capacity) : + CollectedMemoryPool(name, + initial_capacity, + max_capacity, + true /* support_usage_threshold */), + _heap(heap) {} + + MemoryUsage ShenandoahMemoryPool::get_memory_usage() { size_t initial = initial_size(); size_t max = max_size(); @@ -51,3 +66,57 @@ MemoryUsage ShenandoahMemoryPool::get_memory_usage() { return MemoryUsage(initial, used, committed, max); } + +size_t ShenandoahMemoryPool::used_in_bytes() { + return _heap->used(); +} + +size_t ShenandoahMemoryPool::max_size() const { + return _heap->max_capacity(); +} + +ShenandoahYoungGenMemoryPool::ShenandoahYoungGenMemoryPool(ShenandoahHeap* heap) : + ShenandoahMemoryPool(heap, + "Shenandoah Young Gen", + 0, + heap->max_capacity()) { } + +MemoryUsage ShenandoahYoungGenMemoryPool::get_memory_usage() { + size_t initial = initial_size(); + size_t max = max_size(); + size_t used = used_in_bytes(); + size_t committed = _heap->young_generation()->used_regions_size(); + + return MemoryUsage(initial, used, committed, max); +} + +size_t ShenandoahYoungGenMemoryPool::used_in_bytes() { + return _heap->young_generation()->used(); +} + +size_t ShenandoahYoungGenMemoryPool::max_size() const { + return _heap->young_generation()->max_capacity(); +} + +ShenandoahOldGenMemoryPool::ShenandoahOldGenMemoryPool(ShenandoahHeap* heap) : + ShenandoahMemoryPool(heap, + "Shenandoah Old Gen", + 0, + heap->max_capacity()) { } + +MemoryUsage ShenandoahOldGenMemoryPool::get_memory_usage() { + size_t initial = initial_size(); + size_t max = max_size(); + size_t used = used_in_bytes(); + size_t committed = _heap->old_generation()->used_regions_size(); + + return MemoryUsage(initial, used, committed, max); +} + +size_t ShenandoahOldGenMemoryPool::used_in_bytes() { + return _heap->old_generation()->used(); +} + +size_t ShenandoahOldGenMemoryPool::max_size() const { + return _heap->old_generation()->max_capacity(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp index 2149213afa8..3cbbc0e9a8b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,14 +33,37 @@ #endif class ShenandoahMemoryPool : public CollectedMemoryPool { -private: +protected: ShenandoahHeap* _heap; public: - ShenandoahMemoryPool(ShenandoahHeap* pool); - MemoryUsage get_memory_usage(); - size_t used_in_bytes() { return _heap->used(); } - size_t max_size() const { return _heap->max_capacity(); } + ShenandoahMemoryPool(ShenandoahHeap* pool, + const char* name = "Shenandoah"); + virtual MemoryUsage get_memory_usage(); + virtual size_t used_in_bytes(); + virtual size_t max_size() const; + +protected: + ShenandoahMemoryPool(ShenandoahHeap* pool, + const char* name, + size_t initial_capacity, + size_t max_capacity); +}; + +class ShenandoahYoungGenMemoryPool : public ShenandoahMemoryPool { +public: + ShenandoahYoungGenMemoryPool(ShenandoahHeap* pool); + MemoryUsage get_memory_usage() override; + size_t used_in_bytes() override; + size_t max_size() const override; +}; + +class ShenandoahOldGenMemoryPool : public ShenandoahMemoryPool { +public: + ShenandoahOldGenMemoryPool(ShenandoahHeap* pool); + MemoryUsage get_memory_usage() override; + size_t used_in_bytes() override; + size_t max_size() const override; }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMEMORYPOOL_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp new file mode 100644 index 00000000000..7d51f7b454e --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp @@ -0,0 +1,342 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
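The young and old pools above report initial = 0, used = the generation's used(), committed = the generation's used_regions_size(), and max = the generation's max_capacity(). Because regions change affiliation as they are promoted or recycled, "committed" here tracks affiliation rather than a fixed reservation. A compact sketch of the composition, with hypothetical names in place of HotSpot's MemoryUsage:

```cpp
#include <cstddef>

struct UsageView { size_t initial, used, committed, max; };

struct GenerationView {
  size_t used_bytes;          // bytes in use in this generation
  size_t used_regions_bytes;  // whole regions currently affiliated
  size_t max_capacity;
};

// Mirrors how the generation pools assemble their usage record above.
UsageView generation_pool_usage(const GenerationView& g) {
  return UsageView{0, g.used_bytes, g.used_regions_bytes, g.max_capacity};
}
```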
+ * + */ +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahMmuTracker.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "logging/log.hpp" +#include "runtime/os.hpp" +#include "runtime/task.hpp" + +class ShenandoahMmuTask : public PeriodicTask { + ShenandoahMmuTracker* _mmu_tracker; +public: + explicit ShenandoahMmuTask(ShenandoahMmuTracker* mmu_tracker) : + PeriodicTask(GCPauseIntervalMillis), _mmu_tracker(mmu_tracker) {} + + void task() override { + _mmu_tracker->report(); + } +}; + +class ThreadTimeAccumulator : public ThreadClosure { + public: + size_t total_time; + ThreadTimeAccumulator() : total_time(0) {} + void do_thread(Thread* thread) override { + total_time += os::thread_cpu_time(thread); + } +}; + +ShenandoahMmuTracker::ShenandoahMmuTracker() : + _most_recent_timestamp(0.0), + _most_recent_gc_time(0.0), + _most_recent_gcu(0.0), + _most_recent_mutator_time(0.0), + _most_recent_mu(0.0), + _most_recent_periodic_time_stamp(0.0), + _most_recent_periodic_gc_time(0.0), + _most_recent_periodic_mutator_time(0.0), + _mmu_periodic_task(new ShenandoahMmuTask(this)) { +} + +ShenandoahMmuTracker::~ShenandoahMmuTracker() { + _mmu_periodic_task->disenroll(); + delete _mmu_periodic_task; +} + +void ShenandoahMmuTracker::fetch_cpu_times(double &gc_time, double &mutator_time) { + ThreadTimeAccumulator cl; + // We include only the gc threads because those are the only threads + // we are responsible for. + ShenandoahHeap::heap()->gc_threads_do(&cl); + double most_recent_gc_thread_time = double(cl.total_time) / NANOSECS_PER_SEC; + gc_time = most_recent_gc_thread_time; + + double process_real_time(0.0), process_user_time(0.0), process_system_time(0.0); + bool valid = os::getTimesSecs(&process_real_time, &process_user_time, &process_system_time); + assert(valid, "don't know why this would not be valid"); + mutator_time =(process_user_time + process_system_time) - most_recent_gc_thread_time; +} + +void ShenandoahMmuTracker::update_utilization(size_t gcid, const char* msg) { + double current = os::elapsedTime(); + _most_recent_gcid = gcid; + _most_recent_is_full = false; + + if (gcid == 0) { + fetch_cpu_times(_most_recent_gc_time, _most_recent_mutator_time); + + _most_recent_timestamp = current; + } else { + double gc_cycle_period = current - _most_recent_timestamp; + _most_recent_timestamp = current; + + double gc_thread_time, mutator_thread_time; + fetch_cpu_times(gc_thread_time, mutator_thread_time); + double gc_time = gc_thread_time - _most_recent_gc_time; + _most_recent_gc_time = gc_thread_time; + _most_recent_gcu = gc_time / (_active_processors * gc_cycle_period); + double mutator_time = mutator_thread_time - _most_recent_mutator_time; + _most_recent_mutator_time = mutator_thread_time; + _most_recent_mu = mutator_time / (_active_processors * gc_cycle_period); + log_info(gc, ergo)("At end of %s: GCU: %.1f%%, MU: %.1f%% during period of %.3fs", + msg, _most_recent_gcu * 100, _most_recent_mu * 100, gc_cycle_period); + } +} + +void ShenandoahMmuTracker::record_young(size_t gcid) { + update_utilization(gcid, "Concurrent Young GC"); +} + +void ShenandoahMmuTracker::record_global(size_t gcid) { + update_utilization(gcid, "Concurrent Global GC"); +} + +void ShenandoahMmuTracker::record_bootstrap(size_t gcid) { + // Not likely that this will represent an "ideal" GCU, but doesn't hurt to try + update_utilization(gcid, 
"Concurrent Bootstrap GC"); +} + +void ShenandoahMmuTracker::record_old_marking_increment(bool old_marking_done) { + // No special processing for old marking + double now = os::elapsedTime(); + double duration = now - _most_recent_timestamp; + + double gc_time, mutator_time; + fetch_cpu_times(gc_time, mutator_time); + double gcu = (gc_time - _most_recent_gc_time) / duration; + double mu = (mutator_time - _most_recent_mutator_time) / duration; + log_info(gc, ergo)("At end of %s: GCU: %.1f%%, MU: %.1f%% for duration %.3fs (totals to be subsumed in next gc report)", + old_marking_done? "last OLD marking increment": "OLD marking increment", + gcu * 100, mu * 100, duration); +} + +void ShenandoahMmuTracker::record_mixed(size_t gcid) { + update_utilization(gcid, "Mixed Concurrent GC"); +} + +void ShenandoahMmuTracker::record_degenerated(size_t gcid, bool is_old_bootstrap) { + if ((gcid == _most_recent_gcid) && _most_recent_is_full) { + // Do nothing. This is a redundant recording for the full gc that just completed. + // TODO: avoid making the call to record_degenerated() in the case that this degenerated upgraded to full gc. + } else if (is_old_bootstrap) { + update_utilization(gcid, "Degenerated Bootstrap Old GC"); + } else { + update_utilization(gcid, "Degenerated Young GC"); + } +} + +void ShenandoahMmuTracker::record_full(size_t gcid) { + update_utilization(gcid, "Full GC"); + _most_recent_is_full = true; +} + +void ShenandoahMmuTracker::report() { + // This is only called by the periodic thread. + double current = os::elapsedTime(); + double time_delta = current - _most_recent_periodic_time_stamp; + _most_recent_periodic_time_stamp = current; + + double gc_time, mutator_time; + fetch_cpu_times(gc_time, mutator_time); + + double gc_delta = gc_time - _most_recent_periodic_gc_time; + _most_recent_periodic_gc_time = gc_time; + + double mutator_delta = mutator_time - _most_recent_periodic_mutator_time; + _most_recent_periodic_mutator_time = mutator_time; + + double mu = mutator_delta / (_active_processors * time_delta); + double gcu = gc_delta / (_active_processors * time_delta); + log_info(gc)("Periodic Sample: GCU = %.3f%%, MU = %.3f%% during most recent %.1fs", gcu * 100, mu * 100, time_delta); +} + +void ShenandoahMmuTracker::initialize() { + // initialize static data + _active_processors = os::initial_active_processor_count(); + + _most_recent_periodic_time_stamp = os::elapsedTime(); + fetch_cpu_times(_most_recent_periodic_gc_time, _most_recent_periodic_mutator_time); + _mmu_periodic_task->enroll(); +} + +ShenandoahGenerationSizer::ShenandoahGenerationSizer() + : _sizer_kind(SizerDefaults), + _min_desired_young_regions(0), + _max_desired_young_regions(0) { + + if (FLAG_IS_CMDLINE(NewRatio)) { + if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { + log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); + } else { + _sizer_kind = SizerNewRatio; + return; + } + } + + if (NewSize > MaxNewSize) { + if (FLAG_IS_CMDLINE(MaxNewSize)) { + log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + FLAG_SET_ERGO(MaxNewSize, NewSize); + } + + if (FLAG_IS_CMDLINE(NewSize)) { + _min_desired_young_regions = MAX2(uint(NewSize / ShenandoahHeapRegion::region_size_bytes()), 1U); + if (FLAG_IS_CMDLINE(MaxNewSize)) { + _max_desired_young_regions = MAX2(uint(MaxNewSize / ShenandoahHeapRegion::region_size_bytes()), 1U); + _sizer_kind = SizerMaxAndNewSize; + } else { + _sizer_kind = SizerNewSizeOnly; + } + } else if (FLAG_IS_CMDLINE(MaxNewSize)) { + _max_desired_young_regions = MAX2(uint(MaxNewSize / ShenandoahHeapRegion::region_size_bytes()), 1U); + _sizer_kind = SizerMaxNewSizeOnly; + } +} + +size_t ShenandoahGenerationSizer::calculate_min_young_regions(size_t heap_region_count) { + size_t min_young_regions = (heap_region_count * ShenandoahMinYoungPercentage) / 100; + return MAX2(min_young_regions, (size_t) 1U); +} + +size_t ShenandoahGenerationSizer::calculate_max_young_regions(size_t heap_region_count) { + size_t max_young_regions = (heap_region_count * ShenandoahMaxYoungPercentage) / 100; + return MAX2(max_young_regions, (size_t) 1U); +} + +void ShenandoahGenerationSizer::recalculate_min_max_young_length(size_t heap_region_count) { + assert(heap_region_count > 0, "Heap must be initialized"); + + switch (_sizer_kind) { + case SizerDefaults: + _min_desired_young_regions = calculate_min_young_regions(heap_region_count); + _max_desired_young_regions = calculate_max_young_regions(heap_region_count); + break; + case SizerNewSizeOnly: + _max_desired_young_regions = calculate_max_young_regions(heap_region_count); + _max_desired_young_regions = MAX2(_min_desired_young_regions, _max_desired_young_regions); + break; + case SizerMaxNewSizeOnly: + _min_desired_young_regions = calculate_min_young_regions(heap_region_count); + _min_desired_young_regions = MIN2(_min_desired_young_regions, _max_desired_young_regions); + break; + case SizerMaxAndNewSize: + // Do nothing. Values set on the command line, don't update them at runtime. 
+ break; + case SizerNewRatio: + _min_desired_young_regions = MAX2(uint(heap_region_count / (NewRatio + 1)), 1U); + _max_desired_young_regions = _min_desired_young_regions; + break; + default: + ShouldNotReachHere(); + } + + assert(_min_desired_young_regions <= _max_desired_young_regions, "Invalid min/max young gen size values"); +} + +void ShenandoahGenerationSizer::heap_size_changed(size_t heap_size) { + recalculate_min_max_young_length(heap_size / ShenandoahHeapRegion::region_size_bytes()); +} + +// Returns true iff transfer is successful +bool ShenandoahGenerationSizer::transfer_to_old(size_t regions) const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGeneration* old_gen = heap->old_generation(); + ShenandoahGeneration* young_gen = heap->young_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t bytes_to_transfer = regions * region_size_bytes; + + if (young_gen->free_unaffiliated_regions() < regions) { + return false; + } else if (old_gen->max_capacity() + bytes_to_transfer > heap->max_size_for(old_gen)) { + return false; + } else if (young_gen->max_capacity() - bytes_to_transfer < heap->min_size_for(young_gen)) { + return false; + } else { + young_gen->decrease_capacity(bytes_to_transfer); + old_gen->increase_capacity(bytes_to_transfer); + size_t new_size = old_gen->max_capacity(); + log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s", + regions, young_gen->name(), old_gen->name(), + byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size)); + return true; + } +} + +// This is used when promoting humongous or highly utilized regular regions in place. It is not required in this situation +// that the transferred regions be unaffiliated. 
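+// A hedged sketch contrasting the two entry points (using only names that appear
+// in this patch):
+//
+//   // End-of-cycle sizing decision: respects generation min/max capacities and
+//   // requires enough unaffiliated young regions, so it can refuse.
+//   if (!heap->generation_sizer()->transfer_to_old(old_region_deficit)) {
+//     // caller reacts, e.g. by triggering other remediation
+//   }
+//
+//   // In-place promotion of a humongous or highly utilized region: must
+//   // proceed, so the limits are not rechecked.
+//   heap->generation_sizer()->force_transfer_to_old(1);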
+void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGeneration* old_gen = heap->old_generation(); + ShenandoahGeneration* young_gen = heap->young_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t bytes_to_transfer = regions * region_size_bytes; + + young_gen->decrease_capacity(bytes_to_transfer); + old_gen->increase_capacity(bytes_to_transfer); + size_t new_size = old_gen->max_capacity(); + log_info(gc)("Forcing transfer of " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s", + regions, young_gen->name(), old_gen->name(), + byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size)); +} + + +bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahGeneration* old_gen = heap->old_generation(); + ShenandoahGeneration* young_gen = heap->young_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t bytes_to_transfer = regions * region_size_bytes; + + if (old_gen->free_unaffiliated_regions() < regions) { + return false; + } else if (young_gen->max_capacity() + bytes_to_transfer > heap->max_size_for(young_gen)) { + return false; + } else if (old_gen->max_capacity() - bytes_to_transfer < heap->min_size_for(old_gen)) { + return false; + } else { + old_gen->decrease_capacity(bytes_to_transfer); + young_gen->increase_capacity(bytes_to_transfer); + size_t new_size = young_gen->max_capacity(); + log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s", + regions, old_gen->name(), young_gen->name(), + byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size)); + return true; + } +} + +size_t ShenandoahGenerationSizer::min_young_size() const { + return min_young_regions() * ShenandoahHeapRegion::region_size_bytes(); +} + +size_t ShenandoahGenerationSizer::max_young_size() const { + return max_young_regions() * ShenandoahHeapRegion::region_size_bytes(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp new file mode 100644 index 00000000000..8c52ae3605d --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMMUTRACKER_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHMMUTRACKER_HPP + +#include "runtime/mutex.hpp" +#include "utilities/numberSeq.hpp" + +class ShenandoahGeneration; +class ShenandoahMmuTask; + +/** + * This class is responsible for tracking and adjusting the minimum mutator + * utilization (MMU). MMU is defined as the percentage of CPU time available + * to mutator threads over an arbitrary, fixed interval of time. This interval + * defaults to 5 seconds and is configured by GCPauseIntervalMillis. The class + * maintains a decaying average of the last 10 values. The MMU is measured + * by summing all of the time given to the GC threads and comparing this to + * the total CPU time for the process. There are OS APIs to support this on + * all major platforms. + * + * The time spent by GC threads is attributed to the young or old generation. + * The time given to the controller and regulator threads is attributed to the + * global generation. At the end of every collection, the average MMU is inspected. + * If it is below `GCTimeRatio`, this class will attempt to increase the capacity + * of the generation that is consuming the most CPU time. The assumption is + * that increasing memory will reduce the collection frequency and raise the + * MMU. + */ +class ShenandoahMmuTracker { +private: + // These variables hold recent snapshots of cumulative quantities that are used for calculating + // CPU time consumed by GC and mutator threads during each GC cycle. + double _most_recent_timestamp; + double _most_recent_gc_time; + double _most_recent_gcu; + double _most_recent_mutator_time; + double _most_recent_mu; + + // These variables hold recent snapshots of cumulative quantities that are used for reporting + // periodic consumption of CPU time by GC and mutator threads. + double _most_recent_periodic_time_stamp; + double _most_recent_periodic_gc_time; + double _most_recent_periodic_mutator_time; + + size_t _most_recent_gcid; + uint _active_processors; + + bool _most_recent_is_full; + + ShenandoahMmuTask* _mmu_periodic_task; + TruncatedSeq _mmu_average; + + void update_utilization(size_t gcid, const char* msg); + static void fetch_cpu_times(double &gc_time, double &mutator_time); + +public: + explicit ShenandoahMmuTracker(); + ~ShenandoahMmuTracker(); + + // This enrolls the periodic task after everything is initialized. + void initialize(); + + // At completion of each GC cycle (not including interrupted cycles), we invoke one of the following to record the + // GC utilization during this cycle. Incremental efforts spent in an interrupted GC cycle will be accumulated into + // the CPU time reports for the subsequent completed [degenerated or full] GC cycle. + // + // We may redundantly record degen and full in the case that a degen upgrades to full. When this happens, we will invoke + // both record_full() and record_degenerated() with the same value of gcid. record_full() is called first and the log + // reports such a cycle as a FULL cycle. + void record_young(size_t gcid); + void record_global(size_t gcid); + void record_bootstrap(size_t gcid); + void record_old_marking_increment(bool old_marking_done); + void record_mixed(size_t gcid); + void record_full(size_t gcid); + void record_degenerated(size_t gcid, bool is_old_bootstrap); + + // This is called by the periodic task timer. The interval is defined by + // GCPauseIntervalMillis and defaults to 5 seconds.
This method computes + // the MMU over the elapsed interval and records it in a running average. + void report(); +}; + +class ShenandoahGenerationSizer { +private: + enum SizerKind { + SizerDefaults, + SizerNewSizeOnly, + SizerMaxNewSizeOnly, + SizerMaxAndNewSize, + SizerNewRatio + }; + SizerKind _sizer_kind; + + size_t _min_desired_young_regions; + size_t _max_desired_young_regions; + + static size_t calculate_min_young_regions(size_t heap_region_count); + static size_t calculate_max_young_regions(size_t heap_region_count); + + // Update the given values for minimum and maximum young gen length in regions + // given the number of heap regions depending on the kind of sizing algorithm. + void recalculate_min_max_young_length(size_t heap_region_count); + +public: + ShenandoahGenerationSizer(); + + // Calculate the maximum length of the young gen given the number of regions + // depending on the sizing algorithm. + void heap_size_changed(size_t heap_size); + + // Minimum size of young generation in bytes as multiple of region size. + size_t min_young_size() const; + size_t min_young_regions() const { + return _min_desired_young_regions; + } + + // Maximum size of young generation in bytes as multiple of region size. + size_t max_young_size() const; + size_t max_young_regions() const { + return _max_desired_young_regions; + } + + bool transfer_to_young(size_t regions) const; + bool transfer_to_old(size_t regions) const; + + // force transfer is used when we promote humongous objects. May violate min/max limits on generation sizes + void force_transfer_to_old(size_t regions) const; +}; + +#endif //SHARE_GC_SHENANDOAH_SHENANDOAHMMUTRACKER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp index 980050b8b00..7be8cc53557 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp @@ -124,13 +124,13 @@ void ShenandoahNMethod::heal_nmethod(nmethod* nm) { assert(data->lock()->owned_by_self(), "Must hold the lock"); ShenandoahHeap* const heap = ShenandoahHeap::heap(); - if (heap->is_concurrent_mark_in_progress()) { - ShenandoahKeepAliveClosure cl; - data->oops_do(&cl); - } else if (heap->is_concurrent_weak_root_in_progress() || - heap->is_concurrent_strong_root_in_progress() ) { + if (heap->is_concurrent_weak_root_in_progress() || + heap->is_concurrent_strong_root_in_progress()) { ShenandoahEvacOOMScope evac_scope; heal_nmethod_metadata(data); + } else if (heap->is_concurrent_mark_in_progress()) { + ShenandoahKeepAliveClosure cl; + data->oops_do(&cl); } else { // There is possibility that GC is cancelled when it arrives final mark. // In this case, concurrent root phase is skipped and degenerated GC should be diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp index ec8f2231097..3c7ba8e4243 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -120,6 +121,70 @@ double HdrSeq::percentile(double level) const { return maximum(); } +void HdrSeq::add(const HdrSeq& other) { + if (other.num() == 0) { + // Other sequence is empty, return + return; + } + + for (int mag = 0; mag < MagBuckets; mag++) { + int* other_bucket = other._hdr[mag]; + if (other_bucket == nullptr) { + // Nothing to do + continue; + } + int* bucket = _hdr[mag]; + if (bucket != nullptr) { + // Add into our bucket + for (int val = 0; val < ValBuckets; val++) { + bucket[val] += other_bucket[val]; + } + } else { + // Create our bucket and copy the contents over + bucket = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal); + for (int val = 0; val < ValBuckets; val++) { + bucket[val] = other_bucket[val]; + } + _hdr[mag] = bucket; + } + } + + // This is a hacky way to only update the fields we want. + // This inlines NumberSeq code without going into AbsSeq and + // dealing with decayed average/variance, which we do not + // know how to compute yet. + _last = other._last; + _maximum = MAX2(_maximum, other._maximum); + _sum += other._sum; + _sum_of_squares += other._sum_of_squares; + _num += other._num; + + // Until JDK-8298902 is fixed, we taint the decaying statistics + _davg = NAN; + _dvariance = NAN; +} + +void HdrSeq::clear() { + // Clear the storage + for (int mag = 0; mag < MagBuckets; mag++) { + int* bucket = _hdr[mag]; + if (bucket != nullptr) { + for (int c = 0; c < ValBuckets; c++) { + bucket[c] = 0; + } + } + } + + // Clear other fields too + _last = 0; + _maximum = 0; + _sum = 0; + _sum_of_squares = 0; + _num = 0; + _davg = 0; + _dvariance = 0; +} + BinaryMagnitudeSeq::BinaryMagnitudeSeq() { _mags = NEW_C_HEAP_ARRAY(size_t, BitsPerSize_t, mtInternal); clear(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp index 42f91f6a9b8..68f3cfba97a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp @@ -49,7 +49,9 @@ class HdrSeq: public NumberSeq { ~HdrSeq(); virtual void add(double val); + void add(const HdrSeq& other); double percentile(double level) const; + void clear(); }; // Binary magnitude sequence stores the power-of-two histogram. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp new file mode 100644 index 00000000000..57663b48f04 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -0,0 +1,195 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahOldGC.hpp" +#include "gc/shenandoah/shenandoahOopClosures.inline.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "utilities/events.hpp" + + + + +ShenandoahOldGC::ShenandoahOldGC(ShenandoahGeneration* generation, ShenandoahSharedFlag& allow_preemption) : + ShenandoahConcurrentGC(generation, false), _allow_preemption(allow_preemption) { +} + +// Final mark for old-gen is different than for young or global, so we +// override the implementation. +void ShenandoahOldGC::op_final_mark() { + + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); + assert(!heap->has_forwarded_objects(), "No forwarded objects on this path"); + + if (ShenandoahVerify) { + heap->verifier()->verify_roots_no_forwarded(); + } + + if (!heap->cancelled_gc()) { + assert(_mark.generation()->is_old(), "Generation of Old-Gen GC should be OLD"); + _mark.finish_mark(); + assert(!heap->cancelled_gc(), "STW mark cannot OOM"); + + // Old collection is complete, the young generation no longer needs this + // reference to the old concurrent mark so clean it up. + heap->young_generation()->set_old_gen_task_queues(nullptr); + + // We need to do this because weak root cleaning reports the number of dead handles + JvmtiTagMap::set_needs_cleaning(); + + _generation->prepare_regions_and_collection_set(true); + + heap->set_unload_classes(false); + heap->prepare_concurrent_roots(); + + // We believe verification following old-gen concurrent mark needs to differ from verification following + // young-gen concurrent mark, so it is commented out for now: + // if (ShenandoahVerify) { + // heap->verifier()->verify_after_concmark(); + // } + + if (VerifyAfterGC) { + Universe::verify(); + } + } +} + +bool ShenandoahOldGC::collect(GCCause::Cause cause) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(!heap->doing_mixed_evacuations(), "Should not start an old gc with pending mixed evacuations"); + assert(!heap->is_prepare_for_old_mark_in_progress(), "Old regions need to be parsable during concurrent mark."); + + // Enable preemption of old generation mark. + _allow_preemption.set(); + + // Continue concurrent mark, do not reset regions, do not mark roots, do not collect $200. + entry_mark(); + + // If we failed to unset the preemption flag, it means another thread has already unset it. + if (!_allow_preemption.try_unset()) { + // The regulator thread has unset the preemption guard. That thread will shortly cancel + // the gc, but the control thread is now racing it. Wait until this thread sees the + // cancellation. + while (!heap->cancelled_gc()) { + SpinPause(); + } + } + + if (heap->cancelled_gc()) { + return false; + } + + // Complete marking under STW + vmop_entry_final_mark(); + + // We aren't dealing with old generation evacuation yet. Our heuristic + // should not have built a cset in final mark.
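+ // (Regions selected during final mark become candidates for later mixed
+ // collections, which are carried out by subsequent young cycles, so the old
+ // cycle itself never evacuates; the assert below checks exactly that.)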
+ assert(!heap->is_evacuation_in_progress(), "Old gen evacuations are not supported"); + + // Process weak roots that might still point to regions that would be broken by cleanup + if (heap->is_concurrent_weak_root_in_progress()) { + entry_weak_refs(); + entry_weak_roots(); + } + + // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim + // the space. This would be the last action if there is nothing to evacuate. + entry_cleanup_early(); + + { + ShenandoahHeapLocker locker(heap->lock()); + heap->free_set()->log_status(); + } + + + // TODO: Old marking doesn't support class unloading yet + // Perform concurrent class unloading + // if (heap->unload_classes() && + // heap->is_concurrent_weak_root_in_progress()) { + // entry_class_unloading(); + // } + + + assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc."); + + // We must execute this vm operation if we completed final mark. We cannot + // return from here with weak roots in progress. This is not a valid gc state + // for any young collections (or allocation failures) that interrupt the old + // collection. + vmop_entry_final_roots(); + + // We do not rebuild the free set following increments of old marking because memory has not been reclaimed. However, we may + // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. + size_t allocation_runway = heap->young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); + heap->adjust_generation_sizes_for_next_cycle(allocation_runway, 0, 0); + + bool success; + size_t region_xfer; + const char* region_destination; + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahGeneration* old_gen = heap->old_generation(); + { + ShenandoahHeapLocker locker(heap->lock()); + + size_t old_region_surplus = heap->get_old_region_surplus(); + size_t old_region_deficit = heap->get_old_region_deficit(); + if (old_region_surplus) { + success = heap->generation_sizer()->transfer_to_young(old_region_surplus); + region_destination = "young"; + region_xfer = old_region_surplus; + } else if (old_region_deficit) { + success = heap->generation_sizer()->transfer_to_old(old_region_deficit); + region_destination = "old"; + region_xfer = old_region_deficit; + if (!success) { + ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand(); + } + } else { + region_destination = "none"; + region_xfer = 0; + success = true; + } + heap->set_old_region_surplus(0); + heap->set_old_region_deficit(0); + } + + // Report outside the heap lock + size_t young_available = young_gen->available(); + size_t old_available = old_gen->available(); + log_info(gc, ergo)("After old marking finished, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: " + SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s", + success ? "successfully transferred" : "failed to transfer", region_xfer, region_destination, + byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available), + byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available)); + return true; +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp new file mode 100644 index 00000000000..e6ca77226d2 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHOLDGC_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHOLDGC_HPP + +#include "gc/shared/gcCause.hpp" +#include "gc/shenandoah/shenandoahConcurrentGC.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" + +class ShenandoahGeneration; + +class ShenandoahOldGC : public ShenandoahConcurrentGC { + public: + ShenandoahOldGC(ShenandoahGeneration* generation, ShenandoahSharedFlag& allow_preemption); + bool collect(GCCause::Cause cause); + + protected: + virtual void op_final_mark(); + + private: + + ShenandoahSharedFlag& _allow_preemption; +}; + + +#endif //SHARE_GC_SHENANDOAH_SHENANDOAHOLDGC_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp new file mode 100644 index 00000000000..42060b184bc --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -0,0 +1,468 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + + +#include "precompiled.hpp" + +#include "gc/shared/strongRootsScope.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +#include "gc/shenandoah/shenandoahAsserts.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahMarkClosures.hpp" +#include "gc/shenandoah/shenandoahMark.inline.hpp" +#include "gc/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahOopClosures.inline.hpp" +#include "gc/shenandoah/shenandoahReferenceProcessor.hpp" +#include "gc/shenandoah/shenandoahStringDedup.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "runtime/threads.hpp" +#include "utilities/events.hpp" + +class ShenandoahFlushAllSATB : public ThreadClosure { +private: + SATBMarkQueueSet& _satb_qset; + +public: + explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) : + _satb_qset(satb_qset) {} + + void do_thread(Thread* thread) { + // Transfer any partial buffer to the qset for completed buffer processing. + _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread)); + } +}; + +class ShenandoahProcessOldSATB : public SATBBufferClosure { +private: + ShenandoahObjToScanQueue* _queue; + ShenandoahHeap* _heap; + ShenandoahMarkingContext* const _mark_context; + size_t _trashed_oops; + +public: + explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) : + _queue(q), + _heap(ShenandoahHeap::heap()), + _mark_context(_heap->marking_context()), + _trashed_oops(0) {} + + void do_buffer(void** buffer, size_t size) { + assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here"); + for (size_t i = 0; i < size; ++i) { + oop *p = (oop *) &buffer[i]; + ShenandoahHeapRegion* region = _heap->heap_region_containing(*p); + if (region->is_old() && region->is_active()) { + ShenandoahMark::mark_through_ref(p, _queue, nullptr, _mark_context, false); + } else { + _trashed_oops++; + } + } + } + + size_t trashed_oops() { + return _trashed_oops; + } +}; + +class ShenandoahPurgeSATBTask : public WorkerTask { +private: + ShenandoahObjToScanQueueSet* _mark_queues; + volatile size_t _trashed_oops; + +public: + explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) : + WorkerTask("Purge SATB"), + _mark_queues(queues), + _trashed_oops(0) { + Threads::change_thread_claim_token(); + } + + ~ShenandoahPurgeSATBTask() { + if (_trashed_oops > 0) { + log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops); + } + } + + void work(uint worker_id) { + ShenandoahParallelWorkerSession worker_session(worker_id); + ShenandoahSATBMarkQueueSet &satb_queues = ShenandoahBarrierSet::satb_mark_queue_set(); + ShenandoahFlushAllSATB flusher(satb_queues); + Threads::possibly_parallel_threads_do(true /* is_par */, &flusher); + + 
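+    // Every thread's partial buffer has now been flushed to the completed-buffer
+    // list; drain that list below, re-marking oops that still point into active
+    // old regions and counting everything else as trashed.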
ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id); + ShenandoahProcessOldSATB processor(mark_queue); + while (satb_queues.apply_closure_to_completed_buffer(&processor)) {} + + Atomic::add(&_trashed_oops, processor.trashed_oops()); + } +}; + +class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask { +private: + uint _nworkers; + ShenandoahHeapRegion** _coalesce_and_fill_region_array; + uint _coalesce_and_fill_region_count; + volatile bool _is_preempted; + +public: + ShenandoahConcurrentCoalesceAndFillTask(uint nworkers, + ShenandoahHeapRegion** coalesce_and_fill_region_array, + uint region_count) : + WorkerTask("Shenandoah Concurrent Coalesce and Fill"), + _nworkers(nworkers), + _coalesce_and_fill_region_array(coalesce_and_fill_region_array), + _coalesce_and_fill_region_count(region_count), + _is_preempted(false) { + } + + void work(uint worker_id) { + for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) { + ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx]; + if (r->is_humongous()) { + // There is only one object in this region and it is not garbage, + // so no need to coalesce or fill. + continue; + } + + if (!r->oop_fill_and_coalesce()) { + // Coalesce and fill has been preempted + Atomic::store(&_is_preempted, true); + return; + } + } + } + + // Value returned from is_completed() is only valid after all worker threads have terminated. + bool is_completed() { + return !Atomic::load(&_is_preempted); + } +}; + +ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity) + : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity), + _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)), + _state(WAITING_FOR_BOOTSTRAP), + _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION), + _min_growth_before_compaction ((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100) +{ + _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR; + // Always clear references for old generation + ref_processor()->set_soft_reference_policy(true); +} + +size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const { + return _live_bytes_after_last_mark; +} + +void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) { + _live_bytes_after_last_mark = bytes; + _growth_before_compaction /= 2; + if (_growth_before_compaction < _min_growth_before_compaction) { + _growth_before_compaction = _min_growth_before_compaction; + } +} + +size_t ShenandoahOldGeneration::usage_trigger_threshold() const { + size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR; + return result; +} + +bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const { + // TODO: Should this be region->is_old() instead?
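+  // As written, !is_young() also reports FREE (unaffiliated) regions as belonging
+  // to old; the TODO above asks whether the stricter is_old() test is intended.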
+ return !region->is_young(); +} + +void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + ShenandoahGenerationRegionClosure old_regions(cl); + ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions); +} + +void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + ShenandoahGenerationRegionClosure old_regions(cl); + ShenandoahHeap::heap()->heap_region_iterate(&old_regions); +} + +void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) { + ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress); +} + +bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() { + return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(); +} + +void ShenandoahOldGeneration::cancel_marking() { + if (is_concurrent_mark_in_progress()) { + log_info(gc)("Abandon SATB buffers"); + ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking(); + } + + ShenandoahGeneration::cancel_marking(); +} + +void ShenandoahOldGeneration::prepare_gc() { + + // Now that we have made the old generation parsable, it is safe to reset the mark bitmap. + assert(state() != FILLING, "Cannot reset old without making it parsable"); + + ShenandoahGeneration::prepare_gc(); +} + +bool ShenandoahOldGeneration::entry_coalesce_and_fill() { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + + static const char* msg = "Coalescing and filling (OLD)"; + ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill); + + // TODO: I don't think we're using these concurrent collection counters correctly. + TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + EventMark em("%s", msg); + ShenandoahWorkerScope scope(heap->workers(), + ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), + msg); + + return coalesce_and_fill(); +} + +// Make the old generation regions parsable, so they can be safely +// scanned when looking for objects in memory indicated by dirty cards. +bool ShenandoahOldGeneration::coalesce_and_fill() { + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + transition_to(FILLING); + + ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics(); + WorkerThreads* workers = heap->workers(); + uint nworkers = workers->active_workers(); + + log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions"); + + // This code will see the same set of regions to fill on each resumption as it did + // on the initial run. That's okay because each region keeps track of its own coalesce + // and fill state. Regions that were filled on a prior attempt will not try to fill again. + uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array); + assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity"); + ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count); + + workers->run_task(&task); + if (task.is_completed()) { + old_heuristics->abandon_collection_candidates(); + return true; + } else { + // Coalesce-and-fill has been preempted. We'll finish that effort in the future. Do not invoke + // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap + // and invokes set_mark_incomplete(). Coalesce-and-fill depends on the mark bitmap. 
+ log_debug(gc)("Suspending coalesce-and-fill of old heap regions"); + return false; + } +} + +void ShenandoahOldGeneration::transfer_pointers_from_satb() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + shenandoah_assert_safepoint(); + assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking."); + log_info(gc)("Transfer SATB buffers"); + uint nworkers = heap->workers()->active_workers(); + StrongRootsScope scope(nworkers); + + ShenandoahPurgeSATBTask purge_satb_task(task_queues()); + heap->workers()->run_task(&purge_satb_task); +} + +bool ShenandoahOldGeneration::contains(oop obj) const { + return ShenandoahHeap::heap()->is_in_old(obj); +} + +void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC"); + + { + ShenandoahGCPhase phase(concurrent ? + ShenandoahPhaseTimings::final_update_region_states : + ShenandoahPhaseTimings::degen_gc_final_update_region_states); + ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context()); + + parallel_heap_region_iterate(&cl); + heap->assert_pinned_region_status(); + } + + { + // This doesn't actually choose a collection set, but prepares a list of + // regions as 'candidates' for inclusion in a mixed collection. + ShenandoahGCPhase phase(concurrent ? + ShenandoahPhaseTimings::choose_cset : + ShenandoahPhaseTimings::degen_gc_choose_cset); + ShenandoahHeapLocker locker(heap->lock()); + _old_heuristics->prepare_for_old_collections(); + } + + { + // Though we did not choose a collection set above, we still may have + // freed up immediate garbage regions so proceed with rebuilding the free set. + ShenandoahGCPhase phase(concurrent ? + ShenandoahPhaseTimings::final_rebuild_freeset : + ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); + ShenandoahHeapLocker locker(heap->lock()); + size_t cset_young_regions, cset_old_regions; + size_t first_old, last_old, num_old; + heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old); + // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here + // is in case there was any immediate old garbage identified. + heap->free_set()->rebuild(cset_young_regions, cset_old_regions); + } +} + +const char* ShenandoahOldGeneration::state_name(State state) { + switch (state) { + case WAITING_FOR_BOOTSTRAP: return "Waiting for Bootstrap"; + case FILLING: return "Coalescing"; + case BOOTSTRAPPING: return "Bootstrapping"; + case MARKING: return "Marking"; + case EVACUATING: return "Evacuating"; + default: + ShouldNotReachHere(); + return "Unknown"; + } +} + +void ShenandoahOldGeneration::transition_to(State new_state) { + if (_state != new_state) { + log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state)); + validate_transition(new_state); + _state = new_state; + } +} + +#ifdef ASSERT +// This diagram depicts the expected state transitions for marking the old generation +// and preparing for old collections. When a young generation cycle executes, the +// remembered set scan must visit objects in old regions. Visiting an object which +// has become dead on previous old cycles will result in crashes. To avoid visiting +// such objects, the remembered set scan will use the old generation mark bitmap when +// possible. 
It is _not_ possible to use the old generation bitmap when old marking +// is active (bitmap is not complete). For this reason, the old regions are made +// parsable _before_ the old generation bitmap is reset. The diagram does not depict +// cancellation of old collections by global or full collections. +// +// When a global collection supersedes an old collection, the global mark still +// "completes" the old mark bitmap. Subsequent remembered set scans may use the +// old generation mark bitmap, but any uncollected old regions must still be made parsable +// before the next old generation cycle begins. For this reason, a global collection may +// create mixed collection candidates and coalesce and fill candidates and will put +// the old generation in the respective states (EVACUATING or FILLING). After a Full GC, +// the mark bitmaps are all reset, all regions are parsable and the mark context will +// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap +// and we expect the old generation to be waiting for bootstrap. +// +// +-----------------+ +// +------------> | FILLING | <---+ +// | +--------> | | | +// | | +-----------------+ | +// | | | | +// | | | Filling Complete | <-> A global collection may +// | | v | move the old generation +// | | +-----------------+ | directly from waiting for +// | +--------> | WAITING | | bootstrap to filling or +// | | +---- | FOR BOOTSTRAP | ----+ evacuating. +// | | | +-----------------+ +// | | | | +// | | | | Reset Bitmap +// | | | v +// | | | +-----------------+ +----------------------+ +// | | | | BOOTSTRAP | <-> | YOUNG GC | +// | | | | | | (RSet Parses Region) | +// | | | +-----------------+ +----------------------+ +// | | | | +// | | | | Old Marking +// | | | v +// | | | +-----------------+ +----------------------+ +// | | | | MARKING | <-> | YOUNG GC | +// | +--------- | | | (RSet Parses Region) | +// | | +-----------------+ +----------------------+ +// | | | +// | | | Has Evacuation Candidates +// | | v +// | | +-----------------+ +--------------------+ +// | +---> | EVACUATING | <-> | YOUNG GC | +// +------------- | | | (RSet Uses Bitmap) | +// +-----------------+ +--------------------+ +// +// +// +void ShenandoahOldGeneration::validate_transition(State new_state) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + switch (new_state) { + case FILLING: + assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping"); + assert(heap->is_old_bitmap_stable(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state)); + assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill."); + break; + case WAITING_FOR_BOOTSTRAP: + // GC cancellation can send us back here from any state.
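+      // Whatever the prior state, readiness for bootstrap requires that old
+      // marking is idle, no mixed-collection candidates remain, and the young
+      // generation has dropped its reference to the old mark queues, which is
+      // what the asserts below check.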
+ assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark."); + assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become ready for bootstrap with collection candidates"); + assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping."); + break; + case BOOTSTRAPPING: + assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state)); + assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates"); + assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable."); + break; + case MARKING: + assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state)); + assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues."); + assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now."); + break; + case EVACUATING: + assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state)); + assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here."); + break; + default: + fatal("Unknown new state"); + } +} +#endif + +ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) { + _old_heuristics = new ShenandoahOldHeuristics(this); + _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval); + _heuristics = _old_heuristics; + return _heuristics; +} + +void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) { + heuristics()->record_success_concurrent(abbreviated); + ShenandoahHeap::heap()->shenandoah_policy()->record_success_old(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp new file mode 100644 index 00000000000..785d8281dd0 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP + +#include "gc/shenandoah/shenandoahGeneration.hpp" + +class ShenandoahHeapRegion; +class ShenandoahHeapRegionClosure; +class ShenandoahOldHeuristics; + +class ShenandoahOldGeneration : public ShenandoahGeneration { +private: + ShenandoahHeapRegion** _coalesce_and_fill_region_array; + ShenandoahOldHeuristics* _old_heuristics; + + bool coalesce_and_fill(); + +public: + ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity); + + virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override; + + const char* name() const override { + return "OLD"; + } + + void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + + bool contains(ShenandoahHeapRegion* region) const override; + bool contains(oop obj) const override; + + void set_concurrent_mark_in_progress(bool in_progress) override; + bool is_concurrent_mark_in_progress() override; + + bool entry_coalesce_and_fill(); + virtual void prepare_gc() override; + void prepare_regions_and_collection_set(bool concurrent) override; + virtual void record_success_concurrent(bool abbreviated) override; + virtual void cancel_marking() override; + + // We leave the SATB barrier on for the entirety of the old generation + // marking phase. In some cases, this can cause a write to a perfectly + // reachable oop to enqueue a pointer that later becomes garbage (because + // it points at an object that is later chosen for the collection set). There are + // also cases where the referent of a weak reference ends up in the SATB + // and is later collected. In these cases the oop in the SATB buffer becomes + // invalid and the _next_ cycle will crash during its marking phase. To + // avoid this problem, we "purge" the SATB buffers during the final update + // references phase if (and only if) an old generation mark is in progress. + // At this stage we can safely determine if any of the oops in the SATB + // buffer belong to trashed regions (before they are recycled). As it + // happens, flushing a SATB queue also filters out oops which have already + // been marked - which is the case for anything that is being evacuated + // from the collection set. + // + // Alternatively, we could inspect the state of the heap and the age of the + // object at the barrier, but we reject this approach because it is likely + // the performance impact would be too severe. + void transfer_pointers_from_satb(); + +public: + enum State { + FILLING, WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING, EVACUATING + }; + +private: + State _state; + + static const size_t FRACTIONAL_DENOMINATOR = 65536; + + // During initialization of the JVM, we search for the correct old-gen size by initially performing old-gen + // collection when old-gen usage is 50% more (INITIAL_GROWTH_BEFORE_COMPACTION) than the initial old-gen size + // estimate (6.25% of heap). The next old-gen trigger occurs when old-gen grows 25% larger than its live + // memory at the end of the first old-gen collection. Then we trigger again when old-gen grows 12.5% + // more than its live memory at the end of the previous old-gen collection. Thereafter, we trigger each time + // old-gen grows more than 12.5% following the end of its previous old-gen collection.
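+  // Worked example of the fixed-point arithmetic (assuming the constants declared
+  // below): with FRACTIONAL_DENOMINATOR = 65536, INITIAL_GROWTH_BEFORE_COMPACTION
+  // is 32768, so the first trigger fires at
+  //   live + (live * 32768) / 65536 = 1.5 * live.
+  // Each completed old mark halves the growth factor (25%, then 12.5%), clamped
+  // below by _min_growth_before_compaction (12.5% by default).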
+ static const size_t INITIAL_GROWTH_BEFORE_COMPACTION = FRACTIONAL_DENOMINATOR / 2; // 50.0% + + // INITIAL_LIVE_FRACTION represents the initial guess of how large old-gen should be. We estimate that old-gen + // needs to consume 6.25% of the total heap size. And we "pretend" that we start out with this amount of live + // old-gen memory. The first old-collection trigger will occur when old-gen occupies 50% more than this initial + // approximation of the old-gen memory requirement, in other words when old-gen usage is 150% of 6.25%, which + // is 9.375% of the total heap size. + static const uint16_t INITIAL_LIVE_FRACTION = FRACTIONAL_DENOMINATOR / 16; // 6.25% + + size_t _live_bytes_after_last_mark; + + // How much growth in usage before we trigger old collection, per FRACTIONAL_DENOMINATOR (65_536) + size_t _growth_before_compaction; + const size_t _min_growth_before_compaction; // Default is 12.5% + + void validate_transition(State new_state) NOT_DEBUG_RETURN; + +public: + State state() const { + return _state; + } + + const char* state_name() const { + return state_name(_state); + } + + void transition_to(State new_state); + + size_t get_live_bytes_after_last_mark() const; + void set_live_bytes_after_last_mark(size_t new_live); + + size_t usage_trigger_threshold() const; + + bool can_start_gc() { + return _state == WAITING_FOR_BOOTSTRAP; + } + + static const char* state_name(State state); +}; + + +#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp index 11c70f2726a..f040cfe5e8e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,15 +43,16 @@ enum StringDedupMode { class ShenandoahMarkRefsSuperClosure : public MetadataVisitingOopIterateClosure { private: ShenandoahObjToScanQueue* _queue; + ShenandoahObjToScanQueue* _old_queue; ShenandoahMarkingContext* const _mark_context; bool _weak; protected: - template + template void work(T *p); public: - ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp); + ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q); bool is_weak() const { return _weak; @@ -70,44 +72,45 @@ class ShenandoahMarkUpdateRefsSuperClosure : public ShenandoahMarkRefsSuperClosu protected: ShenandoahHeap* const _heap; - template + template inline void work(T* p); public: - ShenandoahMarkUpdateRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) : - ShenandoahMarkRefsSuperClosure(q, rp), + ShenandoahMarkUpdateRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) : + ShenandoahMarkRefsSuperClosure(q, rp, old_q), _heap(ShenandoahHeap::heap()) { assert(_heap->is_stw_gc_in_progress(), "Can only be used for STW GC"); }; }; +template class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkUpdateRefsSuperClosure { private: template - inline void do_oop_work(T* p) { work(p); } + inline void do_oop_work(T* p) { work(p); } public: - ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) : - ShenandoahMarkUpdateRefsSuperClosure(q, rp) {} + ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) : + ShenandoahMarkUpdateRefsSuperClosure(q, rp, old_q) {} virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop(oop* p) { do_oop_work(p); } }; +template class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure { private: template - inline void do_oop_work(T* p) { work(p); } + inline void do_oop_work(T* p) { work(p); } public: - ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) : - ShenandoahMarkRefsSuperClosure(q, rp) {}; + ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) : + ShenandoahMarkRefsSuperClosure(q, rp, old_q) {}; virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop(oop* p) { do_oop_work(p); } }; - class ShenandoahUpdateRefsSuperClosure : public ShenandoahOopClosureBase { protected: ShenandoahHeap* _heap; @@ -142,4 +145,21 @@ class ShenandoahConcUpdateRefsClosure : public ShenandoahUpdateRefsSuperClosure virtual void do_oop(oop* p) { work(p); } }; +class ShenandoahSetRememberedCardsToDirtyClosure : public BasicOopIterateClosure { +protected: + ShenandoahHeap* const _heap; + RememberedScanner* const _scanner; + +public: + ShenandoahSetRememberedCardsToDirtyClosure() : + _heap(ShenandoahHeap::heap()), + _scanner(_heap->card_scan()) {} + + template + inline void work(T* p); + + virtual void do_oop(narrowOop* p) { work(p); } + virtual void do_oop(oop* p) { work(p); } +}; + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp index 1812b4e8f05..d257e91b4a2 100644 --- 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp index 1812b4e8f05..d257e91b4a2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,18 +31,18 @@ #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" -template <class T> +template <class T, ShenandoahGenerationType GENERATION> inline void ShenandoahMarkRefsSuperClosure::work(T* p) { - ShenandoahMark::mark_through_ref(p, _queue, _mark_context, _weak); + ShenandoahMark::mark_through_ref<T, GENERATION>(p, _queue, _old_queue, _mark_context, _weak); } -template <class T> +template <class T, ShenandoahGenerationType GENERATION> inline void ShenandoahMarkUpdateRefsSuperClosure::work(T* p) { // Update the location _heap->update_with_forwarded(p); // ...then do the usual thing - ShenandoahMarkRefsSuperClosure::work(p); + ShenandoahMarkRefsSuperClosure::work<T, GENERATION>(p); } template <class T> @@ -54,4 +55,16 @@ inline void ShenandoahConcUpdateRefsClosure::work(T* p) { _heap->conc_update_with_forwarded(p); } +template <class T> +inline void ShenandoahSetRememberedCardsToDirtyClosure::work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + if (_heap->is_in_young(obj)) { + // Found interesting pointer. Mark the containing card as dirty. + _scanner->mark_card_as_dirty((HeapWord*) p); + } + } +} + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp index faf5172bec0..8dbd9c4d26f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp @@ -27,6 +27,7 @@ #include "gc/shenandoah/shenandoahNumberSeq.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" #include "memory/allocation.hpp" class ShenandoahHeap; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp index b908a0ede11..95531890a02 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -97,6 +98,7 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) { assert(phase >= 0 && phase < _num_phases, "Out of bounds"); switch (phase) { case init_evac: + case init_scan_rset: case finish_mark: case purge_weak_par: case full_gc_mark: @@ -112,6 +114,7 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) { case degen_gc_purge_class_unload: case degen_gc_purge_weak_par: case heap_iteration_roots: + case conc_mark: case conc_mark_roots: case conc_thread_roots: case conc_weak_roots_work: @@ -308,17 +311,17 @@ void ShenandoahPhaseTimings::print_global_on(outputStream* out) const { } ShenandoahWorkerTimingsTracker::ShenandoahWorkerTimingsTracker(ShenandoahPhaseTimings::Phase phase, - ShenandoahPhaseTimings::ParPhase par_phase, uint worker_id) : + ShenandoahPhaseTimings::ParPhase par_phase, uint worker_id, bool cumulative) : _timings(ShenandoahHeap::heap()->phase_timings()), _phase(phase), _par_phase(par_phase), _worker_id(worker_id) { - assert(_timings->worker_data(_phase, _par_phase)->get(_worker_id) == ShenandoahWorkerData::uninitialized(), + assert(_timings->worker_data(_phase, _par_phase)->get(_worker_id) == ShenandoahWorkerData::uninitialized() || cumulative, "Should not be set yet: %s", ShenandoahPhaseTimings::phase_name(_timings->worker_par_phase(_phase, _par_phase))); _start_time = os::elapsedTime(); } ShenandoahWorkerTimingsTracker::~ShenandoahWorkerTimingsTracker() { - _timings->worker_data(_phase, _par_phase)->set(_worker_id, os::elapsedTime() - _start_time); + _timings->worker_data(_phase, _par_phase)->set_or_add(_worker_id, os::elapsedTime() - _start_time); if (ShenandoahPhaseTimings::is_root_work_phase(_phase)) { ShenandoahPhaseTimings::Phase root_phase = _phase; @@ -326,4 +329,3 @@ ShenandoahWorkerTimingsTracker::~ShenandoahWorkerTimingsTracker() { _event.commit(GCId::current(), _worker_id, ShenandoahPhaseTimings::phase_name(cur_phase)); } } - diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp index a6ca335a0d7..4bf9ed3e772 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,19 +45,26 @@ class outputStream; f(CNT_PREFIX ## CLDUnlink, DESC_PREFIX "Unlink CLDs") \ f(CNT_PREFIX ## WeakRefProc, DESC_PREFIX "Weak References") \ f(CNT_PREFIX ## ParallelMark, DESC_PREFIX "Parallel Mark") \ + f(CNT_PREFIX ## ScanClusters, DESC_PREFIX "Scan Clusters") \ // end #define SHENANDOAH_PHASE_DO(f) \ f(conc_reset, "Concurrent Reset") \ - \ + f(conc_reset_old, "Concurrent Reset (OLD)") \ f(init_mark_gross, "Pause Init Mark (G)") \ f(init_mark, "Pause Init Mark (N)") \ f(init_manage_tlabs, " Manage TLABs") \ + f(init_swap_rset, " Swap Remembered Set") \ + f(init_transfer_satb, " Transfer Old From SATB") \ f(init_update_region_states, " Update Region States") \ \ + f(init_scan_rset, "Concurrent Scan Remembered Set") \ + SHENANDOAH_PAR_PHASE_DO(init_scan_rset_, " RS: ", f) \ + \ f(conc_mark_roots, "Concurrent Mark Roots ") \ SHENANDOAH_PAR_PHASE_DO(conc_mark_roots, " CMR: ", f) \ f(conc_mark, "Concurrent Marking") \ + SHENANDOAH_PAR_PHASE_DO(conc_mark, " CM: ", f) \ \ f(final_mark_gross, "Pause Final Mark (G)") \ f(final_mark, "Pause Final Mark (N)") \ @@ -94,6 +102,8 @@ class outputStream; f(conc_class_unload_purge_ec, " Exception Caches") \ f(conc_strong_roots, "Concurrent Strong Roots") \ SHENANDOAH_PAR_PHASE_DO(conc_strong_roots_, " CSR: ", f) \ + f(coalesce_and_fill, "Coalesce and Fill Old Dead") \ + SHENANDOAH_PAR_PHASE_DO(coalesce_and_fill_, " CFOD: ", f) \ f(conc_evac, "Concurrent Evacuation") \ \ f(final_roots_gross, "Pause Final Roots (G)") \ @@ -169,8 +179,10 @@ class outputStream; f(full_gc_copy_objects, " Copy Objects") \ f(full_gc_copy_objects_regular, " Regular Objects") \ f(full_gc_copy_objects_humong, " Humongous Objects") \ + f(full_gc_recompute_generation_usage, " Recompute generation usage") \ f(full_gc_copy_objects_reset_complete, " Reset Complete Bitmap") \ f(full_gc_copy_objects_rebuild, " Rebuild Region Sets") \ + f(full_gc_reconstruct_remembered_set, " Reconstruct Remembered Set") \ f(full_gc_heapdump_post, " Post Heap Dump") \ \ f(conc_uncommit, "Concurrent Uncommit") \ @@ -249,7 +261,10 @@ class ShenandoahWorkerTimingsTracker : public StackObj { double _start_time; EventGCPhaseParallel _event; public: - ShenandoahWorkerTimingsTracker(ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase, uint worker_id); + ShenandoahWorkerTimingsTracker(ShenandoahPhaseTimings::Phase phase, + ShenandoahPhaseTimings::ParPhase par_phase, + uint worker_id, + bool cumulative = false); ~ShenandoahWorkerTimingsTracker(); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp index 0bd92da1b5d..f643835bebe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +27,7 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "gc/shared/workerThread.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" @@ -57,17 +59,40 @@ static const char* reference_type_name(ReferenceType type) { } } +template <typename T> +static void card_mark_barrier(T* field, oop value) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(heap->is_in_or_null(value), "Should be in heap"); + assert(ShenandoahCardBarrier, "Card-mark barrier should be on"); + if (heap->is_in_old(field) && heap->is_in_young(value)) { + // For Shenandoah, each generation collects all the _referents_ that belong to the + // collected generation. We can end up with discovered lists that contain a mixture + // of old and young _references_. These references are linked together through the + // discovered field in java.lang.Reference. In some cases, creating or editing this + // list may result in the creation of _new_ old-to-young pointers which must dirty + // the corresponding card. Failing to do this may cause heap verification errors and + // lead to incorrect GC behavior. + heap->card_scan()->mark_card_as_dirty(reinterpret_cast<HeapWord*>(field)); + } +} + template <typename T> static void set_oop_field(T* field, oop value); template <> void set_oop_field(oop* field, oop value) { *field = value; + if (ShenandoahCardBarrier) { + card_mark_barrier(field, value); + } } template <> void set_oop_field(narrowOop* field, oop value) { *field = CompressedOops::encode(value); + if (ShenandoahCardBarrier) { + card_mark_barrier(field, value); + } } static oop lrb(oop obj) { @@ -257,6 +282,7 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference); T heap_oop = RawAccess<>::oop_load(referent_addr); oop referent = CompressedOops::decode(heap_oop); + ShenandoahHeap* heap = ShenandoahHeap::heap(); if (is_inactive(reference, referent, type)) { log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference)); return false; @@ -273,6 +299,11 @@ return false; } + if (!heap->is_in_active_generation(referent)) { + log_trace(gc,ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent)); + return false; + } + return true; } @@ -338,6 +369,9 @@ bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, u } // Add reference to discovered list + // Each worker thread has a private copy of refproc_data, which includes a private discovered list. This means + // there's no risk that a different worker thread will try to manipulate my discovered list head while I'm making + // reference the head of my discovered list. ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id]; oop discovered_head = refproc_data.discovered_list_head(); if (discovered_head == nullptr) { @@ -346,6 +380,18 @@ discovered_head = reference; } if (reference_cas_discovered(reference, discovered_head)) { + // We successfully set this reference object's next pointer to discovered_head. This marks reference as discovered.
+ // If reference_cas_discovered fails, that means some other worker thread took credit for discovery of this reference, + // and that other thread will place reference on its discovered list, so I can ignore reference. + + // In case we have created an interesting pointer, mark the remembered set card as dirty. + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (ShenandoahCardBarrier) { + T* addr = reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference)); + card_mark_barrier(addr, discovered_head); + } + + // Make the discovered_list_head point to reference. refproc_data.set_discovered_list_head(reference); assert(refproc_data.discovered_list_head() == reference, "reference must be new discovered head"); log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); @@ -360,7 +406,8 @@ bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceTy return false; } - log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s, %s)", + p2i(reference), reference_type_name(type), ShenandoahHeap::heap()->heap_region_containing(reference)->affiliation_name()); uint worker_id = WorkerThread::worker_id(); _ref_proc_thread_locals[worker_id].inc_encountered(type); @@ -375,15 +422,21 @@ template <typename T> oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) { log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); -#ifdef ASSERT + ShenandoahHeap* heap = ShenandoahHeap::heap(); oop referent = reference_referent(reference); - assert(referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(referent), - "only drop references with alive referents"); -#endif + assert(referent == nullptr || heap->marking_context()->is_marked(referent), "only drop references with alive referents"); // Unlink and return next in list oop next = reference_discovered(reference); reference_set_discovered(reference, nullptr); + // When this reference was discovered, it would not have been marked. If it ends up surviving + // the cycle, we need to dirty the card if the reference is old and the referent is young. Note + // that if the reference is not dropped, then its pointer to the referent will be nulled before + // evacuation begins, so the card does not need to be dirtied. + if (heap->mode()->is_generational() && heap->is_in_old(reference) && heap->is_in_young(referent)) { + // Note: it would be sufficient to mark only the card that holds the start of this Reference object. + heap->card_scan()->mark_range_as_dirty(cast_from_oop<HeapWord*>(reference), reference->size()); + } return next; }
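The pending-list handoff in the next hunk follows the standard lock-free prepend: atomically swap the shared head, then stitch the old head onto the local batch's tail. A simplified standalone model of that pattern (illustrative types, not the HotSpot code); in the patch the final stitch goes through set_oop_field() precisely because it can create a new old-to-young edge that must dirty its card:

```cpp
#include <atomic>

struct Ref { Ref* next = nullptr; };

std::atomic<Ref*> g_pending_head{nullptr};

void prepend_batch(Ref* local_head, Ref* local_tail) {
  // Publish the whole local batch in one atomic exchange.
  Ref* prev = g_pending_head.exchange(local_head, std::memory_order_acq_rel);
  // This is the only write that can create a new cross-generation edge;
  // the real code routes it through set_oop_field() to keep cards precise.
  local_tail->next = prev;
}

int main() {
  Ref a, b;
  a.next = &b;            // local batch: a -> b
  prepend_batch(&a, &b);  // b.next picks up the previous global head (nullptr)
  return 0;
}
```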
@@ -435,11 +488,12 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc } // Prepend discovered references to internal pending list + // set_oop_field maintains the card mark barrier as this list is constructed. if (!CompressedOops::is_null(*list)) { oop head = lrb(CompressedOops::decode_not_null(*list)); shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier); oop prev = Atomic::xchg(&_pending_list, head); - RawAccess<>::oop_store(p, prev); + set_oop_field(p, prev); if (prev == nullptr) { // First to prepend to list, record tail _pending_list_tail = reinterpret_cast<void*>(p); @@ -511,10 +565,23 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Ph void ShenandoahReferenceProcessor::enqueue_references_locked() { // Prepend internal pending list to external pending list shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier); + + // During reference processing, we maintain a local list of references that are identified by + // _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last Reference object on + // the local list. + // + // There is also a global list of references, identified by Universe::_reference_pending_list. + + // The following code has the effect of: + // 1. Making the global Universe::_reference_pending_list point to my local list + // 2. Overwriting the next field of the last Reference on my local list to point at the previous head of the + // global Universe::_reference_pending_list + + oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list); if (UseCompressedOops) { - *reinterpret_cast<narrowOop*>(_pending_list_tail) = CompressedOops::encode(Universe::swap_reference_pending_list(_pending_list)); + set_oop_field(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list); } else { - *reinterpret_cast<oop*>(_pending_list_tail) = Universe::swap_reference_pending_list(_pending_list); + set_oop_field(reinterpret_cast<oop*>(_pending_list_tail), former_head_of_global_list); } } @@ -523,7 +590,6 @@ void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) { // Nothing to enqueue return; } - if (!concurrent) { // When called from mark-compact or degen-GC, the locking is done by the VMOperation, enqueue_references_locked(); @@ -601,4 +667,3 @@ void ShenandoahReferenceProcessor::collect_statistics() { log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT, enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]); } - diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp new file mode 100644 index 00000000000..6ea35f6a06d --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp @@ -0,0 +1,191 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" +#include "gc/shenandoah/shenandoahControlThread.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahRegulatorThread.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "logging/log.hpp" + +static ShenandoahHeuristics* get_heuristics(ShenandoahGeneration* nullable) { + return nullable != nullptr ? nullable->heuristics() : nullptr; +} + +ShenandoahRegulatorThread::ShenandoahRegulatorThread(ShenandoahControlThread* control_thread) : + ConcurrentGCThread(), + _control_thread(control_thread), + _sleep(ShenandoahControlIntervalMin), + _last_sleep_adjust_time(os::elapsedTime()) { + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + _old_heuristics = get_heuristics(heap->old_generation()); + _young_heuristics = get_heuristics(heap->young_generation()); + _global_heuristics = get_heuristics(heap->global_generation()); + + create_and_start(); +} + +void ShenandoahRegulatorThread::run_service() { + if (ShenandoahHeap::heap()->mode()->is_generational()) { + if (ShenandoahAllowOldMarkingPreemption) { + regulate_young_and_old_cycles(); + } else { + regulate_young_and_global_cycles(); + } + } else { + regulate_global_cycles(); + } + + log_info(gc)("%s: Done.", name()); +} + +void ShenandoahRegulatorThread::regulate_young_and_old_cycles() { + assert(_young_heuristics != nullptr, "Need young heuristics."); + assert(_old_heuristics != nullptr, "Need old heuristics."); + + while (!should_terminate()) { + ShenandoahControlThread::GCMode mode = _control_thread->gc_mode(); + if (mode == ShenandoahControlThread::none) { + if (should_start_metaspace_gc()) { + if (request_concurrent_gc(ShenandoahControlThread::select_global_generation())) { + log_info(gc)("Heuristics request for global (unload classes) accepted."); + } + } else { + if (_young_heuristics->should_start_gc()) { + // Give the old generation a chance to run. The old generation cycle + // begins with a 'bootstrap' cycle that will also collect young. 
+ if (start_old_cycle()) { + log_info(gc)("Heuristics request for old collection accepted"); + } else if (request_concurrent_gc(YOUNG)) { + log_info(gc)("Heuristics request for young collection accepted"); + } + } + } + } else if (mode == ShenandoahControlThread::servicing_old) { + if (start_young_cycle()) { + log_info(gc)("Heuristics request to interrupt old for young collection accepted"); + } + } + + regulator_sleep(); + } +} + + +void ShenandoahRegulatorThread::regulate_young_and_global_cycles() { + assert(_young_heuristics != nullptr, "Need young heuristics."); + assert(_global_heuristics != nullptr, "Need global heuristics."); + + while (!should_terminate()) { + if (_control_thread->gc_mode() == ShenandoahControlThread::none) { + if (start_global_cycle()) { + log_info(gc)("Heuristics request for global collection accepted."); + } else if (start_young_cycle()) { + log_info(gc)("Heuristics request for young collection accepted."); + } + } + + regulator_sleep(); + } +} + +void ShenandoahRegulatorThread::regulate_global_cycles() { + assert(_global_heuristics != nullptr, "Need global heuristics."); + + while (!should_terminate()) { + if (_control_thread->gc_mode() == ShenandoahControlThread::none) { + if (start_global_cycle()) { + log_info(gc)("Heuristics request for global collection accepted."); + } + } + + regulator_sleep(); + } +} + +void ShenandoahRegulatorThread::regulator_sleep() { + // Wait before performing the next action. If allocation happened during this wait, + // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, + // back off exponentially. + double current = os::elapsedTime(); + + if (_heap_changed.try_unset()) { + _sleep = ShenandoahControlIntervalMin; + } else if ((current - _last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) { + _sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2<int>(1, _sleep * 2)); + _last_sleep_adjust_time = current; + } + + os::naked_short_sleep(_sleep); + if (LogTarget(Debug, gc, thread)::is_enabled()) { + double elapsed = os::elapsedTime() - current; + double hiccup = elapsed - double(_sleep); + if (hiccup > 0.001) { + log_debug(gc, thread)("Regulator hiccup time: %.3fs", hiccup); + } + } +} + +bool ShenandoahRegulatorThread::start_old_cycle() { + return _old_heuristics->should_start_gc() && request_concurrent_gc(OLD); +} + +bool ShenandoahRegulatorThread::start_young_cycle() { + return _young_heuristics->should_start_gc() && request_concurrent_gc(YOUNG); +} + +bool ShenandoahRegulatorThread::start_global_cycle() { + return _global_heuristics->should_start_gc() && request_concurrent_gc(ShenandoahControlThread::select_global_generation()); +} + +bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGenerationType generation) { + double now = os::elapsedTime(); + bool accepted = _control_thread->request_concurrent_gc(generation); + if (LogTarget(Debug, gc, thread)::is_enabled() && accepted) { + double wait_time = os::elapsedTime() - now; + if (wait_time > 0.001) { + log_debug(gc, thread)("Regulator waited %.3fs for control thread to acknowledge request.", wait_time); + } + } + return accepted; +} + +void ShenandoahRegulatorThread::stop_service() { + log_info(gc)("%s: Stop requested.", name()); +} + +bool ShenandoahRegulatorThread::should_start_metaspace_gc() { + // The generational mode can, at present, only unload classes during a global + // cycle. For this reason, we treat an oom in metaspace as a _trigger_ for a + // global cycle.
But, we check other prerequisites before starting a gc that won't + // unload anything. + return ClassUnloadingWithConcurrentMark + && _global_heuristics->can_unload_classes() + && _global_heuristics->has_metaspace_oom(); +} + diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp new file mode 100644 index 00000000000..31ea29e9b41 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHREGULATORTHREAD_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHREGULATORTHREAD_HPP + +#include "gc/shared/concurrentGCThread.hpp" +#include "gc/shared/gcCause.hpp" +#include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "runtime/mutex.hpp" + +class ShenandoahHeuristics; +class ShenandoahControlThread; + +/* + * The purpose of this class (and thread) is to allow us to continue + * to evaluate heuristics during a garbage collection. This is necessary + * to allow young generation collections to interrupt an old generation + * collection which is in-progress. This puts heuristic triggers on the + * same footing as other gc requests (alloc failure, System.gc, etc.). + * However, this regulator does not block after submitting a gc request. + * + * We could use a PeriodicTask for this, but this thread will sleep longer + * when the allocation rate is lower and PeriodicTasks cannot adjust their + * sleep time. + */ +class ShenandoahRegulatorThread: public ConcurrentGCThread { + friend class VMStructs; + + public: + explicit ShenandoahRegulatorThread(ShenandoahControlThread* control_thread); + + const char* name() const { return "ShenandoahRegulatorThread";} + + // This is called from allocation path, and thus should be fast. + void notify_heap_changed() { + // Notify that something had changed. + if (_heap_changed.is_unset()) { + _heap_changed.set(); + } + } + + protected: + void run_service(); + void stop_service(); + + private: + // When mode is generational + void regulate_young_and_old_cycles(); + // When mode is generational, but ShenandoahAllowOldMarkingPreemption is false + void regulate_young_and_global_cycles(); + // Default behavior for other modes (single generation). + void regulate_global_cycles(); + + // These return true if a cycle was started. 
+ bool start_old_cycle(); + bool start_young_cycle(); + bool start_global_cycle(); + + // The generational mode can only unload classes in a global cycle. The regulator + // thread itself will trigger a global cycle if metaspace is out of memory. + bool should_start_metaspace_gc(); + + // Regulator will sleep longer when the allocation rate is lower. + void regulator_sleep(); + + // Provides instrumentation to track how long it takes to acknowledge a request. + bool request_concurrent_gc(ShenandoahGenerationType generation); + + ShenandoahSharedFlag _heap_changed; + ShenandoahControlThread* _control_thread; + ShenandoahHeuristics* _young_heuristics; + ShenandoahHeuristics* _old_heuristics; + ShenandoahHeuristics* _global_heuristics; + + int _sleep; + double _last_sleep_adjust_time; +}; + + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHREGULATORTHREAD_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp index 35e4b865d97..9766660138a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +33,7 @@ #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahRootVerifier.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" #include "gc/shenandoah/shenandoahStringDedup.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shared/oopStorage.inline.hpp" @@ -53,7 +55,7 @@ ShenandoahGCStateResetter::~ShenandoahGCStateResetter() { assert(_heap->gc_state() == _gc_state, "Should be restored"); } -void ShenandoahRootVerifier::roots_do(OopClosure* oops) { +void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops) { ShenandoahGCStateResetter resetter; shenandoah_assert_safepoint(); @@ -67,13 +69,19 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) { OopStorageSet::storage(id)->oops_do(oops); } + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->mode()->is_generational() && heap->is_gc_generation_young()) { + shenandoah_assert_safepoint(); + heap->card_scan()->roots_do(oops); + } + // Do thread roots the last. This allows verification code to find // any broken objects from those special roots first, not the accidental // dangling reference from the thread root. Threads::possibly_parallel_oops_do(true, oops, nullptr); } -void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) { +void ShenandoahRootVerifier::strong_roots_do(OopIterateClosure* oops) { ShenandoahGCStateResetter resetter; shenandoah_assert_safepoint(); @@ -83,6 +91,12 @@ for (auto id : EnumRange<OopStorageSet::StrongId>()) { OopStorageSet::storage(id)->oops_do(oops); } + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->mode()->is_generational() && heap->is_gc_generation_young()) { + heap->card_scan()->roots_do(oops); + } + // Do thread roots the last. This allows verification code to find // any broken objects from those special roots first, not the accidental // dangling reference from the thread root.
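With the signature change from OopClosure to OopIterateClosure, the same closure the verifier is seeded with can also walk the remembered set when a young collection is active. A minimal sketch of such a closure (HotSpot context assumed; ShenandoahCountRootsClosure is a hypothetical name, and the real verifier closures carry far more state):

```cpp
#include "memory/iterator.hpp"

// BasicOopIterateClosure supplies the default metadata handling, so only the
// oop-visiting methods need to be provided.
class ShenandoahCountRootsClosure : public BasicOopIterateClosure {
private:
  size_t _count;
public:
  ShenandoahCountRootsClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { _count++; }
  virtual void do_oop(narrowOop* p) { _count++; }
  size_t count() const { return _count; }
};

// Usage sketch: in generational mode with a young collection in progress,
// roots_do() above also applies the closure to old-to-young slots found via
// heap->card_scan()->roots_do(oops):
//
//   ShenandoahCountRootsClosure cl;
//   ShenandoahRootVerifier::roots_do(&cl);
```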
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp index 54c95512a9c..da7ca864dbb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,8 +42,8 @@ class ShenandoahGCStateResetter : public StackObj { class ShenandoahRootVerifier : public AllStatic { public: // Used to seed ShenandoahVerifier, do not honor root type filter - static void roots_do(OopClosure* cl); - static void strong_roots_do(OopClosure* cl); + static void roots_do(OopIterateClosure* cl); + static void strong_roots_do(OopIterateClosure* cl); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp index 1462bc052dc..eaa3e0260be 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +30,7 @@ #include "gc/shared/taskTerminator.hpp" #include "gc/shared/workerThread.hpp" #include "gc/shenandoah/shenandoahClosures.inline.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" @@ -36,6 +38,7 @@ #include "gc/shenandoah/shenandoahSTWMark.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" +template <ShenandoahGenerationType GENERATION> class ShenandoahInitMarkRootsClosure : public OopClosure { private: ShenandoahObjToScanQueue* const _queue; @@ -43,6 +46,7 @@ class ShenandoahInitMarkRootsClosure : public OopClosure { template <class T> inline void do_oop_work(T* p); + public: ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q); @@ -50,14 +54,17 @@ class ShenandoahInitMarkRootsClosure : public OopClosure { void do_oop(oop* p) { do_oop_work(p); } }; -ShenandoahInitMarkRootsClosure::ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) : +template <ShenandoahGenerationType GENERATION> +ShenandoahInitMarkRootsClosure<GENERATION>::ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) : _queue(q), _mark_context(ShenandoahHeap::heap()->marking_context()) { } +template <ShenandoahGenerationType GENERATION> template <class T> -void ShenandoahInitMarkRootsClosure::do_oop_work(T* p) { - ShenandoahMark::mark_through_ref(p, _queue, _mark_context, false); +void ShenandoahInitMarkRootsClosure<GENERATION>::do_oop_work(T* p) { + // Only called from STW mark, should not be used to bootstrap old generation marking. + ShenandoahMark::mark_through_ref<T, GENERATION>(p, _queue, nullptr, _mark_context, false); } class ShenandoahSTWMarkTask : public WorkerTask { @@ -80,10 +87,10 @@ void ShenandoahSTWMarkTask::work(uint worker_id) { _mark->finish_mark(worker_id); } -ShenandoahSTWMark::ShenandoahSTWMark(bool full_gc) : - ShenandoahMark(), +ShenandoahSTWMark::ShenandoahSTWMark(ShenandoahGeneration* generation, bool full_gc) : + ShenandoahMark(generation), _root_scanner(full_gc ?
ShenandoahPhaseTimings::full_gc_mark : ShenandoahPhaseTimings::degen_gc_stw_mark), - _terminator(ShenandoahHeap::heap()->workers()->active_workers(), ShenandoahHeap::heap()->marking_context()->task_queues()), + _terminator(ShenandoahHeap::heap()->workers()->active_workers(), task_queues()), _full_gc(full_gc) { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a Shenandoah safepoint"); } @@ -96,7 +103,7 @@ void ShenandoahSTWMark::mark() { ShenandoahCodeRoots::arm_nmethods_for_mark(); // Weak reference processing - ShenandoahReferenceProcessor* rp = heap->ref_processor(); + ShenandoahReferenceProcessor* rp = heap->active_generation()->ref_processor(); rp->reset_thread_locals(); rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs()); @@ -115,6 +122,11 @@ { // Mark + if (_generation->is_young()) { + // But only scan the remembered set for young generation. + _generation->scan_remembered_set(false /* is_concurrent */); + } + StrongRootsScope scope(nworkers); ShenandoahSTWMarkTask task(this); heap->workers()->run_task(&task); assert(task_queues()->is_empty(), "Should be empty"); } - heap->mark_complete_marking_context(); + _generation->set_mark_complete(); end_mark(); // Mark is finished, can disarm the nmethods now. @@ -134,18 +146,35 @@ } void ShenandoahSTWMark::mark_roots(uint worker_id) { - ShenandoahInitMarkRootsClosure init_mark(task_queues()->queue(worker_id)); - _root_scanner.roots_do(&init_mark, worker_id); + switch (_generation->type()) { + case GLOBAL_NON_GEN: { + ShenandoahInitMarkRootsClosure<GLOBAL_NON_GEN> init_mark(task_queues()->queue(worker_id)); + _root_scanner.roots_do(&init_mark, worker_id); + break; + } + case GLOBAL_GEN: { + ShenandoahInitMarkRootsClosure<GLOBAL_GEN> init_mark(task_queues()->queue(worker_id)); + _root_scanner.roots_do(&init_mark, worker_id); + break; + } + case YOUNG: { + ShenandoahInitMarkRootsClosure<YOUNG> init_mark(task_queues()->queue(worker_id)); + _root_scanner.roots_do(&init_mark, worker_id); + break; + } + default: + ShouldNotReachHere(); + } } void ShenandoahSTWMark::finish_mark(uint worker_id) { ShenandoahPhaseTimings::Phase phase = _full_gc ? ShenandoahPhaseTimings::full_gc_mark : ShenandoahPhaseTimings::degen_gc_stw_mark; ShenandoahWorkerTimingsTracker timer(phase, ShenandoahPhaseTimings::ParallelMark, worker_id); - ShenandoahReferenceProcessor* rp = ShenandoahHeap::heap()->ref_processor(); + ShenandoahReferenceProcessor* rp = ShenandoahHeap::heap()->active_generation()->ref_processor(); StringDedup::Requests requests; - mark_loop(worker_id, &_terminator, rp, + mark_loop(_generation->type(), + worker_id, &_terminator, rp, false /* not cancellable */, ShenandoahStringDedup::is_enabled() ?
ALWAYS_DEDUP : NO_DEDUP, &requests); } - diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.hpp index 771e36e0ec1..59fafd36a0f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.hpp @@ -28,6 +28,7 @@ #include "gc/shenandoah/shenandoahMark.hpp" class ShenandoahSTWMarkTask; +class ShenandoahGeneration; class ShenandoahSTWMark : public ShenandoahMark { friend class ShenandoahSTWMarkTask; @@ -37,7 +38,7 @@ class ShenandoahSTWMark : public ShenandoahMark { TaskTerminator _terminator; bool _full_gc; public: - ShenandoahSTWMark(bool full_gc); + ShenandoahSTWMark(ShenandoahGeneration* generation, bool full_gc); void mark(); private: @@ -46,4 +47,3 @@ class ShenandoahSTWMark : public ShenandoahMark { }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSTWMARK_HPP - diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp new file mode 100644 index 00000000000..c773064fac0 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp @@ -0,0 +1,373 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + + +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOopClosures.inline.hpp" +#include "gc/shenandoah/shenandoahReferenceProcessor.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" +#include "logging/log.hpp" + +ShenandoahDirectCardMarkRememberedSet::ShenandoahDirectCardMarkRememberedSet(ShenandoahCardTable* card_table, size_t total_card_count) : + LogCardValsPerIntPtr(log2i_exact(sizeof(intptr_t)) - log2i_exact(sizeof(CardValue))), + LogCardSizeInWords(log2i_exact(CardTable::card_size_in_words())) { + + // Paranoid assert for LogCardValsPerIntPtr calculation above + assert(sizeof(intptr_t) > sizeof(CardValue), "LogCardValsPerIntPtr would underflow"); + + _heap = ShenandoahHeap::heap(); + _card_table = card_table; + _total_card_count = total_card_count; + _cluster_count = total_card_count / ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster; + _card_shift = CardTable::card_shift(); + + _byte_map = _card_table->byte_for_index(0); + + _whole_heap_base = _card_table->addr_for(_byte_map); + _byte_map_base = _byte_map - (uintptr_t(_whole_heap_base) >> _card_shift); + + assert(total_card_count % ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster == 0, "Invalid card count."); + assert(total_card_count > 0, "Card count cannot be zero."); +} + +// Merge any dirty values from write table into the read table, while leaving +// the write table unchanged. +void ShenandoahDirectCardMarkRememberedSet::merge_write_table(HeapWord* start, size_t word_count) { + size_t start_index = card_index_for_addr(start); +#ifdef ASSERT + // avoid querying card_index_for_addr() for an address past end of heap + size_t end_index = card_index_for_addr(start + word_count - 1) + 1; +#endif + assert(start_index % ((size_t)1 << LogCardValsPerIntPtr) == 0, "Expected a multiple of CardValsPerIntPtr"); + assert(end_index % ((size_t)1 << LogCardValsPerIntPtr) == 0, "Expected a multiple of CardValsPerIntPtr"); + + // We'll access in groups of intptr_t worth of card entries + intptr_t* const read_table = (intptr_t*) &(_card_table->read_byte_map())[start_index]; + intptr_t* const write_table = (intptr_t*) &(_card_table->write_byte_map())[start_index]; + + // Avoid division, use shift instead + assert(word_count % ((size_t)1 << (LogCardSizeInWords + LogCardValsPerIntPtr)) == 0, "Expected a multiple of CardSizeInWords*CardValsPerIntPtr"); + size_t const num = word_count >> (LogCardSizeInWords + LogCardValsPerIntPtr); + + for (size_t i = 0; i < num; i++) { + read_table[i] &= write_table[i]; + } +} + +// Destructively copy the write table to the read table, and clean the write table.
+void ShenandoahDirectCardMarkRememberedSet::reset_remset(HeapWord* start, size_t word_count) { + size_t start_index = card_index_for_addr(start); +#ifdef ASSERT + // avoid querying card_index_for_addr() for an address past end of heap + size_t end_index = card_index_for_addr(start + word_count - 1) + 1; +#endif + assert(start_index % ((size_t)1 << LogCardValsPerIntPtr) == 0, "Expected a multiple of CardValsPerIntPtr"); + assert(end_index % ((size_t)1 << LogCardValsPerIntPtr) == 0, "Expected a multiple of CardValsPerIntPtr"); + + // We'll access in groups of intptr_t worth of card entries + intptr_t* const read_table = (intptr_t*) &(_card_table->read_byte_map())[start_index]; + intptr_t* const write_table = (intptr_t*) &(_card_table->write_byte_map())[start_index]; + + // Avoid division, use shift instead + assert(word_count % ((size_t)1 << (LogCardSizeInWords + LogCardValsPerIntPtr)) == 0, "Expected a multiple of CardSizeInWords*CardValsPerIntPtr"); + size_t const num = word_count >> (LogCardSizeInWords + LogCardValsPerIntPtr); + + for (size_t i = 0; i < num; i++) { + read_table[i] = write_table[i]; + write_table[i] = CardTable::clean_card_row_val(); + } +} + +ShenandoahScanRememberedTask::ShenandoahScanRememberedTask(ShenandoahObjToScanQueueSet* queue_set, + ShenandoahObjToScanQueueSet* old_queue_set, + ShenandoahReferenceProcessor* rp, + ShenandoahRegionChunkIterator* work_list, bool is_concurrent) : + WorkerTask("Scan Remembered Set"), + _queue_set(queue_set), _old_queue_set(old_queue_set), _rp(rp), _work_list(work_list), _is_concurrent(is_concurrent) { + log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(ShenandoahHeap::heap()->is_old_bitmap_stable())); +} + +void ShenandoahScanRememberedTask::work(uint worker_id) { + if (_is_concurrent) { + // This sets up a thread local reference to the worker_id which is needed by the weak reference processor. + ShenandoahConcurrentWorkerSession worker_session(worker_id); + ShenandoahSuspendibleThreadSetJoiner stsj; + do_work(worker_id); + } else { + // This sets up a thread local reference to the worker_id which is needed by the weak reference processor. + ShenandoahParallelWorkerSession worker_session(worker_id); + do_work(worker_id); + } +} + +void ShenandoahScanRememberedTask::do_work(uint worker_id) { + ShenandoahWorkerTimingsTracker x(ShenandoahPhaseTimings::init_scan_rset, ShenandoahPhaseTimings::ScanClusters, worker_id); + + ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id); + ShenandoahObjToScanQueue* old = _old_queue_set == nullptr ? 
nullptr : _old_queue_set->queue(worker_id); + ShenandoahMarkRefsClosure<YOUNG> cl(q, _rp, old); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + RememberedScanner* scanner = heap->card_scan(); + + // set up thread local closure for shen ref processor + _rp->set_mark_closure(worker_id, &cl); + struct ShenandoahRegionChunk assignment; + while (_work_list->next(&assignment)) { + ShenandoahHeapRegion* region = assignment._r; + log_debug(gc)("ShenandoahScanRememberedTask::do_work(%u), processing slice of region " + SIZE_FORMAT " at offset " SIZE_FORMAT ", size: " SIZE_FORMAT, + worker_id, region->index(), assignment._chunk_offset, assignment._chunk_size); + if (region->is_old()) { + size_t cluster_size = + CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster; + size_t clusters = assignment._chunk_size / cluster_size; + assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignments must align on cluster boundaries"); + HeapWord* end_of_range = region->bottom() + assignment._chunk_offset + assignment._chunk_size; + + // During concurrent mark, region->top() equals TAMS with respect to the current young-gen pass. + if (end_of_range > region->top()) { + end_of_range = region->top(); + } + scanner->process_region_slice(region, assignment._chunk_offset, clusters, end_of_range, &cl, false, worker_id); + } +#ifdef ENABLE_REMEMBERED_SET_CANCELLATION + // This check is currently disabled to avoid crashes that occur + // when we try to cancel remembered set scanning; it should be re-enabled + // after the issues are fixed, as it would allow more prompt cancellation and + // transition to degenerated / full GCs. Note that work that has been assigned/ + // claimed above must be completed before we return here upon cancellation. + if (heap->check_cancelled_gc_and_yield(_is_concurrent)) { + return; + } +#endif + } +} + +size_t ShenandoahRegionChunkIterator::calc_regular_group_size() { + // The group size is calculated from the number of regions. Suppose the heap has N regions. The first group processes + // N/2 regions. The second group processes N/4 regions, the third group N/8 regions and so on. + // Note that the infinite series N/2 + N/4 + N/8 + N/16 + ... sums to N. + // + // The normal group size is the number of regions / 2. + // + // In the case that the region_size_words is greater than _maximum_chunk_size_words, the first group_size is + // larger than the normal group size because each chunk in the group will be smaller than the region size. + // + // The last group also has more than the normal entries because it finishes the total scanning effort. The chunk sizes are + // different for each group. The intention is that the first group processes roughly half of the heap, the second processes + // half of the remaining heap, the third processes half of what remains and so on. The smallest chunk size + // is represented by _smallest_chunk_size_words. We do not divide work any smaller than this. + // + size_t group_size = _heap->num_regions() / 2; + return group_size; +}
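To make the geometric split concrete, here is a standalone sketch of the series described above; the 1024-region heap and the region size in words are illustrative assumptions, not values from the patch:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t num_regions  = 1024;
  const size_t region_words = size_t(1) << 22;   // assumed region size in words
  const size_t group_size   = num_regions / 2;   // chunks per regular group
  size_t chunk_words = region_words;             // first group: one region per chunk
  size_t spanned = 0;
  const size_t total = num_regions * region_words;
  for (int g = 0; g < 5 && spanned < total; g++) {
    size_t span = group_size * chunk_words;
    spanned += span;
    printf("group %d: %zu chunks of %zu words each, %.1f%% of heap cumulative\n",
           g, group_size, chunk_words, 100.0 * spanned / total);
    chunk_words /= 2;                            // each later group halves its chunk size
  }
  // Prints 50.0%, 75.0%, 87.5%, ... : the series N/2 + N/4 + N/8 + ... -> N.
  return 0;
}
```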
+size_t ShenandoahRegionChunkIterator::calc_first_group_chunk_size_b4_rebalance() { + size_t words_in_first_chunk = ShenandoahHeapRegion::region_size_words(); + return words_in_first_chunk; +} + +size_t ShenandoahRegionChunkIterator::calc_num_groups() { + size_t total_heap_size = _heap->num_regions() * ShenandoahHeapRegion::region_size_words(); + size_t num_groups = 0; + size_t cumulative_group_span = 0; + size_t current_group_span = _first_group_chunk_size_b4_rebalance * _regular_group_size; + size_t smallest_group_span = smallest_chunk_size_words() * _regular_group_size; + while ((num_groups < _maximum_groups) && (cumulative_group_span + current_group_span <= total_heap_size)) { + num_groups++; + cumulative_group_span += current_group_span; + if (current_group_span <= smallest_group_span) { + break; + } else { + current_group_span /= 2; // Each group spans half of what the preceding group spanned. + } + } + // Loop post condition: + // num_groups <= _maximum_groups + // cumulative_group_span is the memory spanned by num_groups + // current_group_span is the span of the last fully populated group (assuming loop iterates at least once) + // each of num_groups is fully populated with _regular_group_size chunks in each + // Non post conditions: + // cumulative_group_span may be less than total_heap_size for one or more of the following reasons + // a) The number of regions remaining to be spanned is smaller than a complete group, or + // b) We have filled up all groups through _maximum_groups and still have not spanned all regions + + if (cumulative_group_span < total_heap_size) { + // We've got more regions to span + if ((num_groups < _maximum_groups) && (current_group_span > smallest_group_span)) { + num_groups++; // Place all remaining regions into a new not-full group (chunk_size half that of previous group) + } + // Else we are unable to create a new group because we've exceeded the number of allowed groups or have reached the + // minimum chunk size. + + // Any remaining regions will be treated as if they are part of the most recently created group. This group will + // have more than _regular_group_size chunks within it.
+ } + return num_groups; +} + +size_t ShenandoahRegionChunkIterator::calc_total_chunks() { + size_t region_size_words = ShenandoahHeapRegion::region_size_words(); + size_t unspanned_heap_size = _heap->num_regions() * region_size_words; + size_t num_chunks = 0; + size_t cumulative_group_span = 0; + size_t current_group_span = _first_group_chunk_size_b4_rebalance * _regular_group_size; + size_t smallest_group_span = smallest_chunk_size_words() * _regular_group_size; + + // The first group gets special handling because the first chunk size can be no larger than _largest_chunk_size_words + if (region_size_words > _maximum_chunk_size_words) { + // In the case that we shrink the first group's chunk size, certain other groups will also be subsumed within the first group + size_t effective_chunk_size = _first_group_chunk_size_b4_rebalance; + while (effective_chunk_size >= _maximum_chunk_size_words) { + num_chunks += current_group_span / _maximum_chunk_size_words; + unspanned_heap_size -= current_group_span; + effective_chunk_size /= 2; + current_group_span /= 2; + } + } else { + num_chunks = _regular_group_size; + unspanned_heap_size -= current_group_span; + current_group_span /= 2; + } + size_t spanned_groups = 1; + while (unspanned_heap_size > 0) { + if (current_group_span <= unspanned_heap_size) { + unspanned_heap_size -= current_group_span; + num_chunks += _regular_group_size; + spanned_groups++; + + // _num_groups is the number of groups required to span the configured heap size. We are not allowed + // to change the number of groups. The last group is responsible for spanning all chunks not spanned + // by previously processed groups. + if (spanned_groups >= _num_groups) { + // The last group has more than _regular_group_size entries. + size_t chunk_span = current_group_span / _regular_group_size; + size_t extra_chunks = unspanned_heap_size / chunk_span; + assert (extra_chunks * chunk_span == unspanned_heap_size, "Chunks must precisely span regions"); + num_chunks += extra_chunks; + return num_chunks; + } else if (current_group_span <= smallest_group_span) { + // We cannot introduce new groups because we've reached the lower bound on group size. So this last + // group may hold extra chunks. + size_t chunk_span = smallest_chunk_size_words(); + size_t extra_chunks = unspanned_heap_size / chunk_span; + assert (extra_chunks * chunk_span == unspanned_heap_size, "Chunks must precisely span regions"); + num_chunks += extra_chunks; + return num_chunks; + } else { + current_group_span /= 2; + } + } else { + // This last group has fewer than _regular_group_size entries. 
+ size_t chunk_span = current_group_span / _regular_group_size; + size_t last_group_size = unspanned_heap_size / chunk_span; + assert (last_group_size * chunk_span == unspanned_heap_size, "Chunks must precisely span regions"); + num_chunks += last_group_size; + return num_chunks; + } + } + return num_chunks; +} + +ShenandoahRegionChunkIterator::ShenandoahRegionChunkIterator(size_t worker_count) : + ShenandoahRegionChunkIterator(ShenandoahHeap::heap(), worker_count) +{ +} + +ShenandoahRegionChunkIterator::ShenandoahRegionChunkIterator(ShenandoahHeap* heap, size_t worker_count) : + _heap(heap), + _regular_group_size(calc_regular_group_size()), + _first_group_chunk_size_b4_rebalance(calc_first_group_chunk_size_b4_rebalance()), + _num_groups(calc_num_groups()), + _total_chunks(calc_total_chunks()), + _index(0) +{ +#ifdef ASSERT + size_t expected_chunk_size_words = _clusters_in_smallest_chunk * CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster; + assert(smallest_chunk_size_words() == expected_chunk_size_words, "_smallest_chunk_size (" SIZE_FORMAT") is not valid because it does not equal (" SIZE_FORMAT ")", + smallest_chunk_size_words(), expected_chunk_size_words); +#endif + assert(_num_groups <= _maximum_groups, + "The number of remembered set scanning groups must be less than or equal to maximum groups"); + assert(smallest_chunk_size_words() << (_maximum_groups - 1) == _maximum_chunk_size_words, + "Maximum number of groups needs to span maximum chunk size to smallest chunk size"); + + size_t words_in_region = ShenandoahHeapRegion::region_size_words(); + _region_index[0] = 0; + _group_offset[0] = 0; + if (words_in_region > _maximum_chunk_size_words) { + // In the case that we shrink the first group's chunk size, certain other groups will also be subsumed within the first group + size_t num_chunks = 0; + size_t effective_chunk_size = _first_group_chunk_size_b4_rebalance; + size_t current_group_span = effective_chunk_size * _regular_group_size; + while (effective_chunk_size >= _maximum_chunk_size_words) { + num_chunks += current_group_span / _maximum_chunk_size_words; + effective_chunk_size /= 2; + current_group_span /= 2; + } + _group_entries[0] = num_chunks; + _group_chunk_size[0] = _maximum_chunk_size_words; + } else { + _group_entries[0] = _regular_group_size; + _group_chunk_size[0] = _first_group_chunk_size_b4_rebalance; + } + + size_t previous_group_span = _group_entries[0] * _group_chunk_size[0]; + for (size_t i = 1; i < _num_groups; i++) { + size_t previous_group_entries = (i == 1)? _group_entries[0]: (_group_entries[i-1] - _group_entries[i-2]); + _group_chunk_size[i] = _group_chunk_size[i-1] / 2; + size_t chunks_in_group = _regular_group_size; + size_t this_group_span = _group_chunk_size[i] * chunks_in_group; + size_t total_span_of_groups = previous_group_span + this_group_span; + _region_index[i] = previous_group_span / words_in_region; + _group_offset[i] = previous_group_span % words_in_region; + _group_entries[i] = _group_entries[i-1] + _regular_group_size; + previous_group_span = total_span_of_groups; + } + if (_group_entries[_num_groups-1] < _total_chunks) { + assert((_total_chunks - _group_entries[_num_groups-1]) * _group_chunk_size[_num_groups-1] + previous_group_span == + heap->num_regions() * words_in_region, "Total region chunks (" SIZE_FORMAT + ") do not span total heap regions (" SIZE_FORMAT ")", _total_chunks, _heap->num_regions()); + previous_group_span += (_total_chunks - _group_entries[_num_groups-1]) * _group_chunk_size[_num_groups-1]; + _group_entries[_num_groups-1] = _total_chunks; + } + assert(previous_group_span == heap->num_regions() * words_in_region, "Total region chunks (" SIZE_FORMAT + ") do not span total heap regions (" SIZE_FORMAT "): " SIZE_FORMAT " does not equal " SIZE_FORMAT, + _total_chunks, _heap->num_regions(), previous_group_span, heap->num_regions() * words_in_region); + + // Not necessary, but keeps things tidy + for (size_t i = _num_groups; i < _maximum_groups; i++) { + _region_index[i] = 0; + _group_offset[i] = 0; + _group_entries[i] = _group_entries[i-1]; + _group_chunk_size[i] = 0; + } +} + +void ShenandoahRegionChunkIterator::reset() { + _index = 0; +}
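The iterator hands each chunk to exactly one worker: as the claiming loop in ShenandoahScanRememberedTask::do_work() above shows, workers simply call next() until the shared iterator is drained, so faster workers naturally absorb the small late-group chunks. A simplified standalone model of the claiming step (illustrative names, not the HotSpot API):

```cpp
#include <atomic>
#include <cstdio>

struct Chunk { size_t region_index, offset_words, size_words; };

struct ChunkIterator {
  std::atomic<size_t> _index{0};
  size_t _total_chunks;
  explicit ChunkIterator(size_t total) : _total_chunks(total) {}

  // A shared index is bumped with fetch_add, so each chunk is claimed by
  // exactly one worker without locks.
  bool next(Chunk* out) {
    size_t i = _index.fetch_add(1, std::memory_order_relaxed);
    if (i >= _total_chunks) return false;
    out->region_index = i;   // placeholder mapping; the real iterator decodes
    out->offset_words = 0;   // group/chunk geometry from the claimed index
    out->size_words   = 0;
    return true;
  }
};

int main() {
  ChunkIterator it(4);
  Chunk c;
  while (it.next(&c)) printf("claimed chunk %zu\n", c.region_index);
  return 0;
}
```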
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp new file mode 100644 index 00000000000..19289054c28 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp @@ -0,0 +1,1051 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP + +// Terminology used within this source file: +// +// Card Entry: This is the information that identifies whether a +// particular card-table entry is Clean or Dirty. A clean +// card entry denotes that the associated memory does not +// hold references to young-gen memory. +// +// Card Region, aka +// Card Memory: This is the region of memory that is associated with a +// particular card entry.
+//
+// Card Cluster: A card cluster represents 64 card entries.  A card
+//               cluster is the minimal amount of work performed at a
+//               time by a parallel thread.  Note that the work required
+//               to scan a card cluster is somewhat variable in that the
+//               required effort depends on how many cards are dirty, how
+//               many references are held within the objects that span a
+//               DIRTY card's memory, and on the size of the object
+//               that spans the end of a DIRTY card's memory (because
+//               that object, if it's not an array, may need to be scanned in
+//               its entirety, when the object is imprecisely dirtied.  Imprecise
+//               dirtying is when the card corresponding to the object header
+//               is dirtied, rather than the card on which the updated field lives).
+//               To better balance work amongst them, parallel worker threads dynamically
+//               claim clusters and are flexible in the number of clusters they
+//               process.
+//
+// A cluster represents a "natural" quantum of work to be performed by
+// a parallel GC thread's background remembered set scanning efforts.
+// The notion of cluster is similar to the notion of stripe in the
+// implementation of parallel GC card scanning.  However, a cluster is
+// typically smaller than a stripe, enabling finer grain division of
+// labor between multiple threads, and potentially better load balancing
+// when dirty cards are not uniformly distributed in the heap, as is often
+// the case with generational workloads where more recently promoted objects
+// may be dirtied more frequently than older objects.
+//
+// For illustration, consider the following possible JVM configurations:
+//
+//   Scenario 1:
+//     RegionSize is 128 MB
+//     Span of a card entry is 512 B
+//     Each card table entry consumes 1 B
+//     Assume one long word (8 B) of the card table represents a cluster.
+//       This long word holds 8 card table entries, spanning a
+//       total of 8 * 512 B = 4 KB of the heap
+//     The number of clusters per region is 128 MB / 4 KB = 32 K
+//
+//   Scenario 2:
+//     RegionSize is 128 MB
+//     Span of each card entry is 128 B
+//     Each card table entry consumes 1 bit
+//     Assume one int word (4 B) of the card table represents a cluster.
+//       This int word holds 32 b / 1 b = 32 card table entries, spanning a
+//       total of 32 * 128 B = 4 KB of the heap
+//     The number of clusters per region is 128 MB / 4 KB = 32 K
+//
+//   Scenario 3:
+//     RegionSize is 128 MB
+//     Span of each card entry is 512 B
+//     Each card table entry consumes 1 bit
+//     Assume one long word (8 B) of card table represents a cluster.
+// This long word holds 64 b/ 1 b = 64 card table entries, spanning a +// total of 64 * 512 B = 32 KB of the heap +// The number of clusters per region is 128 MB / 32 KB = 4 K +// +// At the start of a new young-gen concurrent mark pass, the gang of +// Shenandoah worker threads collaborate in performing the following +// actions: +// +// Let old_regions = number of ShenandoahHeapRegion comprising +// old-gen memory +// Let region_size = ShenandoahHeapRegion::region_size_bytes() +// represent the number of bytes in each region +// Let clusters_per_region = region_size / 512 +// Let rs represent the relevant RememberedSet implementation +// (an instance of ShenandoahDirectCardMarkRememberedSet or an instance +// of a to-be-implemented ShenandoahBufferWithSATBRememberedSet) +// +// for each ShenandoahHeapRegion old_region in the whole heap +// determine the cluster number of the first cluster belonging +// to that region +// for each cluster contained within that region +// Assure that exactly one worker thread processes each +// cluster, each thread making a series of invocations of the +// following: +// +// rs->process_clusters(worker_id, ReferenceProcessor *, +// ShenandoahConcurrentMark *, cluster_no, cluster_count, +// HeapWord *end_of_range, OopClosure *oops); +// +// For efficiency, divide up the clusters so that different threads +// are responsible for processing different clusters. Processing costs +// may vary greatly between clusters for the following reasons: +// +// a) some clusters contain mostly dirty cards and other +// clusters contain mostly clean cards +// b) some clusters contain mostly primitive data and other +// clusters contain mostly reference data +// c) some clusters are spanned by very large non-array objects that +// begin in some other cluster. When a large non-array object +// beginning in a preceding cluster spans large portions of +// this cluster, then because of imprecise dirtying, the +// portion of the object in this cluster may be clean, but +// will need to be processed by the worker responsible for +// this cluster, potentially increasing its work. +// d) in the case that the end of this cluster is spanned by a +// very large non-array object, the worker for this cluster will +// be responsible for processing the portion of the object +// in this cluster. +// +// Though an initial division of labor between marking threads may +// assign equal numbers of clusters to be scanned by each thread, it +// should be expected that some threads will finish their assigned +// work before others. Therefore, some amount of the full remembered +// set scanning effort should be held back and assigned incrementally +// to the threads that end up with excess capacity. Consider the +// following strategy for dividing labor: +// +// 1. Assume there are 8 marking threads and 1024 remembered +// set clusters to be scanned. +// 2. Assign each thread to scan 64 clusters. This leaves +// 512 (1024 - (8*64)) clusters to still be scanned. +// 3. As the 8 server threads complete previous cluster +// scanning assignments, issue each of the next 8 scanning +// assignments as units of 32 additional cluster each. +// In the case that there is high variance in effort +// associated with previous cluster scanning assignments, +// multiples of these next assignments may be serviced by +// the server threads that were previously assigned lighter +// workloads. +// 4. 
Make subsequent scanning assignments as follows:
+//       a) 8 assignments of size 16 clusters
+//       b) 8 assignments of size 8 clusters
+//       c) 16 assignments of size 4 clusters
+//
+// When there is no more remembered set processing work to be
+// assigned to a newly idled worker thread, that thread can move
+// on to work on other tasks associated with root scanning until such
+// time as all clusters have been examined.
+//
+// Remembered set scanning is designed to run concurrently with
+// mutator threads, with multiple concurrent workers.  Furthermore, the
+// current implementation of remembered set scanning never clears a
+// card once it has been marked.
+//
+// These limitations will be addressed in future enhancements to the
+// existing implementation.
+
+#include <stdint.h>
+#include "gc/shared/workerThread.hpp"
+#include "gc/shenandoah/shenandoahCardStats.hpp"
+#include "gc/shenandoah/shenandoahCardTable.hpp"
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class ShenandoahReferenceProcessor;
+class ShenandoahConcurrentMark;
+class ShenandoahHeap;
+class ShenandoahHeapRegion;
+class ShenandoahRegionIterator;
+class ShenandoahMarkingContext;
+
+class CardTable;
+typedef CardTable::CardValue CardValue;
+
+class ShenandoahDirectCardMarkRememberedSet: public CHeapObj<mtGC> {
+
+private:
+
+  // Use symbolic constants defined in cardTable.hpp
+  //  CardTable::card_shift = 9;
+  //  CardTable::card_size = 512;
+  //  CardTable::card_size_in_words = 64;
+  //  CardTable::clean_card_val()
+  //  CardTable::dirty_card_val()
+
+  const size_t LogCardValsPerIntPtr;    // log2 of the number of card values (entries) in an intptr_t
+  const size_t LogCardSizeInWords;      // log2 of the size of a card in heap words
+
+  ShenandoahHeap *_heap;
+  ShenandoahCardTable *_card_table;
+  size_t _card_shift;
+  size_t _total_card_count;
+  size_t _cluster_count;
+  HeapWord *_whole_heap_base;   // Points to first HeapWord of data contained within heap memory
+  CardValue* _byte_map;         // Points to first entry within the card table
+  CardValue* _byte_map_base;    // Points to byte_map minus the bias computed from address of heap memory
+
+public:
+
+  // count is the number of cards represented by the card table.
+  ShenandoahDirectCardMarkRememberedSet(ShenandoahCardTable *card_table, size_t total_card_count);
+
+  // Card index is zero-based relative to _byte_map.
+  size_t last_valid_index() const;
+  size_t total_cards() const;
+  size_t card_index_for_addr(HeapWord *p) const;
+  HeapWord *addr_for_card_index(size_t card_index) const;
+  inline const CardValue* get_card_table_byte_map(bool write_table) const;
+  inline bool is_card_dirty(size_t card_index) const;
+  inline bool is_write_card_dirty(size_t card_index) const;
+  inline void mark_card_as_dirty(size_t card_index);
+  inline void mark_range_as_dirty(size_t card_index, size_t num_cards);
+  inline void mark_card_as_clean(size_t card_index);
+  inline void mark_range_as_clean(size_t card_index, size_t num_cards);
+  inline bool is_card_dirty(HeapWord *p) const;
+  inline void mark_card_as_dirty(HeapWord *p);
+  inline void mark_range_as_dirty(HeapWord *p, size_t num_heap_words);
+  inline void mark_card_as_clean(HeapWord *p);
+  inline void mark_range_as_clean(HeapWord *p, size_t num_heap_words);
+  inline size_t cluster_count() const;
+
+  // Called by GC thread at start of concurrent mark to exchange roles of read and write remembered sets.
+  // Not currently used because the mutator write barrier does not honor changes to the location of the card table.
+  // Instead of swap_remset, the current implementation of concurrent remembered set scanning does reset_remset
+  // in parallel threads, each invocation processing one entire HeapRegion at a time.
+  void swap_remset() { _card_table->swap_card_tables(); }
+
+  // Merge any dirty values from the write table into the read table, while leaving
+  // the write table unchanged.
+  void merge_write_table(HeapWord* start, size_t word_count);
+
+  // Destructively copy the write table to the read table, and clean the write table.
+  void reset_remset(HeapWord* start, size_t word_count);
+
+  // Called by GC thread after scanning old remembered set in order to prepare for next GC pass
+  void clear_old_remset() { _card_table->clear_read_table(); }
+};
+
+// A ShenandoahCardCluster represents the minimal unit of work
+// performed by independent parallel GC threads during scanning of
+// remembered sets.
+//
+// The GC threads that perform card-table remembered set scanning may
+// overwrite card-table entries to mark them as clean in the case that
+// the associated memory no longer holds references to young-gen
+// memory.  Rather than access the card-table entries directly, all GC
+// thread access to card-table information is made by way of the
+// ShenandoahCardCluster data abstraction.  This abstraction
+// effectively manages access to multiple possible underlying
+// remembered set implementations, including a traditional card-table
+// approach and a SATB-based approach.
+//
+// The API services represent a compromise between efficiency and
+// convenience.
+//
+// Multiple GC threads scan the remembered set
+// in parallel.  The desire is to divide the complete scanning effort
+// into multiple clusters of work that can be independently processed
+// by individual threads without need for synchronizing efforts
+// between the work performed by each task.  The term "cluster" of
+// work is similar to the term "stripe" as used in the implementation
+// of Parallel GC.
+//
+// Complexity arises when an object to be scanned crosses the boundary
+// between adjacent cluster regions.  Here is the protocol that we currently
+// follow:
+//
+//  1. The thread responsible for scanning the cards in a cluster modifies
+//     the associated card-table entries.  Only cards that are dirty are
+//     processed, except as described below for the case of objects that
+//     straddle more than one card.
+//  2. Object arrays are precisely dirtied, so only the portion of the obj-array
+//     that overlaps the range of dirty cards in its cluster is scanned
+//     by each worker thread.  This holds for portions of obj-arrays that extend
+//     over clusters processed by different workers, with each worker responsible
+//     for scanning the portion of the obj-array overlapping the dirty cards in
+//     its cluster.
+//  3. Non-array objects are precisely dirtied by the interpreter and the compilers.
+//     For such objects that extend over multiple cards, or even multiple clusters,
+//     the entire object is scanned by the worker that processes the (dirty) card on
+//     which the object's header lies.  (However, GC workers should precisely dirty the
+//     cards with inter-regional/inter-generational pointers in the body of this object,
+//     thus making subsequent scans potentially less expensive.)  Such larger non-array
+//     objects are relatively rare.
+//
+//  A possible criticism:
+//  C. The representation of pointer location descriptive information
+//     within Klass representations is not designed for efficient
+//     "random access".  An alternative approach to this design would
+//     be to scan very large objects multiple times, once for each
+//     cluster that is spanned by the object's range.  This reduces
+//     unnecessary overscan, but it introduces different sorts of
+//     overhead effort:
+//       i) For each spanned cluster, we have to look up the start of
+//          the crossing object.
+//      ii) Each time we scan the very large object, we have to
+//          sequentially walk through its pointer location
+//          descriptors, skipping over all of the pointers that
+//          precede the start of the range of addresses that we
+//          consider relevant.
+
+
+// Because old-gen heap memory is not necessarily contiguous, and
+// because cards are not necessarily maintained for young-gen memory,
+// consecutive card numbers do not necessarily correspond to consecutive
+// address ranges.  For the traditional direct-card-marking
+// implementation of this interface, consecutive card numbers are
+// likely to correspond to contiguous regions of memory, but this
+// should not be assumed.  Instead, rely only upon the following:
+//
+//  1. All card numbers for cards pertaining to the same
+//     ShenandoahHeapRegion are consecutively numbered.
+//  2. In the case that neighboring ShenandoahHeapRegions both
+//     represent old-gen memory, the card regions that span the
+//     boundary between these neighboring heap regions will be
+//     consecutively numbered.
+//  3. (A corollary) In the case that an old-gen object straddles the
+//     boundary between two heap regions, the card regions that
+//     correspond to the span of this object will be consecutively
+//     numbered.
+//
+// ShenandoahCardCluster abstracts access to the remembered set
+// and also keeps track of crossing map information to allow efficient
+// resolution of object start addresses.
+//
+// ShenandoahCardCluster supports all of the services of
+// RememberedSet, plus it supports register_object() and lookup_object().
+// Note that we only need to register the start addresses of the object that
+// overlays the first address of a card; we need to do this for every card.
+// In other words, register_object() checks if the object crosses a card boundary,
+// and updates the offset value for each card that the object crosses into.
+// For objects that don't straddle cards, nothing needs to be done.
+//
+// The RememberedSet template parameter is intended to represent either
+// ShenandoahDirectCardMarkRememberedSet, or a to-be-implemented
+// ShenandoahBufferWithSATBRememberedSet.
+template<typename RememberedSet>
+class ShenandoahCardCluster: public CHeapObj<mtGC> {
+
+private:
+  RememberedSet *_rs;
+
+public:
+  static const size_t CardsPerCluster = 64;
+
+private:
+  typedef struct cross_map { uint8_t first; uint8_t last; } xmap;
+  typedef union crossing_info { uint16_t short_word; xmap offsets; } crossing_info;
+
+  // The ObjectStartsInCardRegion bit is set within crossing_info.offsets.first iff at least one object starts within
+  // a particular card region.  We pack this bit into the start byte under the assumption that the start byte is accessed less
+  // frequently than the last byte.  This is true when the number of clean cards is greater than the number of dirty cards.
+  static const uint16_t ObjectStartsInCardRegion = 0x80;
+  static const uint16_t FirstStartBits           = 0x7f;
+
+  // Check that we have enough bits to store the largest possible offset into a card for an object start.
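+  // (As a worked check, assuming LP64: the largest card is 1024 bytes = 128 heap words, so the
+  // largest word offset at which an object can start is 127 == 0x7f == FirstStartBits.)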
+  // The value for maximum card size is based on the constraints for GCCardSizeInBytes in gc_globals.hpp.
+  static const int MaxCardSize = NOT_LP64(512) LP64_ONLY(1024);
+  STATIC_ASSERT((MaxCardSize / HeapWordSize) - 1 <= FirstStartBits);
+
+  crossing_info *object_starts;
+
+public:
+  // If we're setting first_start, assume the card has an object.
+  inline void set_first_start(size_t card_index, uint8_t value) {
+    object_starts[card_index].offsets.first = ObjectStartsInCardRegion | value;
+  }
+
+  inline void set_last_start(size_t card_index, uint8_t value) {
+    object_starts[card_index].offsets.last = value;
+  }
+
+  inline void set_starts_object_bit(size_t card_index) {
+    object_starts[card_index].offsets.first |= ObjectStartsInCardRegion;
+  }
+
+  inline void clear_starts_object_bit(size_t card_index) {
+    object_starts[card_index].offsets.first &= ~ObjectStartsInCardRegion;
+  }
+
+  // Returns true iff an object is known to start within the card memory associated with card card_index.
+  inline bool starts_object(size_t card_index) const {
+    return (object_starts[card_index].offsets.first & ObjectStartsInCardRegion) != 0;
+  }
+
+  inline void clear_objects_in_range(HeapWord *addr, size_t num_words) {
+    size_t card_index = _rs->card_index_for_addr(addr);
+    size_t last_card_index = _rs->card_index_for_addr(addr + num_words - 1);
+    while (card_index <= last_card_index) {
+      object_starts[card_index++].short_word = 0;
+    }
+  }
+
+  ShenandoahCardCluster(RememberedSet *rs) {
+    _rs = rs;
+    // TODO: We don't really need object_starts entries for every card entry.  We only need these for
+    // the card entries that correspond to old-gen memory.  But for now, let's be quick and dirty.
+    object_starts = NEW_C_HEAP_ARRAY(crossing_info, rs->total_cards(), mtGC);
+    for (size_t i = 0; i < rs->total_cards(); i++) {
+      object_starts[i].short_word = 0;
+    }
+  }
+
+  ~ShenandoahCardCluster() {
+    FREE_C_HEAP_ARRAY(crossing_info, object_starts);
+    object_starts = nullptr;
+  }
+
+  // There is one entry within the object_starts array for each card entry.
+  //
+  //  Suppose multiple garbage objects are coalesced during GC sweep
+  //  into a single larger "free segment".  As each two objects are
+  //  coalesced together, the start information pertaining to the second
+  //  object must be removed from the object_starts array.  If the
+  //  second object had been the first object within card memory,
+  //  the new first object is the object that follows that object if
+  //  it starts within the same card memory, or NoObject if the
+  //  following object starts within the following cluster.  If the
+  //  second object had been the last object in the card memory,
+  //  replace this entry with the newly coalesced object if it starts
+  //  within the same card memory, or with NoObject if it starts in a
+  //  preceding card's memory.
+  //
+  //  Suppose a large free segment is divided into a smaller free
+  //  segment and a new object.  The second part of the newly divided
+  //  memory must be registered as a new object, overwriting at most
+  //  one first_start and one last_start entry.  Note that one of the
+  //  newly divided two objects might be a new GCLAB.
+  //
+  //  Suppose postprocessing of a GCLAB finds that the original GCLAB
+  //  has been divided into N objects.  Each of the N newly allocated
+  //  objects will be registered, overwriting at most one first_start
+  //  and one last_start entry.
+  //
+  //  Object registration operations are not linear in the length of
+  //  the registered objects.
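+  //
+  //  As a concrete illustration of the encoding above (hypothetical offsets): with 512 B cards
+  //  (64 heap words), if the only object starting within card i begins at word offset 13 of that
+  //  card, then after registration
+  //    object_starts[i].offsets.first == (ObjectStartsInCardRegion | 13)   // 0x8d
+  //    object_starts[i].offsets.last  == 13
+  //  so starts_object(i) is true, and get_first_start(i) == get_last_start(i) == 13.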
+ // + // Consider further the following observations regarding object + // registration costs: + // + // 1. The cost is paid once for each old-gen object (Except when + // an object is demoted and repromoted, in which case we would + // pay the cost again). + // 2. The cost can be deferred so that there is no urgency during + // mutator copy-on-first-access promotion. Background GC + // threads will update the object_starts array by post- + // processing the contents of retired PLAB buffers. + // 3. The bet is that these costs are paid relatively rarely + // because: + // a) Most objects die young and objects that die in young-gen + // memory never need to be registered with the object_starts + // array. + // b) Most objects that are promoted into old-gen memory live + // there without further relocation for a relatively long + // time, so we get a lot of benefit from each investment + // in registering an object. + +public: + + // The starting locations of objects contained within old-gen memory + // are registered as part of the remembered set implementation. This + // information is required when scanning dirty card regions that are + // spanned by objects beginning within preceding card regions. It + // is necessary to find the first and last objects that begin within + // this card region. Starting addresses of objects are required to + // find the object headers, and object headers provide information + // about which fields within the object hold addresses. + // + // The old-gen memory allocator invokes register_object() for any + // object that is allocated within old-gen memory. This identifies + // the starting addresses of objects that span boundaries between + // card regions. + // + // It is not necessary to invoke register_object at the very instant + // an object is allocated. It is only necessary to invoke it + // prior to the next start of a garbage collection concurrent mark + // or concurrent update-references phase. An "ideal" time to register + // objects is during post-processing of a GCLAB after the GCLAB is + // retired due to depletion of its memory. + // + // register_object() does not perform synchronization. In the case + // that multiple threads are registering objects whose starting + // addresses are within the same cluster, races between these + // threads may result in corruption of the object-start data + // structures. Parallel GC threads should avoid registering objects + // residing within the same cluster by adhering to the following + // coordination protocols: + // + // 1. Align thread-local GCLAB buffers with some TBD multiple of + // card clusters. The card cluster size is 32 KB. If the + // desired GCLAB size is 128 KB, align the buffer on a multiple + // of 4 card clusters. + // 2. Post-process the contents of GCLAB buffers to register the + // objects allocated therein. Allow one GC thread at a + // time to do the post-processing of each GCLAB. + // 3. Since only one GC thread at a time is registering objects + // belonging to a particular allocation buffer, no locking + // is performed when registering these objects. + // 4. Any remnant of unallocated memory within an expended GC + // allocation buffer is not returned to the old-gen allocation + // pool until after the GC allocation buffer has been post + // processed. Before any remnant memory is returned to the + // old-gen allocation pool, the GC thread that scanned this GC + // allocation buffer performs a write-commit memory barrier. + // 5. 
Background GC threads that perform tenuring of young-gen
+  //     objects without a GCLAB use a CAS lock before registering
+  //     each tenured object.  The CAS lock assures both mutual
+  //     exclusion and memory coherency/visibility.  Note that an
+  //     object tenured by a background GC thread will not overlap
+  //     with any of the clusters that are receiving tenured objects
+  //     by way of GCLAB buffers.  Multiple independent GC threads may
+  //     attempt to tenure objects into a shared cluster.  This is why
+  //     synchronization may be necessary.  Consider the following
+  //     scenarios:
+  //
+  //     a) If two objects are tenured into the same card region, each
+  //        registration may attempt to modify the first-start or
+  //        last-start information associated with that card region.
+  //        Furthermore, because the representations of first-start
+  //        and last-start information within the object_starts array
+  //        entry use different bits of a shared uint16_t to represent
+  //        each, it is necessary to lock the entire card entry
+  //        before modifying either the first-start or last-start
+  //        information within the entry.
+  //     b) Suppose GC thread X promotes a tenured object into
+  //        card region A and this tenured object spans into
+  //        neighboring card region B.  Suppose GC thread Y (not equal
+  //        to X) promotes a tenured object into card region B.  GC thread X
+  //        will update the object_starts information for card A.  No
+  //        synchronization is required.
+  //     c) In summary, when background GC threads register objects
+  //        newly tenured into old-gen memory, they must acquire a
+  //        mutual exclusion lock on the card that holds the starting
+  //        address of the newly tenured object.  This can be achieved
+  //        by using a CAS instruction to assure that the previous
+  //        values of first-offset and last-offset have not been
+  //        changed since the same thread inquired as to their most
+  //        current values.
+  //
+  //     One way to minimize the need for synchronization between
+  //     background tenuring GC threads is for each tenuring GC thread
+  //     to promote young-gen objects into distinct dedicated cluster
+  //     ranges.
+  //  6. The object_starts information is only required during the
+  //     starting of concurrent marking and concurrent evacuation
+  //     phases of GC.  Before we start either of these GC phases, the
+  //     JVM enters a safe point and all GC threads perform
+  //     commit-write barriers to assure that access to the
+  //     object_starts information is coherent.
+
+
+  // Notes on synchronization of register_object():
+  //
+  //  1. For efficiency, there is no locking in the implementation of register_object().
+  //  2. Thus, it is required that users of this service assure that concurrent/parallel invocations of
+  //     register_object() do not pertain to the same card's memory range.  See discussion below to understand
+  //     the risks.
+  //  3. When allocating from a TLAB or GCLAB, the mutual exclusion can be guaranteed by assuring that each
+  //     LAB's start and end are aligned on card memory boundaries.
+  //  4. Use the same lock that guarantees exclusivity when performing free-list allocation within heap regions.
+  //
+  // Register the newly allocated object while we're holding the global lock since there's no synchronization
+  // built in to the implementation of register_object().  There are potential races when multiple independent
+  // threads are allocating objects, some of which might span the same card region.  For example, consider
+  // a card table's memory region within which three objects are being allocated by three different threads:
+  //
+  //     objects being "concurrently" allocated:
+  //        [-----a------][-----b-----][--------------c------------------]
+  //            [---- card table memory range --------------]
+  //
+  // Before any objects are allocated, this card's memory range holds no objects.  Note that:
+  //   allocation of object a wants to set the has-object, first-start, and last-start attributes of the preceding card region.
+  //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
+  //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
+  //
+  // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as last-start
+  // representing object b while first-start represents object c.  This is why we need to require all register_object()
+  // invocations associated with objects that are allocated from "free lists" to provide their own mutual exclusion locking
+  // mechanism.
+
+  // Reset the starts_object() information to false for all cards in the range between from and to.
+  void reset_object_range(HeapWord *from, HeapWord *to);
+
+  // register_object() requires that the caller hold the heap lock
+  // before calling it.
+  void register_object(HeapWord* address);
+
+  // register_object_without_lock() does not require that the caller hold
+  // the heap lock before calling it, under the assumption that the
+  // caller has assured that no other thread will endeavor to concurrently
+  // register objects that start within the same card's memory region
+  // as address.
+  void register_object_without_lock(HeapWord* address);
+
+  // During the reference updates phase of GC, we walk through each old-gen memory region that was
+  // not part of the collection set and we invalidate all unmarked objects.  As part of this effort,
+  // we coalesce neighboring dead objects in order to make future remembered set scanning more
+  // efficient (since future remembered set scanning of any card region containing consecutive
+  // dead objects can skip over all of them at once by reading only a single dead object header
+  // instead of having to read the header of each of the coalesced dead objects).
+  //
+  // At some future time, we may implement a further optimization: satisfy future allocation requests
+  // by carving new objects out of the range of memory that represents the coalesced dead objects.
+  //
+  // Suppose we want to combine several dead objects into a single coalesced object.  How does this
+  // impact our representation of crossing map information?
+  //  1. If the newly coalesced range is contained entirely within a card range, that card's last
+  //     start entry either remains the same or it is changed to the start of the coalesced region.
+  //  2. For the card that holds the start of the coalesced object, it will not impact the first start
+  //     but it may impact the last start.
+  //  3. For following cards spanned entirely by the newly coalesced object, it will change starts_object
+  //     to false (and make first-start and last-start "undefined").
+  //  4. For a following card that is spanned partially by the newly coalesced object, it may change
+  //     the first-start value, but it will not change the last-start value.
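+  //
+  // A worked example of the cases above (hypothetical offsets): suppose the coalesced range
+  // begins at word offset 20 of card k and ends at word offset 10 of card k+2.  Then:
+  //   card k:   first-start is unchanged; last-start becomes 20 if the previous last object
+  //             started inside the coalesced range
+  //   card k+1: starts_object is cleared, because the card is entirely spanned by the
+  //             coalesced object
+  //   card k+2: first-start advances to 10 if the previous first object started inside the
+  //             coalesced range, or starts_object is cleared if no object starts at
+  //             offset >= 10 within that card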
+  //
+  // The range of addresses represented by the arguments to coalesce_objects() must represent a range
+  // of memory that was previously occupied exactly by one or more previously registered objects.  For
+  // convenience, it is legal to invoke coalesce_objects() with arguments that span a single previously
+  // registered object.
+  //
+  // The role of coalesce_objects() is to change the crossing map information associated with all of the
+  // coalesced objects.
+  void coalesce_objects(HeapWord* address, size_t length_in_words);
+
+  // The typical use case is going to look something like this:
+  //   for each heap region that comprises old-gen memory
+  //     for each card number that corresponds to this heap region
+  //       scan the objects contained therein if the card is dirty
+  // To avoid excessive lookups in a sparse array, the API queries
+  // the card number pertaining to a particular address and then uses the
+  // card number for subsequent information lookups and stores.
+
+  // If starts_object(card_index), this returns the word offset within this card
+  // memory at which the first object begins.  If !starts_object(card_index), the
+  // result is a don't care value -- asserts in a debug build.
+  size_t get_first_start(size_t card_index) const;
+
+  // If starts_object(card_index), this returns the word offset within this card
+  // memory at which the last object begins.  If !starts_object(card_index), the
+  // result is a don't care value.
+  size_t get_last_start(size_t card_index) const;
+
+
+  // Given a card_index, return the starting address of the first block in the heap
+  // that straddles into the card.  If the card is co-initial with an object, then
+  // this would return the starting address of the heap range that this card covers.
+  // Expects to be called for a card affiliated with the old generation in
+  // generational mode.
+  HeapWord* block_start(size_t card_index) const;
+};
+
+// ShenandoahScanRemembered is a concrete class representing the
+// ability to scan the old-gen remembered set for references to
+// objects residing in young-gen memory.
+//
+// Scanning normally begins with an invocation of numRegions and ends
+// after all clusters of all regions have been scanned.
+//
+// Throughout the scanning effort, the number of regions does not
+// change.
+//
+// Even though the regions that comprise old-gen memory are not
+// necessarily contiguous, the abstraction represented by this class
+// identifies each of the old-gen regions with an integer value
+// in the range from 0 to (numRegions() - 1) inclusive.
+//
+
+template<typename RememberedSet>
+class ShenandoahScanRemembered: public CHeapObj<mtGC> {
+
+private:
+  RememberedSet* _rs;
+  ShenandoahCardCluster<RememberedSet>* _scc;
+
+  // Global card stats (cumulative)
+  HdrSeq _card_stats_scan_rs[MAX_CARD_STAT_TYPE];
+  HdrSeq _card_stats_update_refs[MAX_CARD_STAT_TYPE];
+  // Per worker card stats (multiplexed by phase)
+  HdrSeq** _card_stats;
+
+  // The types of card metrics that we gather
+  const char* _card_stats_name[MAX_CARD_STAT_TYPE] = {
+    "dirty_run", "clean_run",
+    "dirty_cards", "clean_cards",
+    "max_dirty_run", "max_clean_run",
+    "dirty_scan_objs",
+    "alternations"
+  };
+
+  // The statistics are collected and logged separately for
+  // card-scans for initial marking, and for updating refs.
+  const char* _card_stat_log_type[MAX_CARD_STAT_LOG_TYPE] = {
+    "Scan Remembered Set", "Update Refs"
+  };
+
+  int _card_stats_log_counter[2] = {0, 0};
+
+public:
+  // How to instantiate this object?
+  //    ShenandoahDirectCardMarkRememberedSet *rs =
+  //        new ShenandoahDirectCardMarkRememberedSet();
+  //    scr = new
+  //        ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
+  //
+  // or, after the planned implementation of
+  // ShenandoahBufferWithSATBRememberedSet has been completed:
+  //
+  //    ShenandoahBufferWithSATBRememberedSet *rs =
+  //        new ShenandoahBufferWithSATBRememberedSet();
+  //    scr = new
+  //        ShenandoahScanRemembered<ShenandoahBufferWithSATBRememberedSet>(rs);
+
+
+  ShenandoahScanRemembered(RememberedSet *rs) {
+    _rs = rs;
+    _scc = new ShenandoahCardCluster<RememberedSet>(rs);
+
+    // We allocate ParallelGCThreads worth even though we usually only
+    // use up to ConcGCThreads, because degenerate collections may employ
+    // ParallelGCThreads for remembered set scanning.
+    if (ShenandoahEnableCardStats) {
+      _card_stats = NEW_C_HEAP_ARRAY(HdrSeq*, ParallelGCThreads, mtGC);
+      for (uint i = 0; i < ParallelGCThreads; i++) {
+        _card_stats[i] = new HdrSeq[MAX_CARD_STAT_TYPE];
+      }
+    } else {
+      _card_stats = nullptr;
+    }
+  }
+
+  ~ShenandoahScanRemembered() {
+    delete _scc;
+    if (ShenandoahEnableCardStats) {
+      for (uint i = 0; i < ParallelGCThreads; i++) {
+        delete[] _card_stats[i];
+      }
+      FREE_C_HEAP_ARRAY(HdrSeq*, _card_stats);
+      _card_stats = nullptr;
+    }
+    assert(_card_stats == nullptr, "Error");
+  }
+
+  HdrSeq* card_stats(uint worker_id) {
+    assert(worker_id < ParallelGCThreads, "Error");
+    assert(ShenandoahEnableCardStats == (_card_stats != nullptr), "Error");
+    return ShenandoahEnableCardStats ? _card_stats[worker_id] : nullptr;
+  }
+
+  HdrSeq* card_stats_for_phase(CardStatLogType t) {
+    switch (t) {
+      case CARD_STAT_SCAN_RS:
+        return _card_stats_scan_rs;
+      case CARD_STAT_UPDATE_REFS:
+        return _card_stats_update_refs;
+      default:
+        guarantee(false, "No such CardStatLogType");
+    }
+    return nullptr; // Quiet compiler
+  }
+
+  // TODO: We really don't want to share all of these APIs with arbitrary consumers of the ShenandoahScanRemembered abstraction.
+  // But in the spirit of quick and dirty for the time being, I'm going to go ahead and publish everything for right now.  Some
+  // of the existing code already depends on having access to these services (because existing code has not been written to honor
+  // full abstraction of remembered set scanning).  In the not too distant future, we want to try to make most, if not all, of
+  // these services private.  Two problems with publicizing:
+  //  1. Allowing arbitrary users to reach beneath the hood allows the users to make assumptions about underlying implementation.
+  //     This will make it more difficult to change underlying implementation at a future time, such as when we eventually experiment
+  //     with an SATB-based implementation of remembered set representation.
+  //  2. If we carefully control sharing of certain of these services, we can reduce the overhead of synchronization by assuring
+  //     that all users follow protocols that avoid contention that might require synchronization.  When we publish these APIs, we
+  //     lose control over who accesses the data, and how.  As a result, we are required to insert more defensive measures into
+  //     the implementation, including synchronization locks.
+
+
+  // Card index is zero-based relative to first spanned card region.
+ size_t last_valid_index(); + size_t total_cards(); + size_t card_index_for_addr(HeapWord *p); + HeapWord *addr_for_card_index(size_t card_index); + bool is_card_dirty(size_t card_index); + bool is_write_card_dirty(size_t card_index) { return _rs->is_write_card_dirty(card_index); } + void mark_card_as_dirty(size_t card_index); + void mark_range_as_dirty(size_t card_index, size_t num_cards); + void mark_card_as_clean(size_t card_index); + void mark_range_as_clean(size_t card_index, size_t num_cards); + bool is_card_dirty(HeapWord *p); + void mark_card_as_dirty(HeapWord *p); + void mark_range_as_dirty(HeapWord *p, size_t num_heap_words); + void mark_card_as_clean(HeapWord *p); + void mark_range_as_clean(HeapWord *p, size_t num_heap_words); + size_t cluster_count(); + + // Called by GC thread at start of concurrent mark to exchange roles of read and write remembered sets. + void swap_remset() { _rs->swap_remset(); } + + void reset_remset(HeapWord* start, size_t word_count) { _rs->reset_remset(start, word_count); } + + void merge_write_table(HeapWord* start, size_t word_count) { _rs->merge_write_table(start, word_count); } + + // Called by GC thread after scanning old remembered set in order to prepare for next GC pass + void clear_old_remset() { _rs->clear_old_remset(); } + + size_t cluster_for_addr(HeapWord *addr); + HeapWord* addr_for_cluster(size_t cluster_no); + + void reset_object_range(HeapWord *from, HeapWord *to); + void register_object(HeapWord *addr); + void register_object_without_lock(HeapWord *addr); + void coalesce_objects(HeapWord *addr, size_t length_in_words); + + HeapWord* first_object_in_card(size_t card_index) { + if (_scc->starts_object(card_index)) { + return addr_for_card_index(card_index) + _scc->get_first_start(card_index); + } else { + return nullptr; + } + } + + // Return true iff this object is "properly" registered. + bool verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx); + + // clear the cards to clean, and clear the object_starts info to no objects + void mark_range_as_empty(HeapWord *addr, size_t length_in_words); + + // process_clusters() scans a portion of the remembered set + // for references from old gen into young. Several worker threads + // scan different portions of the remembered set by making parallel invocations + // of process_clusters() with each invocation scanning different + // "clusters" of the remembered set. + // + // An invocation of process_clusters() examines all of the + // intergenerational references spanned by `count` clusters starting + // with `first_cluster`. The `oops` argument is a worker-thread-local + // OopClosure that is applied to all "valid" references in the remembered set. + // + // A side-effect of executing process_clusters() is to update the remembered + // set entries (e.g. marking dirty cards clean if they no longer + // hold references to young-gen memory). + // + // An implementation of process_clusters() may choose to efficiently + // address more typical scenarios in the structure of remembered sets. E.g. + // in the generational setting, one might expect remembered sets to be very sparse + // (low mutation rates in the old generation leading to sparse dirty cards, + // each with very few intergenerational pointers). Specific implementations + // may choose to degrade gracefully as the sparsity assumption fails to hold, + // such as when there are sudden spikes in (premature) promotion or in the + // case of an underprovisioned, poorly-tuned, or poorly-shaped heap. 
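+  //
+  // As a hedged sketch (names `chunks`, `scanner`, `cl` and `words_per_cluster` are assumed
+  // worker-local variables, not part of this interface), a worker might drive this API roughly
+  // as follows, using the ShenandoahRegionChunkIterator declared later in this file:
+  //
+  //   ShenandoahRegionChunk work;
+  //   while (chunks->next(&work)) {
+  //     HeapWord* start = work._r->bottom() + work._chunk_offset;
+  //     size_t first_cluster = scanner->cluster_for_addr(start);
+  //     size_t num_clusters = work._chunk_size / words_per_cluster;
+  //     scanner->process_clusters(first_cluster, num_clusters, start + work._chunk_size,
+  //                               cl, false /* use_write_table */, worker_id);
+  //   }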
+  //
+  // At the start of a concurrent young generation marking cycle, we invoke process_clusters
+  // with ClosureType ShenandoahInitMarkRootsClosure.
+  //
+  // At the start of a concurrent evacuation phase, we invoke process_clusters with
+  // ClosureType ShenandoahEvacuateUpdateRootsClosure.
+
+  // All template expansions require methods to be defined in the inline.hpp file, but larger
+  // such methods need not be declared as inline.
+  template <typename ClosureType>
+  void process_clusters(size_t first_cluster, size_t count, HeapWord *end_of_range, ClosureType *oops,
+                        bool use_write_table, uint worker_id);
+
+  template <typename ClosureType>
+  inline void process_humongous_clusters(ShenandoahHeapRegion* r, size_t first_cluster, size_t count,
+                                         HeapWord *end_of_range, ClosureType *oops, bool use_write_table);
+
+  template <typename ClosureType>
+  inline void process_region_slice(ShenandoahHeapRegion* region, size_t offset, size_t clusters, HeapWord* end_of_range,
+                                   ClosureType *cl, bool use_write_table, uint worker_id);
+
+  // To Do:
+  //  Create subclasses of ShenandoahInitMarkRootsClosure and
+  //  ShenandoahEvacuateUpdateRootsClosure and any other closures
+  //  that need to participate in remembered set scanning.  Within the
+  //  subclasses, add a (probably templated) instance variable that
+  //  refers to the associated ShenandoahCardCluster object.  Use this
+  //  ShenandoahCardCluster instance to "enhance" the do_oops
+  //  processing so that we can:
+  //
+  //   1. Avoid processing references that correspond to clean card
+  //      regions, and
+  //   2. Set card status to CLEAN when the associated card region no
+  //      longer holds inter-generational references.
+  //
+  //  To enable efficient implementation of these behaviors, we
+  //  probably also want to add a few fields into the
+  //  ShenandoahCardCluster object that allow us to precompute and
+  //  remember the addresses at which card status is going to change
+  //  from dirty to clean and clean to dirty.  The do_oops
+  //  implementations will want to update this value each time they
+  //  cross one of these boundaries.
+  void roots_do(OopIterateClosure* cl);
+
+  // Log stats related to card/RS stats for given phase t
+  void log_card_stats(uint nworkers, CardStatLogType t) PRODUCT_RETURN;
+private:
+  // Log stats for given worker id into the given summary card/RS stats
+  void log_worker_card_stats(uint worker_id, HdrSeq* sum_stats) PRODUCT_RETURN;
+
+  // Log given stats
+  inline void log_card_stats(HdrSeq* stats) PRODUCT_RETURN;
+
+  // Merge the stats from worker_id into the given summary stats, and clear the worker_id's stats.
+  void merge_worker_card_stats_cumulative(HdrSeq* worker_stats, HdrSeq* sum_stats) PRODUCT_RETURN;
+};
+
+
+// A ShenandoahRegionChunk represents a contiguous interval of a ShenandoahHeapRegion, typically representing
+// work to be done by a worker thread.
+struct ShenandoahRegionChunk {
+  ShenandoahHeapRegion *_r;     // The region of which this represents a chunk
+  size_t _chunk_offset;         // HeapWordSize offset
+  size_t _chunk_size;           // HeapWordSize qty
+};
+
+// ShenandoahRegionChunkIterator divides the total remembered set scanning effort into ShenandoahRegionChunks
+// that are assigned one at a time to worker threads.  (Here, we use the terms `assignments` and `chunks`
+// interchangeably.)  Note that the effort required to scan a range of memory is not necessarily a linear
+// function of the size of the range.  Some memory ranges hold only a small number of live objects.
+// Some ranges hold primarily primitive (non-pointer) data.
+// We start with larger chunk sizes because larger chunks
+// reduce coordination overhead.  We expect that the GC worker threads that receive more difficult assignments
+// will work longer on those chunks.  Meanwhile, other worker threads will repeatedly accept and complete multiple
+// easier chunks.  As the total amount of work remaining to be completed decreases, we decrease the size of chunks
+// given to individual threads.  This reduces the likelihood of significant imbalance between worker thread assignments
+// when there is less meaningful work to be performed by the remaining worker threads while they wait for
+// worker threads with difficult assignments to finish, reducing the overall duration of the phase.
+
+class ShenandoahRegionChunkIterator : public StackObj {
+private:
+  // The largest chunk size is 4 MiB (measured in heap words); with larger chunks, remembered set scanning may become too unbalanced.
+  // If the largest chunk size is too small, there is too much overhead sifting out assignments to individual worker threads.
+  static const size_t _maximum_chunk_size_words = (4 * 1024 * 1024) / HeapWordSize;
+
+  static const size_t _clusters_in_smallest_chunk = 4;
+
+  // smallest_chunk_size is 4 clusters.  Each cluster spans 32 KiB, so the smallest chunk spans 128 KiB.
+  // This is computed from CardTable::card_size_in_words() *
+  //      ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
+  static size_t smallest_chunk_size_words() {
+    return _clusters_in_smallest_chunk * CardTable::card_size_in_words() *
+           ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
+  }
+
+  // The total remembered set scanning effort is divided into chunks of work that are assigned to individual worker tasks.
+  // The chunks of assigned work are divided into groups, where the size of the typical group (_regular_group_size) is half the
+  // total number of regions.  The first group may be larger than
+  // _regular_group_size in the case that the first group's chunk
+  // size is less than the region size.  The last group may be larger
+  // than _regular_group_size because no group is allowed to
+  // have smaller assignments than _smallest_chunk_size, which is 128 KiB.
+
+  // Under normal circumstances, no configuration needs more than _maximum_groups (currently 6).
+  // The first group processes chunks of size 4 MiB (or smaller for smaller region sizes).
+  // The last group processes chunks of size 128 KiB.  There are six groups in total:
+
+  //     group[0] is 4 MiB chunk size (_maximum_chunk_size_words)
+  //     group[1] is 2 MiB chunk size
+  //     group[2] is 1 MiB chunk size
+  //     group[3] is 512 KiB chunk size
+  //     group[4] is 256 KiB chunk size
+  //     group[5] is 128 KiB chunk size (_smallest_chunk_size_words = 4 * 64 * 64)
+  static const size_t _maximum_groups = 6;
+
+  const ShenandoahHeap* _heap;
+
+  const size_t _regular_group_size;                         // Number of chunks in each group
+  const size_t _first_group_chunk_size_b4_rebalance;
+  const size_t _num_groups;                                 // Number of groups in this configuration
+  const size_t _total_chunks;
+
+  shenandoah_padding(0);
+  volatile size_t _index;
+  shenandoah_padding(1);
+
+  size_t _region_index[_maximum_groups];                    // The region index for the first region spanned by this group
+  size_t _group_offset[_maximum_groups];                    // The offset at which this group begins within the first region it spans
+  size_t _group_chunk_size[_maximum_groups];                // The size of each chunk within this group
+  size_t _group_entries[_maximum_groups];                   // Total chunks spanned by this group and the ones before it.
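+
+  // Worked arithmetic for the constants above (assuming 8 B heap words): smallest_chunk_size_words()
+  // is 4 * 64 * 64 = 16384 words = 128 KiB, and 16384 << (_maximum_groups - 1) = 16384 << 5 = 524288
+  // words = 4 MiB = _maximum_chunk_size_words, so halving the chunk size from group to group spans
+  // exactly the range from 4 MiB down to 128 KiB across the six groups.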
+
+  // No implicit copying: iterators should be passed by reference to capture the state
+  NONCOPYABLE(ShenandoahRegionChunkIterator);
+
+  // Makes use of _heap.
+  size_t calc_regular_group_size();
+
+  // Makes use of _regular_group_size, which must be initialized before call.
+  size_t calc_first_group_chunk_size_b4_rebalance();
+
+  // Makes use of _regular_group_size and _first_group_chunk_size_b4_rebalance, both of which must be initialized before call.
+  size_t calc_num_groups();
+
+  // Makes use of _regular_group_size and _first_group_chunk_size_b4_rebalance, both of which must be initialized before call.
+  size_t calc_total_chunks();
+
+public:
+  ShenandoahRegionChunkIterator(size_t worker_count);
+  ShenandoahRegionChunkIterator(ShenandoahHeap* heap, size_t worker_count);
+
+  // Reset iterator to default state
+  void reset();
+
+  // Fills in assignment with next chunk of work and returns true iff there is more work.
+  // Otherwise, returns false.  This is multi-thread-safe.
+  inline bool next(struct ShenandoahRegionChunk *assignment);
+
+  // This is *not* MT safe.  However, in the absence of multithreaded access, it
+  // can be used to determine if there is more work to do.
+  inline bool has_next() const;
+};
+
+typedef ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet> RememberedScanner;
+
+class ShenandoahScanRememberedTask : public WorkerTask {
+ private:
+  ShenandoahObjToScanQueueSet* _queue_set;
+  ShenandoahObjToScanQueueSet* _old_queue_set;
+  ShenandoahReferenceProcessor* _rp;
+  ShenandoahRegionChunkIterator* _work_list;
+  bool _is_concurrent;
+
+ public:
+  ShenandoahScanRememberedTask(ShenandoahObjToScanQueueSet* queue_set,
+                               ShenandoahObjToScanQueueSet* old_queue_set,
+                               ShenandoahReferenceProcessor* rp,
+                               ShenandoahRegionChunkIterator* work_list,
+                               bool is_concurrent);
+
+  void work(uint worker_id);
+  void do_work(uint worker_id);
+};
+
+
+#endif   // SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBERED_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
new file mode 100644
index 00000000000..7c7b030da0d
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
@@ -0,0 +1,1013 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP + +#include "memory/iterator.hpp" +#include "oops/oop.hpp" +#include "oops/objArrayOop.hpp" +#include "gc/shared/collectorCounters.hpp" +#include "gc/shenandoah/shenandoahCardStats.hpp" +#include "gc/shenandoah/shenandoahCardTable.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahScanRemembered.hpp" +#include "gc/shenandoah/mode/shenandoahMode.hpp" + +inline size_t +ShenandoahDirectCardMarkRememberedSet::last_valid_index() const { + return _card_table->last_valid_index(); +} + +inline size_t +ShenandoahDirectCardMarkRememberedSet::total_cards() const { + return _total_card_count; +} + +inline size_t +ShenandoahDirectCardMarkRememberedSet::card_index_for_addr(HeapWord *p) const { + return _card_table->index_for(p); +} + +inline HeapWord* +ShenandoahDirectCardMarkRememberedSet::addr_for_card_index(size_t card_index) const { + return _whole_heap_base + CardTable::card_size_in_words() * card_index; +} + +inline const CardValue* +ShenandoahDirectCardMarkRememberedSet::get_card_table_byte_map(bool use_write_table) const { + return use_write_table ? + _card_table->write_byte_map() + : _card_table->read_byte_map(); +} + +inline bool +ShenandoahDirectCardMarkRememberedSet::is_write_card_dirty(size_t card_index) const { + CardValue* bp = &(_card_table->write_byte_map())[card_index]; + return (bp[0] == CardTable::dirty_card_val()); +} + +inline bool +ShenandoahDirectCardMarkRememberedSet::is_card_dirty(size_t card_index) const { + CardValue* bp = &(_card_table->read_byte_map())[card_index]; + return (bp[0] == CardTable::dirty_card_val()); +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(size_t card_index) { + CardValue* bp = &(_card_table->write_byte_map())[card_index]; + bp[0] = CardTable::dirty_card_val(); +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(size_t card_index, size_t num_cards) { + CardValue* bp = &(_card_table->write_byte_map())[card_index]; + while (num_cards-- > 0) { + *bp++ = CardTable::dirty_card_val(); + } +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(size_t card_index) { + CardValue* bp = &(_card_table->write_byte_map())[card_index]; + bp[0] = CardTable::clean_card_val(); +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(size_t card_index, size_t num_cards) { + CardValue* bp = &(_card_table->write_byte_map())[card_index]; + while (num_cards-- > 0) { + *bp++ = CardTable::clean_card_val(); + } +} + +inline bool +ShenandoahDirectCardMarkRememberedSet::is_card_dirty(HeapWord *p) const { + size_t index = card_index_for_addr(p); + CardValue* bp = &(_card_table->read_byte_map())[index]; + return (bp[0] == CardTable::dirty_card_val()); +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_card_as_dirty(HeapWord *p) { + size_t index = card_index_for_addr(p); + CardValue* bp = &(_card_table->write_byte_map())[index]; + bp[0] = CardTable::dirty_card_val(); +} + +inline void +ShenandoahDirectCardMarkRememberedSet::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) { + CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift]; + CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift]; + // If (p + num_heap_words) is not aligned on card boundary, we also need to dirty last 
card.
+  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
+    end_bp++;
+  }
+  while (bp < end_bp) {
+    *bp++ = CardTable::dirty_card_val();
+  }
+}
+
+inline void
+ShenandoahDirectCardMarkRememberedSet::mark_card_as_clean(HeapWord *p) {
+  size_t index = card_index_for_addr(p);
+  CardValue* bp = &(_card_table->write_byte_map())[index];
+  bp[0] = CardTable::clean_card_val();
+}
+
+inline void
+ShenandoahDirectCardMarkRememberedSet::mark_range_as_clean(HeapWord *p, size_t num_heap_words) {
+  CardValue* bp = &(_card_table->write_byte_map_base())[uintptr_t(p) >> _card_shift];
+  CardValue* end_bp = &(_card_table->write_byte_map_base())[uintptr_t(p + num_heap_words) >> _card_shift];
+  // If (p + num_heap_words) is not aligned on a card boundary, we also need to clean the last card.
+  if (((unsigned long long) (p + num_heap_words)) & (CardTable::card_size() - 1)) {
+    end_bp++;
+  }
+  while (bp < end_bp) {
+    *bp++ = CardTable::clean_card_val();
+  }
+}
+
+inline size_t
+ShenandoahDirectCardMarkRememberedSet::cluster_count() const {
+  return _cluster_count;
+}
+
+// No lock required because arguments align with card boundaries.
+template<typename RememberedSet>
+inline void
+ShenandoahCardCluster<RememberedSet>::reset_object_range(HeapWord* from, HeapWord* to) {
+  assert(((((unsigned long long) from) & (CardTable::card_size() - 1)) == 0) &&
+         ((((unsigned long long) to) & (CardTable::card_size() - 1)) == 0),
+         "reset_object_range bounds must align with card boundaries");
+  size_t card_at_start = _rs->card_index_for_addr(from);
+  size_t num_cards = (to - from) / CardTable::card_size_in_words();
+
+  for (size_t i = 0; i < num_cards; i++) {
+    object_starts[card_at_start + i].short_word = 0;
+  }
+}
+
+// Assume only one thread at a time registers objects pertaining to
+// each card-table entry's range of memory.
+template<typename RememberedSet>
+inline void
+ShenandoahCardCluster<RememberedSet>::register_object(HeapWord* address) {
+  shenandoah_assert_heaplocked();
+
+  register_object_without_lock(address);
+}
+
+template<typename RememberedSet>
+inline void
+ShenandoahCardCluster<RememberedSet>::register_object_without_lock(HeapWord* address) {
+  size_t card_at_start = _rs->card_index_for_addr(address);
+  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
+  uint8_t offset_in_card = address - card_start_address;
+
+  if (!starts_object(card_at_start)) {
+    set_starts_object_bit(card_at_start);
+    set_first_start(card_at_start, offset_in_card);
+    set_last_start(card_at_start, offset_in_card);
+  } else {
+    if (offset_in_card < get_first_start(card_at_start)) {
+      set_first_start(card_at_start, offset_in_card);
+    }
+    if (offset_in_card > get_last_start(card_at_start)) {
+      set_last_start(card_at_start, offset_in_card);
+    }
+  }
+}
+
+template<typename RememberedSet>
+inline void
+ShenandoahCardCluster<RememberedSet>::coalesce_objects(HeapWord* address, size_t length_in_words) {
+
+  size_t card_at_start = _rs->card_index_for_addr(address);
+  HeapWord *card_start_address = _rs->addr_for_card_index(card_at_start);
+  size_t card_at_end = card_at_start + ((address + length_in_words) - card_start_address) / CardTable::card_size_in_words();
+
+  if (card_at_start == card_at_end) {
+    // There are no changes to the first-start information.  Either get_first_start(card_at_start) returns this coalesced object,
+    // or it returns an object that precedes the coalesced object.
+    if (card_start_address + get_last_start(card_at_start) < address + length_in_words) {
+      uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
+      // The object that used to be the last object starting within this card is being subsumed within the coalesced
+      // object. Since we always coalesce entire objects, this condition only occurs if the last object ends before or at
+      // the end of the card's memory range and there is no object following this object. In this case, adjust last_start
+      // to represent the start of the coalesced range.
+      set_last_start(card_at_start, coalesced_offset);
+    }
+    // Else, no changes to last_starts information. Either get_last_start(card_at_start) returns the object that immediately
+    // follows the coalesced object, or it returns an object that follows the object immediately following the coalesced object.
+  } else {
+    uint8_t coalesced_offset = static_cast<uint8_t>(address - card_start_address);
+    if (get_last_start(card_at_start) > coalesced_offset) {
+      // Existing last start is being coalesced, create new last start
+      set_last_start(card_at_start, coalesced_offset);
+    }
+    // otherwise, get_last_start(card_at_start) must equal coalesced_offset
+
+    // All the cards between first and last get cleared.
+    for (size_t i = card_at_start + 1; i < card_at_end; i++) {
+      clear_starts_object_bit(i);
+    }
+
+    uint8_t follow_offset = static_cast<uint8_t>((address + length_in_words) - _rs->addr_for_card_index(card_at_end));
+    if (starts_object(card_at_end) && (get_first_start(card_at_end) < follow_offset)) {
+      // It may be that after coalescing within this last card's memory range, the last card
+      // no longer holds an object.
+      if (get_last_start(card_at_end) >= follow_offset) {
+        set_first_start(card_at_end, follow_offset);
+      } else {
+        // last_start is being coalesced so this card no longer has any objects.
+        clear_starts_object_bit(card_at_end);
+      }
+    }
+    // else
+    //   card_at_end did not have an object, so it still does not have an object, or
+    //   card_at_end had an object that starts after the coalesced object, so no changes required for card_at_end
+
+  }
+}
+
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahCardCluster<RememberedSet>::get_first_start(size_t card_index) const {
+  assert(starts_object(card_index), "Can't get first start because no object starts here");
+  return object_starts[card_index].offsets.first & FirstStartBits;
+}
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahCardCluster<RememberedSet>::get_last_start(size_t card_index) const {
+  assert(starts_object(card_index), "Can't get last start because no object starts here");
+  return object_starts[card_index].offsets.last;
+}
+
+// Given a card_index, return the starting address of the first block in the heap
+// that straddles into this card. If this card is co-initial with an object, then
+// this would return the first address of the range that this card covers, which is
+// where the card's first object also begins.
+// TODO: collect some stats for the size of walks backward over cards.
+// For larger objects, a logarithmic BOT such as used by G1 might make the
+// backwards walk potentially faster.
+template<typename RememberedSet>
+HeapWord*
+ShenandoahCardCluster<RememberedSet>::block_start(const size_t card_index) const {
+
+  HeapWord* left = _rs->addr_for_card_index(card_index);
+
+#ifdef ASSERT
+  assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
+  ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left);
+  assert(region->is_old(), "Do not use for young regions");
+  // For HumongousRegion:s it's more efficient to jump directly to the
+  // start region.
+  assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
+#endif
+  if (starts_object(card_index) && get_first_start(card_index) == 0) {
+    // This card contains a co-initial object; a fortiori, it covers
+    // also the case of a card being the first in a region.
+    assert(oopDesc::is_oop(cast_to_oop(left)), "Should be an object");
+    return left;
+  }
+
+  HeapWord* p = nullptr;
+  oop obj = cast_to_oop(p);
+  ssize_t cur_index = (ssize_t)card_index;
+  assert(cur_index >= 0, "Overflow");
+  assert(cur_index > 0, "Should have returned above");
+  // Walk backwards over the cards...
+  while (--cur_index > 0 && !starts_object(cur_index)) {
+    // ... to the one that starts the object
+  }
+  // cur_index should start an object: we should not have walked
+  // past the left end of the region.
+  assert(cur_index >= 0 && (cur_index <= (ssize_t)card_index), "Error");
+  assert(region->bottom() <= _rs->addr_for_card_index(cur_index),
+         "Fell off the bottom of containing region");
+  assert(starts_object(cur_index), "Error");
+  size_t offset = get_last_start(cur_index);
+  // can avoid call via card size arithmetic below instead
+  p = _rs->addr_for_card_index(cur_index) + offset;
+  // Recall that we already dealt with the co-initial object case above
+  assert(p < left, "obj should start before left");
+  // While it is safe to ask an object its size in the loop that
+  // follows, the (ifdef'd out) loop should never be needed.
+  // 1. we ask this question only for regions in the old generation
+  // 2. there is no direct allocation ever by mutators in old generation
+  //    regions. Only GC will ever allocate in old regions, and then
+  //    too only during promotion/evacuation phases. Thus there is no danger
+  //    of races between reading from and writing to the object start array,
+  //    or of asking partially initialized objects their size (in the loop below).
+  // 3. only GC asks this question during phases when it is not concurrently
+  //    evacuating/promoting, viz. during concurrent root scanning (before
+  //    the evacuation phase) and during concurrent update refs (after the
+  //    evacuation phase) of young collections. This is never called
+  //    during old or global collections.
+  // 4. Every allocation under TAMS updates the object start array.
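// Stand-alone sketch (hypothetical flat arrays, not part of this patch): the backward walk
// above, reduced to its essentials. starts[i] says whether any object starts in card i and
// last_start[i] gives that object's word offset; callers guarantee a starting card exists at
// or to the left of card_index, mirroring block_start()'s preconditions.

#include <cstddef>
#include <vector>

size_t block_start_word(const std::vector<bool>& starts,
                        const std::vector<size_t>& last_start,
                        size_t card_index, size_t card_words) {
  size_t i = card_index;
  while (i > 0 && !starts[i]) {
    i--;                             // walk left over cards with no object start
  }
  // i is now the card holding the start of the object that straddles into card_index.
  return i * card_words + last_start[i];
}

// (end of sketch)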
+  NOT_PRODUCT(obj = cast_to_oop(p);)
+  assert(oopDesc::is_oop(obj), "Should be an object");
+#define WALK_FORWARD_IN_BLOCK_START false
+  while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
+    p += obj->size();
+  }
+#undef WALK_FORWARD_IN_BLOCK_START // false
+  assert(p + obj->size() > left, "obj should end after left");
+  return p;
+}
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahScanRemembered<RememberedSet>::last_valid_index() { return _rs->last_valid_index(); }
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahScanRemembered<RememberedSet>::total_cards() { return _rs->total_cards(); }
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahScanRemembered<RememberedSet>::card_index_for_addr(HeapWord *p) { return _rs->card_index_for_addr(p); }
+
+template<typename RememberedSet>
+inline HeapWord *
+ShenandoahScanRemembered<RememberedSet>::addr_for_card_index(size_t card_index) { return _rs->addr_for_card_index(card_index); }
+
+template<typename RememberedSet>
+inline bool
+ShenandoahScanRemembered<RememberedSet>::is_card_dirty(size_t card_index) { return _rs->is_card_dirty(card_index); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(size_t card_index) { _rs->mark_card_as_dirty(card_index); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(size_t card_index, size_t num_cards) { _rs->mark_range_as_dirty(card_index, num_cards); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(size_t card_index) { _rs->mark_card_as_clean(card_index); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_range_as_clean(size_t card_index, size_t num_cards) { _rs->mark_range_as_clean(card_index, num_cards); }
+
+template<typename RememberedSet>
+inline bool
+ShenandoahScanRemembered<RememberedSet>::is_card_dirty(HeapWord *p) { return _rs->is_card_dirty(p); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_card_as_dirty(HeapWord *p) { _rs->mark_card_as_dirty(p); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_range_as_dirty(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_dirty(p, num_heap_words); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_card_as_clean(HeapWord *p) { _rs->mark_card_as_clean(p); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::mark_range_as_clean(HeapWord *p, size_t num_heap_words) { _rs->mark_range_as_clean(p, num_heap_words); }
+
+template<typename RememberedSet>
+inline size_t
+ShenandoahScanRemembered<RememberedSet>::cluster_count() { return _rs->cluster_count(); }
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::reset_object_range(HeapWord *from, HeapWord *to) {
+  _scc->reset_object_range(from, to);
+}
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::register_object(HeapWord *addr) {
+  _scc->register_object(addr);
+}
+
+template<typename RememberedSet>
+inline void
+ShenandoahScanRemembered<RememberedSet>::register_object_without_lock(HeapWord *addr) {
+  _scc->register_object_without_lock(addr);
+}
+
+template<typename RememberedSet>
+inline bool
+ShenandoahScanRemembered<RememberedSet>::verify_registration(HeapWord* address, ShenandoahMarkingContext* ctx) {
+
+  size_t index = card_index_for_addr(address);
+  if (!_scc->starts_object(index)) {
+    return false;
+  }
+  HeapWord* base_addr = addr_for_card_index(index);
+  size_t offset = _scc->get_first_start(index);
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Verify that I can find this object within its enclosing card by scanning forward from first_start.
+  while (base_addr + offset < address) {
+    oop obj = cast_to_oop(base_addr + offset);
+    if (!ctx || ctx->is_marked(obj)) {
+      offset += obj->size();
+    } else {
+      // If this object is not live, don't trust its size(); all objects above tams are live.
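// Stand-alone sketch (hypothetical MarkCtx callbacks, not the HotSpot API): the pattern used
// in this loop -- trust size() only for objects the marking context says are live, otherwise
// jump ahead via the mark bitmap -- in isolation. is_marked/next_marked stand in for
// ShenandoahMarkingContext::is_marked and get_next_marked_addr.

#include <cstddef>
#include <functional>

struct MarkCtx {
  std::function<bool(size_t)>           is_marked;
  std::function<size_t(size_t, size_t)> next_marked;  // (addr, tams) -> next live addr
};

// Advance through object starts in [base, limit): live objects are stepped over by
// their size; dead ones are skipped via the bitmap, as in the loop above.
size_t walk_live(const MarkCtx& ctx, std::function<size_t(size_t)> size_of,
                 size_t base, size_t limit, size_t tams) {
  size_t p = base;
  while (p < limit) {
    p = ctx.is_marked(p) ? p + size_of(p) : ctx.next_marked(p, tams);
  }
  return p;
}

// (end of sketch)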
+ ShenandoahHeapRegion* r = heap->heap_region_containing(obj); + HeapWord* tams = ctx->top_at_mark_start(r); + offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr; + } + } + if (base_addr + offset != address){ + return false; + } + + // At this point, offset represents object whose registration we are verifying. We know that at least this object resides + // within this card's memory. + + // Make sure that last_offset is properly set for the enclosing card, but we can't verify this for + // candidate collection-set regions during mixed evacuations, so disable this check in general + // during mixed evacuations. + + ShenandoahHeapRegion* r = heap->heap_region_containing(base_addr + offset); + size_t max_offset = r->top() - base_addr; + if (max_offset > CardTable::card_size_in_words()) { + max_offset = CardTable::card_size_in_words(); + } + size_t prev_offset; + if (!ctx) { + do { + oop obj = cast_to_oop(base_addr + offset); + prev_offset = offset; + offset += obj->size(); + } while (offset < max_offset); + if (_scc->get_last_start(index) != prev_offset) { + return false; + } + + // base + offset represents address of first object that starts on following card, if there is one. + + // Notes: base_addr is addr_for_card_index(index) + // base_addr + offset is end of the object we are verifying + // cannot use card_index_for_addr(base_addr + offset) because it asserts arg < end of whole heap + size_t end_card_index = index + offset / CardTable::card_size_in_words(); + + if (end_card_index > index && end_card_index <= _rs->last_valid_index()) { + // If there is a following object registered on the next card, it should begin where this object ends. + if (_scc->starts_object(end_card_index) && + ((addr_for_card_index(end_card_index) + _scc->get_first_start(end_card_index)) != (base_addr + offset))) { + return false; + } + } + + // Assure that no other objects are registered "inside" of this one. + for (index++; index < end_card_index; index++) { + if (_scc->starts_object(index)) { + return false; + } + } + } else { + // This is a mixed evacuation or a global collect: rely on mark bits to identify which objects need to be properly registered + assert(!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Cannot rely on mark context here."); + // If the object reaching or spanning the end of this card's memory is marked, then last_offset for this card + // should represent this object. Otherwise, last_offset is a don't care. + ShenandoahHeapRegion* region = heap->heap_region_containing(base_addr + offset); + HeapWord* tams = ctx->top_at_mark_start(region); + oop last_obj = nullptr; + do { + oop obj = cast_to_oop(base_addr + offset); + if (ctx->is_marked(obj)) { + prev_offset = offset; + offset += obj->size(); + last_obj = obj; + } else { + offset = ctx->get_next_marked_addr(base_addr + offset, tams) - base_addr; + // If there are no marked objects remaining in this region, offset equals tams - base_addr. If this offset is + // greater than max_offset, we will immediately exit this loop. Otherwise, the next iteration of the loop will + // treat the object at offset as marked and live (because address >= tams) and we will continue iterating object + // by consulting the size() fields of each. 
+ } + } while (offset < max_offset); + if (last_obj != nullptr && prev_offset + last_obj->size() >= max_offset) { + // last marked object extends beyond end of card + if (_scc->get_last_start(index) != prev_offset) { + return false; + } + // otherwise, the value of _scc->get_last_start(index) is a don't care because it represents a dead object and we + // cannot verify its context + } + } + return true; +} + +template +inline void +ShenandoahScanRemembered::coalesce_objects(HeapWord *addr, size_t length_in_words) { + _scc->coalesce_objects(addr, length_in_words); +} + +template +inline void +ShenandoahScanRemembered::mark_range_as_empty(HeapWord *addr, size_t length_in_words) { + _rs->mark_range_as_clean(addr, length_in_words); + _scc->clear_objects_in_range(addr, length_in_words); +} + +// Process all objects starting within count clusters beginning with first_cluster and for which the start address is +// less than end_of_range. For any non-array object whose header lies on a dirty card, scan the entire object, +// even if its end reaches beyond end_of_range. Object arrays, on the other hand, are precisely dirtied and +// only the portions of the array on dirty cards need to be scanned. +// +// Do not CANCEL within process_clusters. It is assumed that if a worker thread accepts responsibility for processing +// a chunk of work, it will finish the work it starts. Otherwise, the chunk of work will be lost in the transition to +// degenerated execution, leading to dangling references. +template +template +void ShenandoahScanRemembered::process_clusters(size_t first_cluster, size_t count, HeapWord* end_of_range, + ClosureType* cl, bool use_write_table, uint worker_id) { + + // If old-gen evacuation is active, then MarkingContext for old-gen heap regions is valid. We use the MarkingContext + // bits to determine which objects within a DIRTY card need to be scanned. This is necessary because old-gen heap + // regions that are in the candidate collection set have not been coalesced and filled. Thus, these heap regions + // may contain zombie objects. Zombie objects are known to be dead, but have not yet been "collected". Scanning + // zombie objects is unsafe because the Klass pointer is not reliable, objects referenced from a zombie may have been + // collected (if dead), or relocated (if live), or if dead but not yet collected, we don't want to "revive" them + // by marking them (when marking) or evacuating them (when updating references). 
+ + // start and end addresses of range of objects to be scanned, clipped to end_of_range + const size_t start_card_index = first_cluster * ShenandoahCardCluster::CardsPerCluster; + const HeapWord* start_addr = _rs->addr_for_card_index(start_card_index); + // clip at end_of_range (exclusive) + HeapWord* end_addr = MIN2(end_of_range, (HeapWord*)start_addr + (count * ShenandoahCardCluster::CardsPerCluster + * CardTable::card_size_in_words())); + assert(start_addr < end_addr, "Empty region?"); + + const size_t whole_cards = (end_addr - start_addr + CardTable::card_size_in_words() - 1)/CardTable::card_size_in_words(); + const size_t end_card_index = start_card_index + whole_cards - 1; + log_debug(gc, remset)("Worker %u: cluster = " SIZE_FORMAT " count = " SIZE_FORMAT " eor = " INTPTR_FORMAT + " start_addr = " INTPTR_FORMAT " end_addr = " INTPTR_FORMAT " cards = " SIZE_FORMAT, + worker_id, first_cluster, count, p2i(end_of_range), p2i(start_addr), p2i(end_addr), whole_cards); + + // use_write_table states whether we are using the card table that is being + // marked by the mutators. If false, we are using a snapshot of the card table + // that is not subject to modifications. Even when this arg is true, and + // the card table is being actively marked, SATB marking ensures that we need not + // worry about cards marked after the processing here has passed them. + const CardValue* const ctbm = _rs->get_card_table_byte_map(use_write_table); + + // If old gen evacuation is active, ctx will hold the completed marking of + // old generation objects. We'll only scan objects that are marked live by + // the old generation marking. These include objects allocated since the + // start of old generation marking (being those above TAMS). + const ShenandoahHeap* heap = ShenandoahHeap::heap(); + const ShenandoahMarkingContext* ctx = heap->is_old_bitmap_stable() ? + heap->marking_context() : nullptr; + + // The region we will scan is the half-open interval [start_addr, end_addr), + // and lies entirely within a single region. + const ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(start_addr); + assert(region->contains(end_addr - 1), "Slice shouldn't cross regions"); + + // This code may have implicit assumptions of examining only old gen regions. + assert(region->is_old(), "We only expect to be processing old regions"); + assert(!region->is_humongous(), "Humongous regions can be processed more efficiently;" + "see process_humongous_clusters()"); + // tams and ctx below are for old generation marking. As such, young gen roots must + // consider everything above tams, since it doesn't represent a TAMS for young gen's + // SATB marking. + const HeapWord* tams = (ctx == nullptr ? region->bottom() : ctx->top_at_mark_start(region)); + + NOT_PRODUCT(ShenandoahCardStats stats(whole_cards, card_stats(worker_id));) + + // In the case of imprecise marking, we remember the lowest address + // scanned in a range of dirty cards, as we work our way left from the + // highest end_addr. This serves as another upper bound on the address we will + // scan as we move left over each contiguous range of dirty cards. + HeapWord* upper_bound = nullptr; + + // Starting at the right end of the address range, walk backwards accumulating + // a maximal dirty range of cards, then process those cards. 
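// Stand-alone sketch (hypothetical names, not part of this patch): the right-to-left run
// accumulation the loop below performs over the card-table byte map. HotSpot's CardTable
// uses 0 for a dirty card (the aarch64 assembler earlier in this patch asserts
// CardTable::dirty_card_val() == 0), so that value is used here.

#include <cstddef>
#include <cstdint>

constexpr uint8_t DIRTY_CARD = 0;        // matches CardTable::dirty_card_val()

struct CardRun { size_t left, right; bool dirty; };

// Starting at `cur` (inclusive) and never going below `lo`, extend a maximal run of
// equal card values leftwards, as the dirty/clean loops below do.
CardRun find_run(const uint8_t* cards, size_t lo, size_t cur) {
  CardRun run = {cur, cur, cards[cur] == DIRTY_CARD};
  while (run.left > lo && cards[run.left - 1] == cards[cur]) {
    run.left--;
  }
  return run;
}

// (end of sketch)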
+ ssize_t cur_index = (ssize_t) end_card_index; + assert(cur_index >= 0, "Overflow"); + assert(((ssize_t)start_card_index) >= 0, "Overflow"); + while (cur_index >= (ssize_t)start_card_index) { + + // We'll continue the search starting with the card for the upper bound + // address identified by the last dirty range that we processed, if any, + // skipping any cards at higher addresses. + if (upper_bound != nullptr) { + ssize_t right_index = _rs->card_index_for_addr(upper_bound); + assert(right_index >= 0, "Overflow"); + cur_index = MIN2(cur_index, right_index); + assert(upper_bound < end_addr, "Program logic"); + end_addr = upper_bound; // lower end_addr + upper_bound = nullptr; // and clear upper_bound + if (end_addr <= start_addr) { + assert(right_index <= (ssize_t)start_card_index, "Program logic"); + // We are done with our cluster + return; + } + } + + if (ctbm[cur_index] == CardTable::dirty_card_val()) { + // ==== BEGIN DIRTY card range processing ==== + + const size_t dirty_r = cur_index; // record right end of dirty range (inclusive) + while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::dirty_card_val()) { + // walk back over contiguous dirty cards to find left end of dirty range (inclusive) + } + // [dirty_l, dirty_r] is a "maximal" closed interval range of dirty card indices: + // it may not be maximal if we are using the write_table, because of concurrent + // mutations dirtying the card-table. It may also not be maximal if an upper bound + // was established by the scan of the previous chunk. + const size_t dirty_l = cur_index + 1; // record left end of dirty range (inclusive) + // Check that we identified a boundary on our left + assert(ctbm[dirty_l] == CardTable::dirty_card_val(), "First card in range should be dirty"); + assert(dirty_l == start_card_index || use_write_table + || ctbm[dirty_l - 1] == CardTable::clean_card_val(), + "Interval isn't maximal on the left"); + assert(dirty_r >= dirty_l, "Error"); + assert(ctbm[dirty_r] == CardTable::dirty_card_val(), "Last card in range should be dirty"); + // Record alternations, dirty run length, and dirty card count + NOT_PRODUCT(stats.record_dirty_run(dirty_r - dirty_l + 1);) + + // Find first object that starts this range: + // [left, right) is a maximal right-open interval of dirty cards + HeapWord* left = _rs->addr_for_card_index(dirty_l); // inclusive + HeapWord* right = _rs->addr_for_card_index(dirty_r + 1); // exclusive + // Clip right to end_addr established above (still exclusive) + right = MIN2(right, end_addr); + assert(right <= region->top() && end_addr <= region->top(), "Busted bounds"); + const MemRegion mr(left, right); + + // NOTE: We'll not call block_start() repeatedly + // on a very large object if its head card is dirty. If not, + // (i.e. the head card is clean) we'll call it each time we + // process a new dirty range on the object. This is always + // the case for large object arrays, which are typically more + // common. + // TODO: It is worthwhile to memoize this, so as to avoid that + // overhead, and it is easy to do, but deferred to a follow-up. + HeapWord* p = _scc->block_start(dirty_l); + oop obj = cast_to_oop(p); + + // PREFIX: The object that straddles into this range of dirty cards + // from the left may be subject to special treatment unless + // it is an object array. + if (p < left && !obj->is_objArray()) { + // The mutator (both compiler and interpreter, but not JNI?) + // typically dirty imprecisely (i.e. 
only the head of an object), + // but GC closures typically dirty the object precisely. (It would + // be nice to have everything be precise for maximum efficiency.) + // + // To handle this, we check the head card of the object here and, + // if dirty, (arrange to) scan the object in its entirety. If we + // find the head card clean, we'll scan only the portion of the + // object lying in the dirty card range below, assuming this was + // the result of precise marking by GC closures. + + // index of the "head card" for p + const size_t hc_index = _rs->card_index_for_addr(p); + if (ctbm[hc_index] == CardTable::dirty_card_val()) { + // Scan or skip the object, depending on location of its + // head card, and remember that we'll have processed all + // the objects back up to p, which is thus an upper bound + // for the next iteration of a dirty card loop. + upper_bound = p; // remember upper bound for next chunk + if (p < start_addr) { + // if object starts in a previous slice, it'll be handled + // in its entirety by the thread processing that slice; we can + // skip over it and avoid an unnecessary extra scan. + assert(obj == cast_to_oop(p), "Inconsistency detected"); + p += obj->size(); + } else { + // the object starts in our slice, we scan it in its entirety + assert(obj == cast_to_oop(p), "Inconsistency detected"); + if (ctx == nullptr || ctx->is_marked(obj)) { + // Scan the object in its entirety + p += obj->oop_iterate_size(cl); + } else { + assert(p < tams, "Error 1 in ctx/marking/tams logic"); + // Skip over any intermediate dead objects + p = ctx->get_next_marked_addr(p, tams); + assert(p <= tams, "Error 2 in ctx/marking/tams logic"); + } + } + assert(p > left, "Should have processed into interior of dirty range"); + } + } + + size_t i = 0; + HeapWord* last_p = nullptr; + + // BODY: Deal with (other) objects in this dirty card range + while (p < right) { + obj = cast_to_oop(p); + // walk right scanning eligible objects + if (ctx == nullptr || ctx->is_marked(obj)) { + // we need to remember the last object ptr we scanned, in case we need to + // complete a partial suffix scan after mr, see below + last_p = p; + // apply the closure to the oops in the portion of + // the object within mr. + p += obj->oop_iterate_size(cl, mr); + NOT_PRODUCT(i++); + } else { + // forget the last object pointer we remembered + last_p = nullptr; + assert(p < tams, "Tams and above are implicitly marked in ctx"); + // object under tams isn't marked: skip to next live object + p = ctx->get_next_marked_addr(p, tams); + assert(p <= tams, "Error 3 in ctx/marking/tams logic"); + } + } + + // TODO: if an objArray then only use mr, else just iterate over entire object; + // that would avoid the special treatment of suffix below. + + // SUFFIX: Fix up a possible incomplete scan at right end of window + // by scanning the portion of a non-objArray that wasn't done. 
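// Stand-alone sketch (word indices instead of HeapWord*, not part of this patch): the
// prefix/body/suffix treatment above reduces to one clipping decision. Object arrays are
// dirtied precisely, so only their intersection with the dirty window is scanned; other
// objects are scanned whole when their head card is dirty, because mutators dirty only
// the head card.

#include <algorithm>
#include <cstddef>

struct Span { size_t begin, end; };      // half-open word interval

Span scan_span(size_t obj_begin, size_t obj_end,
               size_t win_begin, size_t win_end,
               bool is_obj_array, bool head_card_dirty) {
  if (is_obj_array || !head_card_dirty) {
    // precise case: scan only the portion inside the dirty window
    return {std::max(obj_begin, win_begin), std::min(obj_end, win_end)};
  }
  return {obj_begin, obj_end};           // imprecise case: scan the whole object
}

// (end of sketch)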
+ if (p > right && last_p != nullptr) { + assert(last_p < right, "Error"); + // check if last_p suffix needs scanning + const oop last_obj = cast_to_oop(last_p); + if (!last_obj->is_objArray()) { + // scan the remaining suffix of the object + const MemRegion last_mr(right, p); + assert(p == last_p + last_obj->size(), "Would miss portion of last_obj"); + last_obj->oop_iterate(cl, last_mr); + log_debug(gc, remset)("Fixed up non-objArray suffix scan in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", + p2i(last_mr.start()), p2i(last_mr.end())); + } else { + log_debug(gc, remset)("Skipped suffix scan of objArray in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", + p2i(right), p2i(p)); + } + } + NOT_PRODUCT(stats.record_scan_obj_cnt(i);) + + // ==== END DIRTY card range processing ==== + } else { + // ==== BEGIN CLEAN card range processing ==== + + assert(ctbm[cur_index] == CardTable::clean_card_val(), "Error"); + // walk back over contiguous clean cards + size_t i = 0; + while (--cur_index >= (ssize_t)start_card_index && ctbm[cur_index] == CardTable::clean_card_val()) { + NOT_PRODUCT(i++); + } + // Record alternations, clean run length, and clean card count + NOT_PRODUCT(stats.record_clean_run(i);) + + // ==== END CLEAN card range processing ==== + } + } +} + +// Given that this range of clusters is known to span a humongous object spanned by region r, scan the +// portion of the humongous object that corresponds to the specified range. +template +template +inline void +ShenandoahScanRemembered::process_humongous_clusters(ShenandoahHeapRegion* r, size_t first_cluster, size_t count, + HeapWord *end_of_range, ClosureType *cl, bool use_write_table) { + ShenandoahHeapRegion* start_region = r->humongous_start_region(); + HeapWord* p = start_region->bottom(); + oop obj = cast_to_oop(p); + assert(r->is_humongous(), "Only process humongous regions here"); + assert(start_region->is_humongous_start(), "Should be start of humongous region"); + assert(p + obj->size() >= end_of_range, "Humongous object ends before range ends"); + + size_t first_card_index = first_cluster * ShenandoahCardCluster::CardsPerCluster; + HeapWord* first_cluster_addr = _rs->addr_for_card_index(first_card_index); + size_t spanned_words = count * ShenandoahCardCluster::CardsPerCluster * CardTable::card_size_in_words(); + start_region->oop_iterate_humongous_slice(cl, true, first_cluster_addr, spanned_words, use_write_table); +} + + +// This method takes a region & determines the end of the region that the worker can scan. +template +template +inline void +ShenandoahScanRemembered::process_region_slice(ShenandoahHeapRegion *region, size_t start_offset, size_t clusters, + HeapWord *end_of_range, ClosureType *cl, bool use_write_table, + uint worker_id) { + + // This is called only for young gen collection, when we scan old gen regions + assert(region->is_old(), "Expecting an old region"); + HeapWord *start_of_range = region->bottom() + start_offset; + size_t start_cluster_no = cluster_for_addr(start_of_range); + assert(addr_for_cluster(start_cluster_no) == start_of_range, "process_region_slice range must align on cluster boundary"); + + // region->end() represents the end of memory spanned by this region, but not all of this + // memory is eligible to be scanned because some of this memory has not yet been allocated. + // + // region->top() represents the end of allocated memory within this region. Any addresses + // beyond region->top() should not be scanned as that memory does not hold valid objects. 
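// Stand-alone sketch (not part of this patch): the clipping rule the branch below spells
// out, as one expression. update_watermark and top stand for region->get_update_watermark()
// and region->top() respectively.

#include <algorithm>
#include <cstddef>

// During update-refs (use_write_table) scan no further than the update watermark;
// during concurrent mark scan no further than top().
size_t clip_end_of_range(size_t end_of_range, size_t update_watermark,
                         size_t top, bool use_write_table) {
  return std::min(end_of_range, use_write_table ? update_watermark : top);
}

// (end of sketch)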
+ + if (use_write_table) { + // This is update-refs servicing. + if (end_of_range > region->get_update_watermark()) { + end_of_range = region->get_update_watermark(); + } + } else { + // This is concurrent mark servicing. Note that TAMS for this region is TAMS at start of old-gen + // collection. Here, we need to scan up to TAMS for most recently initiated young-gen collection. + // Since all LABs are retired at init mark, and since replacement LABs are allocated lazily, and since no + // promotions occur until evacuation phase, TAMS for most recent young-gen is same as top(). + if (end_of_range > region->top()) { + end_of_range = region->top(); + } + } + + log_debug(gc)("Remembered set scan processing Region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT ", using %s table", + region->index(), p2i(start_of_range), p2i(end_of_range), + use_write_table? "read/write (updating)": "read (marking)"); + + // Note that end_of_range may point to the middle of a cluster because we limit scanning to + // region->top() or region->get_update_watermark(). We avoid processing past end_of_range. + // Objects that start between start_of_range and end_of_range, including humongous objects, will + // be fully processed by process_clusters. In no case should we need to scan past end_of_range. + if (start_of_range < end_of_range) { + if (region->is_humongous()) { + ShenandoahHeapRegion* start_region = region->humongous_start_region(); + // TODO: ysr : This will be called multiple times with same start_region, but different start_cluster_no. + // Check that it does the right thing here, and doesn't do redundant work. Also see if the call API/interface + // can be simplified. + process_humongous_clusters(start_region, start_cluster_no, clusters, end_of_range, cl, use_write_table); + } else { + // TODO: ysr The start_of_range calculated above is discarded and may be calculated again in process_clusters(). + // See if the redundant and wasted calculations can be avoided, and if the call parameters can be cleaned up. + // It almost sounds like this set of methods needs a working class to stash away some useful info that can be + // efficiently passed around amongst these methods, as well as related state. Note that we can't use + // ShenandoahScanRemembered as there seems to be only one instance of that object for the heap which is shared + // by all workers. Note that there are also task methods which call these which may have per worker storage. + // We need to be careful however that if the number of workers changes dynamically that state isn't sequestered + // and become obsolete. + process_clusters(start_cluster_no, clusters, end_of_range, cl, use_write_table, worker_id); + } + } +} + +template +inline size_t +ShenandoahScanRemembered::cluster_for_addr(HeapWordImpl **addr) { + size_t card_index = _rs->card_index_for_addr(addr); + size_t result = card_index / ShenandoahCardCluster::CardsPerCluster; + return result; +} + +template +inline HeapWord* +ShenandoahScanRemembered::addr_for_cluster(size_t cluster_no) { + size_t card_index = cluster_no * ShenandoahCardCluster::CardsPerCluster; + return addr_for_card_index(card_index); +} + +// This is used only for debug verification so don't worry about making the scan parallel. 
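// Stand-alone sketch (not part of this patch): roots_do() below sizes its scan with a
// ceiling division over cluster-sized chunks; the same idiom in isolation.

#include <cstddef>

// Number of whole clusters needed to cover num_words heap words; equivalent to
// the (num_heapwords - 1 + cluster_size) / cluster_size expression in roots_do below.
size_t clusters_needed(size_t num_words, size_t cluster_size) {
  return (num_words + cluster_size - 1) / cluster_size;
}

// (end of sketch)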
+template +void ShenandoahScanRemembered::roots_do(OopIterateClosure* cl) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(heap->is_old_bitmap_stable())); + for (size_t i = 0, n = heap->num_regions(); i < n; ++i) { + ShenandoahHeapRegion* region = heap->get_region(i); + if (region->is_old() && region->is_active() && !region->is_cset()) { + HeapWord* start_of_range = region->bottom(); + HeapWord* end_of_range = region->top(); + size_t start_cluster_no = cluster_for_addr(start_of_range); + size_t num_heapwords = end_of_range - start_of_range; + unsigned int cluster_size = CardTable::card_size_in_words() * + ShenandoahCardCluster::CardsPerCluster; + size_t num_clusters = (size_t) ((num_heapwords - 1 + cluster_size) / cluster_size); + + // Remembered set scanner + if (region->is_humongous()) { + process_humongous_clusters(region->humongous_start_region(), start_cluster_no, num_clusters, end_of_range, cl, + false /* use_write_table */); + } else { + process_clusters(start_cluster_no, num_clusters, end_of_range, cl, + false /* use_write_table */, 0 /* fake worker id */); + } + } + } +} + +#ifndef PRODUCT +// Log given card stats +template +inline void ShenandoahScanRemembered::log_card_stats(HdrSeq* stats) { + for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) { + log_info(gc, remset)("%18s: [ %8.2f %8.2f %8.2f %8.2f %8.2f ]", + _card_stats_name[i], + stats[i].percentile(0), stats[i].percentile(25), + stats[i].percentile(50), stats[i].percentile(75), + stats[i].maximum()); + } +} + +// Log card stats for all nworkers for a specific phase t +template +void ShenandoahScanRemembered::log_card_stats(uint nworkers, CardStatLogType t) { + assert(ShenandoahEnableCardStats, "Do not call"); + HdrSeq* sum_stats = card_stats_for_phase(t); + log_info(gc, remset)("%s", _card_stat_log_type[t]); + for (uint i = 0; i < nworkers; i++) { + log_worker_card_stats(i, sum_stats); + } + + // Every so often, log the cumulative global stats + if (++_card_stats_log_counter[t] >= ShenandoahCardStatsLogInterval) { + _card_stats_log_counter[t] = 0; + log_info(gc, remset)("Cumulative stats"); + log_card_stats(sum_stats); + } +} + +// Log card stats for given worker_id, & clear them after merging into given cumulative stats +template +void ShenandoahScanRemembered::log_worker_card_stats(uint worker_id, HdrSeq* sum_stats) { + assert(ShenandoahEnableCardStats, "Do not call"); + + HdrSeq* worker_card_stats = card_stats(worker_id); + log_info(gc, remset)("Worker %u Card Stats: ", worker_id); + log_card_stats(worker_card_stats); + // Merge worker stats into the cumulative stats & clear worker stats + merge_worker_card_stats_cumulative(worker_card_stats, sum_stats); +} + +template +void ShenandoahScanRemembered::merge_worker_card_stats_cumulative( + HdrSeq* worker_stats, HdrSeq* sum_stats) { + for (int i = 0; i < MAX_CARD_STAT_TYPE; i++) { + sum_stats[i].add(worker_stats[i]); + worker_stats[i].clear(); + } +} +#endif + +inline bool ShenandoahRegionChunkIterator::has_next() const { + return _index < _total_chunks; +} + +inline bool ShenandoahRegionChunkIterator::next(struct ShenandoahRegionChunk *assignment) { + if (_index >= _total_chunks) { + return false; + } + size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed); + if (new_index > _total_chunks) { + // First worker that hits new_index == _total_chunks continues, other + // contending workers return false. 
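// Stand-alone sketch (std::atomic instead of HotSpot's Atomic::add, not part of this patch):
// the work-claiming protocol of ShenandoahRegionChunkIterator::next(). Atomic::add returns
// the updated value, hence the +1 here; each contender receives a unique ticket, and tickets
// past the total fail, which is why only over-claiming workers return false.

#include <atomic>
#include <cstddef>

// Returns true and a unique zero-based chunk index while work remains.
bool claim_chunk(std::atomic<size_t>& cursor, size_t total_chunks, size_t& index) {
  size_t new_index = cursor.fetch_add(1, std::memory_order_relaxed) + 1;
  if (new_index > total_chunks) {
    return false;                        // all chunks already claimed
  }
  index = new_index - 1;                 // convert to zero-based indexing
  return true;
}

// (end of sketch)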
+ return false; + } + // convert to zero-based indexing + new_index--; + assert(new_index < _total_chunks, "Error"); + + // Find the group number for the assigned chunk index + size_t group_no; + for (group_no = 0; new_index >= _group_entries[group_no]; group_no++) + ; + assert(group_no < _num_groups, "Cannot have group no greater or equal to _num_groups"); + + // All size computations measured in HeapWord + size_t region_size_words = ShenandoahHeapRegion::region_size_words(); + size_t group_region_index = _region_index[group_no]; + size_t group_region_offset = _group_offset[group_no]; + + size_t index_within_group = (group_no == 0)? new_index: new_index - _group_entries[group_no - 1]; + size_t group_chunk_size = _group_chunk_size[group_no]; + size_t offset_of_this_chunk = group_region_offset + index_within_group * group_chunk_size; + size_t regions_spanned_by_chunk_offset = offset_of_this_chunk / region_size_words; + size_t offset_within_region = offset_of_this_chunk % region_size_words; + + size_t region_index = group_region_index + regions_spanned_by_chunk_offset; + + assignment->_r = _heap->get_region(region_index); + assignment->_chunk_offset = offset_within_region; + assignment->_chunk_size = group_chunk_size; + return true; +} + +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSCANREMEMBEREDINLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp index 80c1d3417b2..3d5a2b149a7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp @@ -121,14 +121,15 @@ typedef struct ShenandoahSharedBitmap { ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; while (true) { ShenandoahSharedValue ov = Atomic::load_acquire(&value); - if ((ov & mask_val) != 0) { + // We require all bits of mask_val to be set + if ((ov & mask_val) == mask_val) { // already set return; } ShenandoahSharedValue nv = ov | mask_val; if (Atomic::cmpxchg(&value, ov, nv) == ov) { - // successfully set + // successfully set: if value returned from cmpxchg equals ov, then nv has overwritten value. return; } } @@ -156,10 +157,19 @@ typedef struct ShenandoahSharedBitmap { Atomic::release_store_fence(&value, (ShenandoahSharedValue)0); } + // Returns true iff any bit set in mask is set in this.value. bool is_set(uint mask) const { return !is_unset(mask); } + // Returns true iff all bits set in mask are set in this.value. + bool is_set_exactly(uint mask) const { + assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); + uint uvalue = Atomic::load_acquire(&value); + return (uvalue & mask) == mask; + } + + // Returns true iff all bits set in mask are unset in this.value. bool is_unset(uint mask) const { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStackWatermark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahStackWatermark.cpp index 8a5b4c29539..d3b5bd509f5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahStackWatermark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahStackWatermark.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2021, Red Hat, Inc. All rights reserved. * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,11 +76,11 @@ OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) { assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context)); return reinterpret_cast(context); } else { - if (_heap->is_concurrent_mark_in_progress()) { - return &_keep_alive_cl; - } else if (_heap->is_concurrent_weak_root_in_progress()) { + if (_heap->is_concurrent_weak_root_in_progress()) { assert(_heap->is_evacuation_in_progress(), "Nothing to evacuate"); return &_evac_update_oop_cl; + } else if (_heap->is_concurrent_mark_in_progress()) { + return &_keep_alive_cl; } else { ShouldNotReachHere(); return nullptr; @@ -92,14 +93,7 @@ void ShenandoahStackWatermark::start_processing_impl(void* context) { ShenandoahHeap* const heap = ShenandoahHeap::heap(); // Process the non-frame part of the thread - if (heap->is_concurrent_mark_in_progress()) { - // We need to reset all TLABs because they might be below the TAMS, and we need to mark - // the objects in them. Do not let mutators allocate any new objects in their current TLABs. - // It is also a good place to resize the TLAB sizes for future allocations. - retire_tlab(); - - _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl); - } else if (heap->is_concurrent_weak_root_in_progress()) { + if (heap->is_concurrent_weak_root_in_progress()) { assert(heap->is_evacuation_in_progress(), "Should not be armed"); // Retire the TLABs, which will force threads to reacquire their TLABs. // This is needed for two reasons. Strong one: new allocations would be with new freeset, @@ -108,6 +102,13 @@ void ShenandoahStackWatermark::start_processing_impl(void* context) { // be needed for reference updates (would update the large filler instead). retire_tlab(); + _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl); + } else if (heap->is_concurrent_mark_in_progress()) { + // We need to reset all TLABs because they might be below the TAMS, and we need to mark + // the objects in them. Do not let mutators allocate any new objects in their current TLABs. + // It is also a good place to resize the TLAB sizes for future allocations. 
+ retire_tlab(); + _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl); } else { ShouldNotReachHere(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.inline.hpp index b6c82e5af48..042143254bc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.inline.hpp @@ -25,9 +25,10 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP #define SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP -#include "gc/shenandoah/shenandoahStringDedup.hpp" - #include "classfile/javaClasses.inline.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahStringDedup.hpp" +#include "oops/markWord.hpp" bool ShenandoahStringDedup::is_string_candidate(oop obj) { assert(Thread::current()->is_Worker_thread(), @@ -45,22 +46,10 @@ bool ShenandoahStringDedup::is_candidate(oop obj) { return false; } - const markWord mark = obj->mark(); - - // Having/had displaced header, too risky to deal with them, skip - if (mark == markWord::INFLATING() || mark.has_displaced_mark_helper()) { - return false; - } - - if (StringDedup::is_below_threshold_age(mark.age())) { - // Increase string age and enqueue it when it reaches age threshold - markWord new_mark = mark.incr_age(); - if (mark == obj->cas_set_mark(new_mark, mark)) { - return StringDedup::is_threshold_age(new_mark.age()) && - !dedup_requested(obj); - } - } - return false; + uint age = ShenandoahHeap::get_object_age(obj); + return (age <= markWord::max_age) && + StringDedup::is_below_threshold_age(age) && + !dedup_requested(obj); } #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp index 8a84b4eaa66..90b51160e7a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp @@ -34,6 +34,8 @@ #include "runtime/mutex.hpp" #include "utilities/debug.hpp" +class ShenandoahHeap; + template class BufferedOverflowTaskQueue: public OverflowTaskQueue { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp new file mode 100644 index 00000000000..42e99ae0026 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shenandoah/mode/shenandoahMode.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahThreadLocalData.hpp" + +ShenandoahThreadLocalData::ShenandoahThreadLocalData() : + _gc_state(0), + _oom_scope_nesting_level(0), + _oom_during_evac(false), + _satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()), + _gclab(nullptr), + _gclab_size(0), + _paced_time(0), + _plab(nullptr), + _plab_size(0), + _plab_evacuated(0), + _plab_promoted(0), + _plab_preallocated_promoted(0), + _plab_allows_promotion(true), + _plab_retries_enabled(true), + _evacuation_stats(nullptr) { + bool gen_mode = ShenandoahHeap::heap()->mode()->is_generational(); + _evacuation_stats = new ShenandoahEvacuationStats(gen_mode); +} + +ShenandoahThreadLocalData::~ShenandoahThreadLocalData() { + if (_gclab != nullptr) { + delete _gclab; + } + if (_plab != nullptr) { + ShenandoahHeap::heap()->retire_plab(_plab); + delete _plab; + } + + // TODO: Preserve these stats somewhere for mutator threads. + delete _evacuation_stats; +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp index 422595e9313..ce88e8904dd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +31,7 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" #include "gc/shenandoah/shenandoahCodeRoots.hpp" +#include "gc/shenandoah/shenandoahEvacTracker.hpp" #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp" #include "runtime/javaThread.hpp" #include "utilities/debug.hpp" @@ -41,26 +43,33 @@ class ShenandoahThreadLocalData { // Evacuation OOM state uint8_t _oom_scope_nesting_level; bool _oom_during_evac; + SATBMarkQueue _satb_mark_queue; + + // Thread-local allocation buffer for object evacuations. + // In generational mode, it is exclusive to the young generation. PLAB* _gclab; size_t _gclab_size; + double _paced_time; - ShenandoahThreadLocalData() : - _gc_state(0), - _oom_scope_nesting_level(0), - _oom_during_evac(false), - _satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()), - _gclab(nullptr), - _gclab_size(0), - _paced_time(0) { - } + // Thread-local allocation buffer only used in generational mode. + // Used both by mutator threads and by GC worker threads + // for evacuations within the old generation and + // for promotions from the young generation into the old generation. + PLAB* _plab; + size_t _plab_size; - ~ShenandoahThreadLocalData() { - if (_gclab != nullptr) { - delete _gclab; - } - } + size_t _plab_evacuated; + size_t _plab_promoted; + size_t _plab_preallocated_promoted; + bool _plab_allows_promotion; // If false, no more promotion by this thread during this evacuation phase. 
+ bool _plab_retries_enabled; + + ShenandoahEvacuationStats* _evacuation_stats; + + ShenandoahThreadLocalData(); + ~ShenandoahThreadLocalData(); static ShenandoahThreadLocalData* data(Thread* thread) { assert(UseShenandoahGC, "Sanity"); @@ -97,6 +106,8 @@ class ShenandoahThreadLocalData { assert(data(thread)->_gclab == nullptr, "Only initialize once"); data(thread)->_gclab = new PLAB(PLAB::min_size()); data(thread)->_gclab_size = 0; + data(thread)->_plab = new PLAB(PLAB::min_size()); + data(thread)->_plab_size = 0; } static PLAB* gclab(Thread* thread) { @@ -111,6 +122,100 @@ class ShenandoahThreadLocalData { data(thread)->_gclab_size = v; } + static void begin_evacuation(Thread* thread, size_t bytes) { + data(thread)->_evacuation_stats->begin_evacuation(bytes); + } + + static void end_evacuation(Thread* thread, size_t bytes) { + data(thread)->_evacuation_stats->end_evacuation(bytes); + } + + static void record_age(Thread* thread, size_t bytes, uint age) { + data(thread)->_evacuation_stats->record_age(bytes, age); + } + + static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) { + return data(thread)->_evacuation_stats; + } + + static PLAB* plab(Thread* thread) { + return data(thread)->_plab; + } + + static size_t plab_size(Thread* thread) { + return data(thread)->_plab_size; + } + + static void set_plab_size(Thread* thread, size_t v) { + data(thread)->_plab_size = v; + } + + static void enable_plab_retries(Thread* thread) { + data(thread)->_plab_retries_enabled = true; + } + + static void disable_plab_retries(Thread* thread) { + data(thread)->_plab_retries_enabled = false; + } + + static bool plab_retries_enabled(Thread* thread) { + return data(thread)->_plab_retries_enabled; + } + + static void enable_plab_promotions(Thread* thread) { + data(thread)->_plab_allows_promotion = true; + } + + static void disable_plab_promotions(Thread* thread) { + data(thread)->_plab_allows_promotion = false; + } + + static bool allow_plab_promotions(Thread* thread) { + return data(thread)->_plab_allows_promotion; + } + + static void reset_plab_evacuated(Thread* thread) { + data(thread)->_plab_evacuated = 0; + } + + static void add_to_plab_evacuated(Thread* thread, size_t increment) { + data(thread)->_plab_evacuated += increment; + } + + static void subtract_from_plab_evacuated(Thread* thread, size_t increment) { + // TODO: Assert underflow + data(thread)->_plab_evacuated -= increment; + } + + static size_t get_plab_evacuated(Thread* thread) { + return data(thread)->_plab_evacuated; + } + + static void reset_plab_promoted(Thread* thread) { + data(thread)->_plab_promoted = 0; + } + + static void add_to_plab_promoted(Thread* thread, size_t increment) { + data(thread)->_plab_promoted += increment; + } + + static void subtract_from_plab_promoted(Thread* thread, size_t increment) { + // TODO: Assert underflow + data(thread)->_plab_promoted -= increment; + } + + static size_t get_plab_promoted(Thread* thread) { + return data(thread)->_plab_promoted; + } + + static void set_plab_preallocated_promoted(Thread* thread, size_t value) { + data(thread)->_plab_preallocated_promoted = value; + } + + static size_t get_plab_preallocated_promoted(Thread* thread) { + return data(thread)->_plab_preallocated_promoted; + } + static void add_paced_time(Thread* thread, double v) { data(thread)->_paced_time += v; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index afd10efdfdd..6072f605ea4 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -49,7 +49,8 @@ class ShenandoahIsUnloadingOopClosure : public OopClosure { public: ShenandoahIsUnloadingOopClosure() : - _marking_context(ShenandoahHeap::heap()->complete_marking_context()), + // TODO: In non-generational mode, this should still be complete_marking_context() + _marking_context(ShenandoahHeap::heap()->marking_context()), _is_unloading(false) { } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp index 711d906ec7c..1c1f653377d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,25 +33,27 @@ #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "utilities/debug.hpp" ShenandoahPhaseTimings::Phase ShenandoahTimingsTracker::_current_phase = ShenandoahPhaseTimings::_invalid_phase; -ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : +ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation) : _heap(ShenandoahHeap::heap()), + _generation(generation), _timer(_heap->gc_timer()), _tracer(_heap->tracer()) { assert(!ShenandoahGCPhase::is_current_phase_valid(), "No current GC phase"); - _heap->set_gc_cause(cause); + _heap->on_cycle_start(cause, _generation); + _timer->register_gc_start(); _tracer->report_gc_start(cause, _timer->gc_start()); _heap->trace_heap_before_gc(_tracer); - _heap->shenandoah_policy()->record_cycle_start(); - _heap->heuristics()->record_cycle_start(); _trace_cycle.initialize(_heap->cycle_memory_manager(), cause, "end of GC cycle", /* allMemoryPoolsAffected */ true, @@ -65,13 +68,13 @@ ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : } ShenandoahGCSession::~ShenandoahGCSession() { - _heap->heuristics()->record_cycle_end(); + _heap->on_cycle_end(_generation); _timer->register_gc_end(); _heap->trace_heap_after_gc(_tracer); - _tracer->report_gc_reference_stats(_heap->ref_processor()->reference_process_stats()); + _tracer->report_gc_reference_stats(_generation->ref_processor()->reference_process_stats()); _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions()); assert(!ShenandoahGCPhase::is_current_phase_valid(), "No current GC phase"); - _heap->set_gc_cause(GCCause::_no_gc); + } ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, const char* notification_message, SvcGCMarker::reason_type type) : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp index af32a20013a..f1298ec4263 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,16 +42,33 @@ #include "services/memoryService.hpp" class GCTimer; +class ShenandoahGeneration; + +#define SHENANDOAH_RETURN_EVENT_MESSAGE(generation_type, prefix, postfix) \ + switch (generation_type) { \ + case GLOBAL_NON_GEN: \ + return prefix "" postfix; \ + case GLOBAL_GEN: \ + return prefix " (GLOBAL)" postfix; \ + case YOUNG: \ + return prefix " (YOUNG)" postfix; \ + case OLD: \ + return prefix " (OLD)" postfix; \ + default: \ + ShouldNotReachHere(); \ + return prefix " (?)" postfix; \ + } \ class ShenandoahGCSession : public StackObj { private: ShenandoahHeap* const _heap; + ShenandoahGeneration* const _generation; GCTimer* const _timer; GCTracer* const _tracer; TraceMemoryManagerStats _trace_cycle; public: - ShenandoahGCSession(GCCause::Cause cause); + ShenandoahGCSession(GCCause::Cause cause, ShenandoahGeneration* generation); ~ShenandoahGCSession(); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp index 4a97e599f3e..555325ab266 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp @@ -27,6 +27,7 @@ #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahDegeneratedGC.hpp" #include "gc/shenandoah/shenandoahFullGC.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMark.inline.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 1d5d962a4ec..1f76e670cd8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,12 +28,15 @@ #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahForwarding.inline.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" #include "gc/shenandoah/shenandoahRootProcessor.hpp" #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "memory/allocation.hpp" #include "memory/iterator.inline.hpp" #include "memory/resourceArea.hpp" @@ -68,6 +72,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { ShenandoahLivenessData* _ld; void* _interior_loc; oop _loc; + ShenandoahGeneration* _generation; public: ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld, @@ -79,11 +84,18 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { _map(map), _ld(ld), _interior_loc(nullptr), - _loc(nullptr) { + _loc(nullptr), + _generation(nullptr) { if (options._verify_marked == ShenandoahVerifier::_verify_marked_complete_except_references || + options._verify_marked == ShenandoahVerifier::_verify_marked_complete_satb_empty || options._verify_marked == ShenandoahVerifier::_verify_marked_disable) { set_ref_discoverer_internal(new ShenandoahIgnoreReferenceDiscoverer()); } + + if (_heap->mode()->is_generational()) { + _generation = _heap->active_generation(); + assert(_generation != nullptr, "Expected active generation in this mode"); + } } private: @@ -107,13 +119,24 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // For performance reasons, only fully verify non-marked field values. // We are here when the host object for *p is already marked. - if (_map->par_mark(obj)) { + // TODO: We should consider specializing this closure by generation ==/!= null, + // to avoid in_generation check on fast path here for non-generational mode. + if (in_generation(obj) && _map->par_mark(obj)) { verify_oop_at(p, obj); _stack->push(ShenandoahVerifierTask(obj)); } } } + bool in_generation(oop obj) { + if (_generation == nullptr) { + return true; + } + + ShenandoahHeapRegion* region = _heap->heap_region_containing(obj); + return _generation->contains(region); + } + void verify_oop(oop obj) { // Perform consistency checks with gradually decreasing safety level. This guarantees // that failure report would not try to touch something that was not yet verified to be @@ -164,7 +187,8 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), memory_order_relaxed); // fallthrough for fast failure for un-live regions: case ShenandoahVerifier::_verify_liveness_conservative: - check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), + check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live() || + (obj_reg->is_old() && ShenandoahHeap::heap()->is_gc_generation_young()), "Object must belong to region with live data"); break; default: @@ -213,21 +237,29 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { } // ------------ obj and fwd are safe at this point -------------- - + // We allow for marked or old here for two reasons: + // 1. 
If this is a young collection, old objects wouldn't be marked. We've + recently changed the verifier traversal to only follow young objects + during a young collection so this _shouldn't_ be necessary. + 2. At present, we do not clear dead objects from the remembered set. + Everything in the remembered set is old (ipso facto), so allowing for + 'marked_or_old' covers the case of stale objects in rset. + TODO: Just use 'is_marked' here. switch (_options._verify_marked) { case ShenandoahVerifier::_verify_marked_disable: // skip break; case ShenandoahVerifier::_verify_marked_incomplete: - check(ShenandoahAsserts::_safe_all, obj, _heap->marking_context()->is_marked(obj), + check(ShenandoahAsserts::_safe_all, obj, _heap->marking_context()->is_marked_or_old(obj), "Must be marked in incomplete bitmap"); break; case ShenandoahVerifier::_verify_marked_complete: - check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj), + check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked_or_old(obj), "Must be marked in complete bitmap"); break; case ShenandoahVerifier::_verify_marked_complete_except_references: - check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj), + case ShenandoahVerifier::_verify_marked_complete_satb_empty: + check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked_or_old(obj), "Must be marked in complete bitmap, except j.l.r.Reference referents"); break; default: @@ -313,21 +345,100 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { virtual void do_oop(narrowOop* p) { do_oop_work(p); } }; +// This closure computes the amounts of used, committed, and garbage memory and the number of regions contained within +// a subset (e.g. the young generation or old generation) of the total heap. class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { private: - size_t _used, _committed, _garbage; + size_t _used, _committed, _garbage, _regions, _humongous_waste; public: - ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0) {}; + ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0) {}; void heap_region_do(ShenandoahHeapRegion* r) { _used += r->used(); _garbage += r->garbage(); _committed += r->is_committed() ? ShenandoahHeapRegion::region_size_bytes() : 0; + if (r->is_humongous()) { + _humongous_waste += r->free(); + } + _regions++; + log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding " SIZE_FORMAT " for %s Region " SIZE_FORMAT ", yielding: " SIZE_FORMAT, + r->used(), (r->is_humongous() ?
"humongous" : "regular"), r->index(), _used); } size_t used() { return _used; } size_t committed() { return _committed; } size_t garbage() { return _garbage; } + size_t regions() { return _regions; } + size_t waste() { return _humongous_waste; } + + // span is the total memory affiliated with these stats (some of which is in use and other is available) + size_t span() { return _regions * ShenandoahHeapRegion::region_size_bytes(); } +}; + +class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { + public: + ShenandoahCalculateRegionStatsClosure old; + ShenandoahCalculateRegionStatsClosure young; + ShenandoahCalculateRegionStatsClosure global; + + void heap_region_do(ShenandoahHeapRegion* r) override { + switch (r->affiliation()) { + case FREE: + return; + case YOUNG_GENERATION: + young.heap_region_do(r); + global.heap_region_do(r); + break; + case OLD_GENERATION: + old.heap_region_do(r); + global.heap_region_do(r); + break; + default: + ShouldNotReachHere(); + } + } + + static void log_usage(ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { + log_debug(gc)("Safepoint verification: %s verified usage: " SIZE_FORMAT "%s, recorded usage: " SIZE_FORMAT "%s", + generation->name(), + byte_size_in_proper_unit(generation->used()), proper_unit_for_byte_size(generation->used()), + byte_size_in_proper_unit(stats.used()), proper_unit_for_byte_size(stats.used())); + } + + static void validate_usage(const bool adjust_for_padding, + const char* label, ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t generation_used = generation->used(); + size_t generation_used_regions = generation->used_regions(); + if (adjust_for_padding && (generation->is_young() || generation->is_global())) { + size_t pad = ShenandoahHeap::heap()->get_pad_for_promote_in_place(); + generation_used += pad; + } + + guarantee(stats.used() == generation_used, + "%s: generation (%s) used size must be consistent: generation-used: " SIZE_FORMAT "%s, regions-used: " SIZE_FORMAT "%s", + label, generation->name(), + byte_size_in_proper_unit(generation_used), proper_unit_for_byte_size(generation_used), + byte_size_in_proper_unit(stats.used()), proper_unit_for_byte_size(stats.used())); + + guarantee(stats.regions() == generation_used_regions, + "%s: generation (%s) used regions (" SIZE_FORMAT ") must equal regions that are in use (" SIZE_FORMAT ")", + label, generation->name(), generation->used_regions(), stats.regions()); + + size_t generation_capacity = generation->max_capacity(); + size_t humongous_regions_promoted = 0; + guarantee(stats.span() <= generation_capacity, + "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") must not exceed current capacity (" SIZE_FORMAT "%s)", + label, generation->name(), stats.regions(), + byte_size_in_proper_unit(generation_capacity), proper_unit_for_byte_size(generation_capacity)); + + size_t humongous_waste = generation->get_humongous_waste(); + guarantee(stats.waste() == humongous_waste, + "%s: generation (%s) humongous waste must be consistent: generation: " SIZE_FORMAT "%s, regions: " SIZE_FORMAT "%s", + label, generation->name(), + byte_size_in_proper_unit(humongous_waste), proper_unit_for_byte_size(humongous_waste), + byte_size_in_proper_unit(stats.waste()), proper_unit_for_byte_size(stats.waste())); + } }; class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure { @@ -411,8 +522,11 @@ class ShenandoahVerifyHeapRegionClosure : public 
ShenandoahHeapRegionClosure { verify(r, r->get_gclab_allocs() <= r->capacity(), "GCLAB alloc count should not be larger than capacity"); - verify(r, r->get_shared_allocs() + r->get_tlab_allocs() + r->get_gclab_allocs() == r->used(), - "Accurate accounting: shared + TLAB + GCLAB = used"); + verify(r, r->get_plab_allocs() <= r->capacity(), + "PLAB alloc count should not be larger than capacity"); + + verify(r, r->get_shared_allocs() + r->get_tlab_allocs() + r->get_gclab_allocs() + r->get_plab_allocs() == r->used(), + "Accurate accounting: shared + TLAB + GCLAB + PLAB = used"); verify(r, !r->is_empty() || !r->has_live(), "Empty regions should not have live data"); @@ -485,6 +599,20 @@ class ShenandoahVerifierReachableTask : public WorkerTask { } }; +class ShenandoahVerifyNoIncompleteSatbBuffers : public ThreadClosure { +public: + virtual void do_thread(Thread* thread) { + SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); + if (!is_empty(queue)) { + fatal("All SATB buffers should have been flushed during mark"); + } + } +private: + bool is_empty(SATBMarkQueue& queue) { + return queue.buffer() == nullptr || queue.index() == queue.capacity(); + } +}; + class ShenandoahVerifierMarkedRegionTask : public WorkerTask { private: const char* _label; @@ -494,6 +622,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { ShenandoahLivenessData* _ld; volatile size_t _claimed; volatile size_t _processed; + ShenandoahGeneration* _generation; public: ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap, @@ -507,13 +636,28 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { _bitmap(bitmap), _ld(ld), _claimed(0), - _processed(0) {}; + _processed(0), + _generation(nullptr) { + if (_options._verify_marked == ShenandoahVerifier::_verify_marked_complete_satb_empty) { + Threads::change_thread_claim_token(); + } + + if (_heap->mode()->is_generational()) { + _generation = _heap->active_generation(); + assert(_generation != nullptr, "Expected active generation in this mode."); + } + }; size_t processed() { return Atomic::load(&_processed); } virtual void work(uint worker_id) { + if (_options._verify_marked == ShenandoahVerifier::_verify_marked_complete_satb_empty) { + ShenandoahVerifyNoIncompleteSatbBuffers verify_satb; + Threads::possibly_parallel_threads_do(true, &verify_satb); + } + ShenandoahVerifierStack stack; ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, ShenandoahMessageBuffer("%s, Marked", _label), @@ -523,6 +667,10 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { size_t v = Atomic::fetch_then_add(&_claimed, 1u, memory_order_relaxed); if (v < _heap->num_regions()) { ShenandoahHeapRegion* r = _heap->get_region(v); + if (!in_generation(r)) { + continue; + } + if (!r->is_humongous() && !r->is_trash()) { work_regular(r, stack, cl); } else if (r->is_humongous_start()) { @@ -534,6 +682,10 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { } } + bool in_generation(ShenandoahHeapRegion* r) { + return _generation == nullptr || _generation->contains(r); + } + virtual void work_humongous(ShenandoahHeapRegion *r, ShenandoahVerifierStack& stack, ShenandoahVerifyOopClosure& cl) { size_t processed = 0; HeapWord* obj = r->bottom(); @@ -606,16 +758,28 @@ class VerifyThreadGCState : public ThreadClosure { VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {} void do_thread(Thread* t) { char actual = ShenandoahThreadLocalData::gc_state(t); - if (actual != _expected) { + if (!verify_gc_state(actual, 
_expected)) { fatal("%s: Thread %s: expected gc-state %d, actual %d", _label, t->name(), _expected, actual); } } + + static bool verify_gc_state(char actual, char expected) { + // Old generation marking is allowed in all states. + if (ShenandoahHeap::heap()->mode()->is_generational()) { + return ((actual & ~(ShenandoahHeap::OLD_MARKING | ShenandoahHeap::MARKING)) == expected); + } else { + assert((actual & ShenandoahHeap::OLD_MARKING) == 0, "Should not mark old in non-generational mode"); + return (actual == expected); + } + } }; -void ShenandoahVerifier::verify_at_safepoint(const char *label, +void ShenandoahVerifier::verify_at_safepoint(const char* label, + VerifyRememberedSet remembered, VerifyForwarded forwarded, VerifyMarked marked, VerifyCollectionSet cset, VerifyLiveness liveness, VerifyRegions regions, + VerifySize sizeness, VerifyGCState gcstate) { guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize"); @@ -639,12 +803,16 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, break; case _verify_gcstate_evacuation: enabled = true; - expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION; + expected = ShenandoahHeap::EVACUATION; if (!_heap->is_stw_gc_in_progress()) { // Only concurrent GC sets this. expected |= ShenandoahHeap::WEAK_ROOTS; } break; + case _verify_gcstate_updating: + enabled = true; + expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::UPDATEREFS; + break; case _verify_gcstate_stable: enabled = true; expected = ShenandoahHeap::STABLE; @@ -664,7 +832,13 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, if (enabled) { char actual = _heap->gc_state(); - if (actual != expected) { + + bool is_marking = (actual & ShenandoahHeap::MARKING); + bool is_marking_young_or_old = (actual & (ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING)); + assert(is_marking == is_marking_young_or_old, "MARKING iff (YOUNG_MARKING or OLD_MARKING), gc_state is: %x", actual); + + // Old generation marking is allowed in all states. 
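The masking that verify_gc_state() applies can be exercised in isolation. A minimal sketch, assuming invented bit values (the real constants live in ShenandoahHeap and may differ):

    #include <cstdio>

    // Invented gc-state bits, for illustration only.
    const char HAS_FORWARDED = 1 << 0;
    const char MARKING       = 1 << 1;
    const char EVACUATION    = 1 << 2;
    const char OLD_MARKING   = 1 << 4;

    // In generational mode, concurrent old marking may overlap any phase,
    // so both marking bits are masked out before comparing states.
    bool gc_state_matches(char actual, char expected) {
      return (char)(actual & ~(OLD_MARKING | MARKING)) == expected;
    }

    int main() {
      char expected = HAS_FORWARDED | EVACUATION;
      char actual   = expected | OLD_MARKING;  // old marking still running
      printf("%s\n", gc_state_matches(actual, expected) ? "match" : "mismatch");
      return 0;
    }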
+ if (!VerifyThreadGCState::verify_gc_state(actual, expected)) { fatal("%s: Global gc-state: expected %d, actual %d", label, expected, actual); } @@ -682,13 +856,20 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, ShenandoahCalculateRegionStatsClosure cl; _heap->heap_region_iterate(&cl); - size_t heap_used = _heap->used(); - guarantee(cl.used() == heap_used, - "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", - label, - byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), - byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); - + size_t heap_used; + if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) { + // Prior to evacuation, regular regions that are to be evacuated in place are padded to prevent further allocations + heap_used = _heap->used() + _heap->get_pad_for_promote_in_place(); + } else if (sizeness != _verify_size_disable) { + heap_used = _heap->used(); + } + if (sizeness != _verify_size_disable) { + guarantee(cl.used() == heap_used, + "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", + label, + byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), + byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); + } size_t heap_committed = _heap->committed(); guarantee(cl.committed() == heap_committed, "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s", @@ -697,12 +878,72 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, byte_size_in_proper_unit(cl.committed()), proper_unit_for_byte_size(cl.committed())); } + log_debug(gc)("Safepoint verification finished heap usage verification"); + + ShenandoahGeneration* generation; + if (_heap->mode()->is_generational()) { + generation = _heap->active_generation(); + guarantee(generation != nullptr, "Need to know which generation to verify."); + } else { + generation = nullptr; + } + + if (generation != nullptr) { + ShenandoahHeapLocker lock(_heap->lock()); + + switch (remembered) { + case _verify_remembered_disable: + break; + case _verify_remembered_before_marking: + log_debug(gc)("Safepoint verification of remembered set at mark"); + verify_rem_set_before_mark(); + break; + case _verify_remembered_before_updating_references: + log_debug(gc)("Safepoint verification of remembered set at update ref"); + verify_rem_set_before_update_ref(); + break; + case _verify_remembered_after_full_gc: + log_debug(gc)("Safepoint verification of remembered set after full gc"); + verify_rem_set_after_full_gc(); + break; + default: + fatal("Unhandled remembered set verification mode"); + } + + ShenandoahGenerationStatsClosure cl; + _heap->heap_region_iterate(&cl); + + if (LogTarget(Debug, gc)::is_enabled()) { + ShenandoahGenerationStatsClosure::log_usage(_heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global); + } + if (sizeness == _verify_size_adjusted_for_padding) { + ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->global_generation(), cl.global); + } else if (sizeness == 
_verify_size_exact) { + ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->global_generation(), cl.global); + } + // else: sizeness must equal _verify_size_disable + } + + log_debug(gc)("Safepoint verification finished remembered set verification"); + // Internal heap region checks if (ShenandoahVerifyLevel >= 1) { ShenandoahVerifyHeapRegionClosure cl(label, regions); - _heap->heap_region_iterate(&cl); + if (generation != nullptr) { + generation->heap_region_iterate(&cl); + } else { + _heap->heap_region_iterate(&cl); + } } + log_debug(gc)("Safepoint verification finished heap region closure verification"); + OrderAccess::fence(); if (UseTLAB) { @@ -727,6 +968,8 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, count_reachable = task.processed(); } + log_debug(gc)("Safepoint verification finished getting initial reachable set"); + // Step 3. Walk marked objects. Marked objects might be unreachable. This verifies what collector, // not the application, can see during the region scans. There is no reason to process the objects // that were already verified, e.g. those marked in verification bitmap. There is interaction with TAMS: @@ -735,7 +978,10 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, // version size_t count_marked = 0; - if (ShenandoahVerifyLevel >= 4 && (marked == _verify_marked_complete || marked == _verify_marked_complete_except_references)) { + if (ShenandoahVerifyLevel >= 4 && + (marked == _verify_marked_complete || + marked == _verify_marked_complete_except_references || + marked == _verify_marked_complete_satb_empty)) { guarantee(_heap->marking_context()->is_complete(), "Marking context should be complete"); ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options); _heap->workers()->run_task(&task); @@ -744,12 +990,17 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, guarantee(ShenandoahVerifyLevel < 4 || marked == _verify_marked_incomplete || marked == _verify_marked_disable, "Should be"); } + log_debug(gc)("Safepoint verification finished walking marked objects"); + // Step 4. Verify accumulated liveness data, if needed. Only reliable if verification level includes // marked objects. 
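The generation filter guarding these per-region walks is easy to model standalone. A sketch with invented stand-in types (the real check is ShenandoahGeneration::contains on a ShenandoahHeapRegion):

    #include <cstdio>
    #include <vector>

    struct Region { bool is_young; };

    struct Generation {
      bool young;
      bool contains(const Region& r) const { return r.is_young == young; }
    };

    int main() {
      std::vector<Region> regions = {{true}, {false}, {true}, {false}};
      Generation young_gen{true};
      Generation* active = &young_gen;  // nullptr in non-generational mode

      size_t visited = 0;
      for (const Region& r : regions) {
        if (active != nullptr && !active->contains(r)) {
          continue;  // skip regions outside the generation being verified
        }
        visited++;
      }
      printf("visited %zu of %zu regions\n", visited, regions.size());
      return 0;
    }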
if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete && liveness == _verify_liveness_complete) { for (size_t i = 0; i < _heap->num_regions(); i++) { ShenandoahHeapRegion* r = _heap->get_region(i); + if (generation != nullptr && !generation->contains(r)) { + continue; + } juint verf_live = 0; if (r->is_humongous()) { @@ -773,6 +1024,9 @@ } } + log_debug(gc)("Safepoint verification finished accumulation of liveness data"); + + log_info(gc)("Verify %s, Level " INTX_FORMAT " (" SIZE_FORMAT " reachable, " SIZE_FORMAT " marked)", label, ShenandoahVerifyLevel, count_reachable, count_marked); @@ -782,11 +1036,13 @@ void ShenandoahVerifier::verify_generic(VerifyOption vo) { verify_at_safepoint( "Generic Verification", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_allow, // conservatively allow forwarded _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations _verify_cset_disable, // cset may be inconsistent _verify_liveness_disable, // no reliable liveness data _verify_regions_disable, // no reliable region data + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_disable // no data about gcstate ); } @@ -794,11 +1050,14 @@ void ShenandoahVerifier::verify_before_concmark() { verify_at_safepoint( "Before Mark", + _verify_remembered_before_marking, + // verify read-only remembered set from bottom() to top() _verify_forwarded_none, // UR should have fixed up _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations _verify_cset_none, // UR should have fixed this _verify_liveness_disable, // no reliable liveness data _verify_regions_notrash, // no trash regions + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_stable // there are no forwarded objects ); } @@ -806,11 +1065,14 @@ void ShenandoahVerifier::verify_after_concmark() { verify_at_safepoint( "After Mark", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_none, // no forwarded references - _verify_marked_complete_except_references, // bitmaps as precise as we can get, except dangling j.l.r.Refs + _verify_marked_complete_satb_empty, + // bitmaps as precise as we can get, except dangling j.l.r.Refs _verify_cset_none, // no references to cset anymore _verify_liveness_complete, // liveness data must be complete here _verify_regions_disable, // trash regions not yet recycled + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_stable_weakroots // heap is still stable, weakroots are in progress ); } @@ -818,11 +1080,14 @@ void ShenandoahVerifier::verify_before_evacuation() { verify_at_safepoint( "Before Evacuation", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_none, // no forwarded references _verify_marked_complete_except_references, // walk over marked objects too _verify_cset_disable, // non-forwarded references to cset expected _verify_liveness_complete, // liveness data must be complete here _verify_regions_disable, // trash regions not yet recycled + _verify_size_adjusted_for_padding, // expect generation and heap sizes to match after adjustments + // for
promote in place padding _verify_gcstate_stable_weakroots // heap is still stable, weakroots are in progress ); } @@ -830,11 +1095,13 @@ void ShenandoahVerifier::verify_during_evacuation() { verify_at_safepoint( "During Evacuation", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_allow, // some forwarded references are allowed _verify_marked_disable, // walk only roots _verify_cset_disable, // some cset references are not forwarded yet _verify_liveness_disable, // liveness data might be already stale after pre-evacs _verify_regions_disable, // trash regions not yet recycled + _verify_size_disable, // we don't know how much of promote-in-place work has been completed _verify_gcstate_evacuation // evacuation is in progress ); } @@ -842,11 +1109,13 @@ void ShenandoahVerifier::verify_after_evacuation() { verify_at_safepoint( "After Evacuation", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_allow, // objects are still forwarded _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well _verify_cset_forwarded, // all cset refs are fully forwarded _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash, // trash regions have been recycled already + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_forwarded // evacuation produced some forwarded objects ); } @@ -854,23 +1123,28 @@ void ShenandoahVerifier::verify_before_updaterefs() { verify_at_safepoint( "Before Updating References", + _verify_remembered_before_updating_references, // verify read-write remembered set _verify_forwarded_allow, // forwarded references allowed _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well _verify_cset_forwarded, // all cset refs are fully forwarded _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash, // trash regions have been recycled already - _verify_gcstate_forwarded // evacuation should have produced some forwarded objects + _verify_size_exact, // expect generation and heap sizes to match exactly + _verify_gcstate_updating // evacuation should have produced some forwarded objects ); } +// We have not yet cleaned up (reclaimed) the collection set void ShenandoahVerifier::verify_after_updaterefs() { verify_at_safepoint( "After Updating References", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_none, // no forwarded references _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well _verify_cset_none, // no cset references, all updated _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_nocset, // no cset regions, trash regions have appeared + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_stable // update refs had cleaned up forwarded objects ); } @@ -878,11 +1152,13 @@ void ShenandoahVerifier::verify_after_degenerated() { verify_at_safepoint( "After Degenerated GC", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_none, // all objects are non-forwarded _verify_marked_complete, // all objects are marked in complete bitmap _verify_cset_none, // no cset references _verify_liveness_disable, // no reliable
liveness data anymore _verify_regions_notrash_nocset, // no trash, no cset + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_stable // degenerated refs had cleaned up forwarded objects ); } @@ -890,11 +1166,13 @@ void ShenandoahVerifier::verify_before_fullgc() { verify_at_safepoint( "Before Full GC", + _verify_remembered_disable, // do not verify remembered set _verify_forwarded_allow, // can have forwarded objects _verify_marked_disable, // do not verify marked: lots of time wasted checking dead allocations _verify_cset_disable, // cset might be foobared _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_disable, // no reliable region data here + _verify_size_disable, // if we degenerate during evacuation, usage not valid: padding and deferred accounting _verify_gcstate_disable // no reliable gcstate data ); } @@ -902,16 +1180,19 @@ void ShenandoahVerifier::verify_after_fullgc() { verify_at_safepoint( "After Full GC", + _verify_remembered_after_full_gc, // verify read-write remembered set _verify_forwarded_none, // all objects are non-forwarded _verify_marked_complete, // all objects are marked in complete bitmap _verify_cset_none, // no cset references _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash_nocset, // no trash, no cset + _verify_size_exact, // expect generation and heap sizes to match exactly _verify_gcstate_stable // full gc cleaned up everything ); } -class ShenandoahVerifyNoForwared : public OopClosure { +// TODO: Why does this closure not visit metadata? +class ShenandoahVerifyNoForwared : public BasicOopIterateClosure { private: template <class T> void do_oop_work(T* p) { @@ -931,7 +1212,8 @@ class ShenandoahVerifyNoForwared : public OopClosure { void do_oop(oop* p) { do_oop_work(p); } }; -class ShenandoahVerifyInToSpaceClosure : public OopClosure { +// TODO: Why does this closure not visit metadata? +class ShenandoahVerifyInToSpaceClosure : public BasicOopIterateClosure { private: template <class T> void do_oop_work(T* p) { @@ -940,7 +1222,7 @@ class ShenandoahVerifyInToSpaceClosure : public OopClosure { oop obj = CompressedOops::decode_not_null(o); ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!heap->marking_context()->is_marked(obj)) { + if (!heap->marking_context()->is_marked_or_old(obj)) { ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr, "Verify Roots In To-Space", "Should be marked", __FILE__, __LINE__); } @@ -972,3 +1254,215 @@ void ShenandoahVerifier::verify_roots_no_forwarded() { ShenandoahVerifyNoForwared cl; ShenandoahRootVerifier::roots_do(&cl); } + +class ShenandoahVerifyRemSetClosure : public BasicOopIterateClosure { +protected: + bool const _init_mark; + ShenandoahHeap* const _heap; + RememberedScanner* const _scanner; + +public: + // The argument distinguishes between init-mark and init-update-refs verification.
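The read-versus-write table selection that this flag drives can be sketched with invented types (the real lookups go through RememberedScanner; card granularity is elided):

    #include <cassert>
    #include <cstddef>

    // Two-table model: the read table is stable while marking scans it;
    // mutators keep dirtying the write table.
    struct Scanner {
      bool read_dirty[8]  = {};
      bool write_dirty[8] = {};
      bool is_card_dirty(size_t i) const       { return read_dirty[i]; }
      bool is_write_card_dirty(size_t i) const { return write_dirty[i]; }
    };

    // Mirrors the closure: init-mark consults the read table,
    // init-update-refs consults the write table.
    bool card_covers(const Scanner& s, size_t card, bool init_mark) {
      return init_mark ? s.is_card_dirty(card) : s.is_write_card_dirty(card);
    }

    int main() {
      Scanner s;
      s.write_dirty[3] = true;           // dirtied after the table swap
      assert(card_covers(s, 3, false));  // visible before update-refs
      assert(!card_covers(s, 3, true));  // not yet in the read table
      return 0;
    }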
+ ShenandoahVerifyRemSetClosure(bool init_mark) : + _init_mark(init_mark), + _heap(ShenandoahHeap::heap()), + _scanner(_heap->card_scan()) {} + + template <class T> + inline void work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + if (_heap->is_in_young(obj)) { + size_t card_index = _scanner->card_index_for_addr((HeapWord*) p); + if (_init_mark && !_scanner->is_card_dirty(card_index)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr, + "Verify init-mark remembered set violation", "clean card should be dirty", __FILE__, __LINE__); + } else if (!_init_mark && !_scanner->is_write_card_dirty(card_index)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr, + "Verify init-update-refs remembered set violation", "clean card should be dirty", __FILE__, __LINE__); + } + } + } + } + + virtual void do_oop(narrowOop* p) { work(p); } + virtual void do_oop(oop* p) { work(p); } +}; + +void ShenandoahVerifier::help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, HeapWord* from, + HeapWord* top, HeapWord* registration_watermark, const char* message) { + RememberedScanner* scanner = _heap->card_scan(); + ShenandoahVerifyRemSetClosure check_interesting_pointers(false); + + HeapWord* obj_addr = from; + if (r->is_humongous_start()) { + oop obj = cast_to_oop(obj_addr); + if ((ctx == nullptr) || ctx->is_marked(obj)) { + size_t card_index = scanner->card_index_for_addr(obj_addr); + // For humongous objects, the typical object is an array, so the following checks may be overkill. + // For regular objects (not object arrays), if the card holding the start of the object is dirty, + // we do not need to verify that cards spanning interesting pointers within this object are dirty. + if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) { + obj->oop_iterate(&check_interesting_pointers); + } + // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered + } + // else, this humongous object is not live so no need to verify its internal pointers + + if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message, + "object not properly registered", __FILE__, __LINE__); + } + } else if (!r->is_humongous()) { + while (obj_addr < top) { + oop obj = cast_to_oop(obj_addr); + // ctx->is_marked() returns true if mark bit set or if obj above TAMS. + if ((ctx == nullptr) || ctx->is_marked(obj)) { + size_t card_index = scanner->card_index_for_addr(obj_addr); + // For regular objects (not object arrays), if the card holding the start of the object is dirty, + // we do not need to verify that cards spanning interesting pointers within this object are dirty.
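The dirty-header short-circuit used in these checks reads more clearly in isolation. A sketch of the condition only (invented Obj type, not the real oop API):

    #include <cassert>

    struct Obj { bool is_obj_array; };

    // A plain object whose header card is dirty needs no per-field card
    // verification; object arrays are always iterated because their payload
    // may span many cards, each of which must be dirty where references live.
    bool must_iterate_fields(bool header_card_dirty, const Obj& o) {
      return !header_card_dirty || o.is_obj_array;
    }

    int main() {
      assert(!must_iterate_fields(true,  Obj{false}));  // dirty header suffices
      assert( must_iterate_fields(true,  Obj{true}));   // arrays always walked
      assert( must_iterate_fields(false, Obj{false}));  // clean header: verify
      return 0;
    }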
+ if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) { + obj->oop_iterate(&check_interesting_pointers); + } + // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered + + if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message, + "object not properly registered", __FILE__, __LINE__); + } + obj_addr += obj->size(); + } else { + // This object is not live so we don't verify dirty cards contained therein + HeapWord* tams = ctx->top_at_mark_start(r); + obj_addr = ctx->get_next_marked_addr(obj_addr, tams); + } + } + } +} + +// Ensure that the remembered set has a dirty card everywhere there is an interesting pointer. +// This examines the read_card_table between bottom() and top() since all PLABs are retired +// before the safepoint for init_mark. Actually, we retire them before update-references and don't +// restore them until the start of evacuation. +void ShenandoahVerifier::verify_rem_set_before_mark() { + shenandoah_assert_safepoint(); + assert(_heap->mode()->is_generational(), "Only verify remembered set for generational operational modes"); + + ShenandoahRegionIterator iterator; + RememberedScanner* scanner = _heap->card_scan(); + ShenandoahVerifyRemSetClosure check_interesting_pointers(true); + ShenandoahMarkingContext* ctx; + + log_debug(gc)("Verifying remembered set at %s mark", _heap->doing_mixed_evacuations()? "mixed": "young"); + + if (_heap->is_old_bitmap_stable() || _heap->active_generation()->is_global()) { + ctx = _heap->complete_marking_context(); + } else { + ctx = nullptr; + } + + while (iterator.has_next()) { + ShenandoahHeapRegion* r = iterator.next(); + if (r == nullptr) { + // TODO: Can this really happen? + break; + } + + HeapWord* tams = (ctx != nullptr) ? ctx->top_at_mark_start(r) : nullptr; + + // TODO: Is this replaceable with call to help_verify_region_rem_set? + + if (r->is_old() && r->is_active()) { + HeapWord* obj_addr = r->bottom(); + if (r->is_humongous_start()) { + oop obj = cast_to_oop(obj_addr); + if ((ctx == nullptr) || ctx->is_marked(obj)) { + // For humongous objects, the typical object is an array, so the following checks may be overkill. + // For regular objects (not object arrays), if the card holding the start of the object is dirty, + // we do not need to verify that cards spanning interesting pointers within this object are dirty. + if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) { + obj->oop_iterate(&check_interesting_pointers); + } + // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered + } + // else, this humongous object is not marked so no need to verify its internal pointers + if (!scanner->verify_registration(obj_addr, ctx)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, nullptr, nullptr, + "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__); + } + } else if (!r->is_humongous()) { + HeapWord* top = r->top(); + while (obj_addr < top) { + oop obj = cast_to_oop(obj_addr); + // ctx->is_marked() returns true if mark bit set (TAMS not relevant during init mark) + if ((ctx == nullptr) || ctx->is_marked(obj)) { + // For regular objects (not object arrays), if the card holding the start of the object is dirty, + // we do not need to verify that cards spanning interesting pointers within this object are dirty.
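The walk pattern used throughout these verifiers, stepping over live objects by size and jumping dead ranges via the next marked address, in toy form (array indices stand in for heap words; get_next_marked_addr is simulated by a linear scan):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t top = 10;
      bool   live[top] = {true, false, false, true, true, false, false, false, true, false};
      size_t size[top] = {1, 2, 0, 1, 1, 3, 0, 0, 2, 0};

      size_t addr = 0;
      while (addr < top) {
        if (live[addr]) {
          printf("verify object at %zu\n", addr);  // card checks would go here
          addr += size[addr];                      // step over a live object
        } else {
          size_t next = addr + 1;                  // toy get_next_marked_addr
          while (next < top && !live[next]) next++;
          addr = next;                             // skip the dead range wholesale
        }
      }
      return 0;
    }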
+ if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) { + obj->oop_iterate(&check_interesting_pointers); + } + // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered + if (!scanner->verify_registration(obj_addr, ctx)) { + ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, nullptr, nullptr, + "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__); + } + obj_addr += obj->size(); + } else { + // This object is not live so we don't verify dirty cards contained therein + assert(tams != nullptr, "If object is not live, ctx and tams should be non-null"); + obj_addr = ctx->get_next_marked_addr(obj_addr, tams); + } + } + } // else, we ignore humongous continuation region + } // else, this is not an OLD region so we ignore it + } // all regions have been processed +} + +void ShenandoahVerifier::verify_rem_set_after_full_gc() { + shenandoah_assert_safepoint(); + assert(_heap->mode()->is_generational(), "Only verify remembered set for generational operational modes"); + + ShenandoahRegionIterator iterator; + + while (iterator.has_next()) { + ShenandoahHeapRegion* r = iterator.next(); + if (r == nullptr) { + // TODO: Can this really happen? + break; + } + if (r->is_old() && !r->is_cset()) { + help_verify_region_rem_set(r, nullptr, r->bottom(), r->top(), r->top(), "Remembered set violation at end of Full GC"); + } + } +} + +// Ensure that the remembered set has a dirty card everywhere there is an interesting pointer. Even though +// the update-references scan of the remembered set only examines cards up to update_watermark, the remembered +// set should be valid through top. This examines the write_card_table between bottom() and top() because +// all PLABs are retired immediately before the start of update refs. +void ShenandoahVerifier::verify_rem_set_before_update_ref() { + shenandoah_assert_safepoint(); + assert(_heap->mode()->is_generational(), "Only verify remembered set for generational operational modes"); + + ShenandoahRegionIterator iterator; + ShenandoahMarkingContext* ctx; + + if (_heap->is_old_bitmap_stable() || _heap->active_generation()->is_global()) { + ctx = _heap->complete_marking_context(); + } else { + ctx = nullptr; + } + + while (iterator.has_next()) { + ShenandoahHeapRegion* r = iterator.next(); + if (r == nullptr) { + // TODO: Can this really happen? + break; + } + if (r->is_old() && !r->is_cset()) { + help_verify_region_rem_set(r, ctx, r->bottom(), r->top(), r->get_update_watermark(), + "Remembered set violation at init-update-references"); + } + } +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index 2bbe5ae68b2..6fbdd8515ed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,6 +58,24 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { ShenandoahHeap* _heap; MarkBitMap* _verification_bit_map; public: + typedef enum { + // Disable remembered set verification. + _verify_remembered_disable, + + // Old objects should be registered and RS cards within *read-only* RS are dirty for all + inter-generational pointers.
+ _verify_remembered_before_marking, + + // Old objects should be registered and RS cards within *read-write* RS are dirty for all + inter-generational pointers. + _verify_remembered_before_updating_references, + + // Old objects should be registered and RS cards within *read-write* RS are dirty for all + inter-generational pointers. + // TODO: Does this differ from the previous mode only in the end range (update_watermark() vs top())? + _verify_remembered_after_full_gc + } VerifyRememberedSet; + typedef enum { // Disable marked objects verification. _verify_marked_disable, @@ -69,7 +88,12 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { // Objects should be marked in "complete" bitmap, except j.l.r.Reference referents, which // may be dangling after marking but before conc-weakrefs-processing. - _verify_marked_complete_except_references + _verify_marked_complete_except_references, + + // Objects should be marked in "complete" bitmap, except j.l.r.Reference referents, which + // may be dangling after marking but before conc-weakrefs-processing. All SATB buffers must + // be empty. + _verify_marked_complete_satb_empty, } VerifyMarked; typedef enum { @@ -122,6 +146,17 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { _verify_regions_notrash_nocset } VerifyRegions; + typedef enum { + // Disable size verification + _verify_size_disable, + + // Enforce exact consistency + _verify_size_exact, + + // Expect promote-in-place adjustments: padding inserted to temporarily prevent further allocation in regular regions + _verify_size_adjusted_for_padding + } VerifySize; + typedef enum { // Disable gc-state verification _verify_gcstate_disable, @@ -136,7 +171,10 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { _verify_gcstate_forwarded, // Evacuation is in progress, some objects are forwarded - _verify_gcstate_evacuation + _verify_gcstate_evacuation, + + // Evacuation is done, some objects are forwarded, updating is in progress + _verify_gcstate_updating } VerifyGCState; struct VerifyOptions { @@ -160,12 +198,14 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { }; private: - void verify_at_safepoint(const char *label, + void verify_at_safepoint(const char* label, + VerifyRememberedSet remembered, VerifyForwarded forwarded, VerifyMarked marked, VerifyCollectionSet cset, VerifyLiveness liveness, VerifyRegions regions, + VerifySize sizeness, VerifyGCState gcstate); public: @@ -188,6 +228,14 @@ class ShenandoahVerifier : public CHeapObj<mtGC> { void verify_roots_in_to_space(); void verify_roots_no_forwarded(); + +private: + void help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, + HeapWord* from, HeapWord* top, HeapWord* update_watermark, const char* message); + + void verify_rem_set_before_mark(); + void verify_rem_set_before_update_ref(); + void verify_rem_set_after_full_gc(); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp index 5c06bdbf9b4..3ea4e17ca60 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp @@ -32,6 +32,7 @@ uint ShenandoahWorkerPolicy::_prev_par_marking = 0; uint ShenandoahWorkerPolicy::_prev_conc_marking = 0; +uint ShenandoahWorkerPolicy::_prev_conc_rs_scanning = 0; uint ShenandoahWorkerPolicy::_prev_conc_evac = 0; uint ShenandoahWorkerPolicy::_prev_conc_root_proc = 0; uint ShenandoahWorkerPolicy::_prev_conc_refs_proc = 0; @@ -61,6 +62,15 @@ uint
ShenandoahWorkerPolicy::calc_workers_for_conc_marking() { return _prev_conc_marking; } +uint ShenandoahWorkerPolicy::calc_workers_for_rs_scanning() { + uint active_workers = (_prev_conc_rs_scanning == 0) ? ConcGCThreads : _prev_conc_rs_scanning; + _prev_conc_rs_scanning = + WorkerPolicy::calc_active_conc_workers(ConcGCThreads, + active_workers, + Threads::number_of_non_daemon_threads()); + return _prev_conc_rs_scanning; +} + // Reuse the calculation result from init marking uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() { return _prev_par_marking; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp index 3f47822f220..489be9723dd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp @@ -31,6 +31,7 @@ class ShenandoahWorkerPolicy : AllStatic { private: static uint _prev_par_marking; static uint _prev_conc_marking; + static uint _prev_conc_rs_scanning; static uint _prev_conc_root_proc; static uint _prev_conc_refs_proc; static uint _prev_conc_evac; @@ -48,6 +49,9 @@ class ShenandoahWorkerPolicy : AllStatic { // Calculate the number of workers for concurrent marking static uint calc_workers_for_conc_marking(); + // Calculate the number of workers for remembered set scanning + static uint calc_workers_for_rs_scanning(); + // Calculate the number of workers for final marking static uint calc_workers_for_final_marking(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp new file mode 100644 index 00000000000..9d0f664c4a6 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp @@ -0,0 +1,100 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" + +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahUtils.hpp" +#include "gc/shenandoah/shenandoahVerifier.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" +#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp" + +ShenandoahYoungGeneration::ShenandoahYoungGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity) : + ShenandoahGeneration(YOUNG, max_queues, max_capacity, soft_max_capacity), + _old_gen_task_queues(nullptr) { +} + +void ShenandoahYoungGeneration::set_concurrent_mark_in_progress(bool in_progress) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + heap->set_concurrent_young_mark_in_progress(in_progress); + if (is_bootstrap_cycle() && in_progress && !heap->is_prepare_for_old_mark_in_progress()) { + // This is not a bug. When the bootstrapping marking phase completes, + // old generation marking normally continues concurrently. The exception + // is when old-gen preparation for mixed evacuation has been preempted; + // in that case we do not want to set concurrent old mark to be in progress. + heap->set_concurrent_old_mark_in_progress(in_progress); + } +} + +bool ShenandoahYoungGeneration::contains(ShenandoahHeapRegion* region) const { + // TODO: why not test for equals YOUNG_GENERATION? As written, returns true for regions that are FREE + return !region->is_old(); +} + +void ShenandoahYoungGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + // Just iterate over the young generation here. + ShenandoahGenerationRegionClosure<YOUNG> young_regions(cl); + ShenandoahHeap::heap()->parallel_heap_region_iterate(&young_regions); +} + +void ShenandoahYoungGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) { + ShenandoahGenerationRegionClosure<YOUNG> young_regions(cl); + ShenandoahHeap::heap()->heap_region_iterate(&young_regions); +} + +bool ShenandoahYoungGeneration::is_concurrent_mark_in_progress() { + return ShenandoahHeap::heap()->is_concurrent_young_mark_in_progress(); +} + +void ShenandoahYoungGeneration::reserve_task_queues(uint workers) { + ShenandoahGeneration::reserve_task_queues(workers); + if (is_bootstrap_cycle()) { + _old_gen_task_queues->reserve(workers); + } +} + +bool ShenandoahYoungGeneration::contains(oop obj) const { + return ShenandoahHeap::heap()->is_in_young(obj); +} + +ShenandoahHeuristics* ShenandoahYoungGeneration::initialize_heuristics(ShenandoahMode* gc_mode) { + _heuristics = new ShenandoahYoungHeuristics(this); + _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedYoungGCInterval); + confirm_heuristics_mode(); + return _heuristics; +} + +size_t ShenandoahYoungGeneration::available() const { + // The collector reserve may eat into what the mutator is allowed to use. Make sure we are looking + // at what is available to the mutator when reporting how much memory is available.
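The capping that this comment describes is plain min arithmetic. A sketch with invented numbers (not the real accounting):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      size_t generation_available = 512u * 1024 * 1024;  // capacity minus used
      size_t free_set_available   = 384u * 1024 * 1024;  // excludes collector reserve

      // Report the smaller figure: memory set aside for the collector is not
      // mutator-usable even when the generation nominally has headroom.
      size_t mutator_available = std::min(generation_available, free_set_available);
      assert(mutator_available == free_set_available);
      return 0;
    }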
+ size_t available = this->ShenandoahGeneration::available(); + return MIN2(available, ShenandoahHeap::heap()->free_set()->available()); +} + +size_t ShenandoahYoungGeneration::soft_available() const { + size_t available = this->ShenandoahGeneration::soft_available(); + return MIN2(available, ShenandoahHeap::heap()->free_set()->available()); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp new file mode 100644 index 00000000000..e7846f6e864 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHYOUNGGENERATION_HPP +#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHYOUNGGENERATION_HPP + +#include "gc/shenandoah/shenandoahGeneration.hpp" + +class ShenandoahYoungGeneration : public ShenandoahGeneration { +private: + ShenandoahObjToScanQueueSet* _old_gen_task_queues; + +public: + ShenandoahYoungGeneration(uint max_queues, size_t max_capacity, size_t max_soft_capacity); + + virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override; + + const char* name() const override { + return "YOUNG"; + } + + void set_concurrent_mark_in_progress(bool in_progress) override; + bool is_concurrent_mark_in_progress() override; + + void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override; + + bool contains(ShenandoahHeapRegion* region) const override; + bool contains(oop obj) const override; + + void reserve_task_queues(uint workers) override; + void set_old_gen_task_queues(ShenandoahObjToScanQueueSet* old_gen_queues) { + _old_gen_task_queues = old_gen_queues; + } + ShenandoahObjToScanQueueSet* old_gen_task_queues() const override { + return _old_gen_task_queues; + } + + // Returns true if the young generation is configured to enqueue old + // oops for the old generation mark queues. 
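The null-queue convention behind is_bootstrap_cycle(), declared just below, can be sketched with stand-in types (the real queue type is ShenandoahObjToScanQueueSet):

    // Invented minimal types illustrating the convention: holding the old
    // generation's mark queues is what makes a young cycle a bootstrap cycle.
    struct QueueSet {};

    struct YoungGen {
      QueueSet* old_gen_task_queues = nullptr;
      void set_old_gen_task_queues(QueueSet* q) { old_gen_task_queues = q; }
      bool is_bootstrap_cycle() const { return old_gen_task_queues != nullptr; }
    };

    int main() {
      YoungGen young;
      QueueSet old_queues;
      young.set_old_gen_task_queues(&old_queues);   // bootstrap cycle begins
      bool bootstrap = young.is_bootstrap_cycle();  // true: old marking piggybacks
      young.set_old_gen_task_queues(nullptr);       // back to a plain young cycle
      return bootstrap ? 0 : 1;
    }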
+ bool is_bootstrap_cycle() { + return _old_gen_task_queues != nullptr; + } + + size_t available() const override; + + // Do not override available_with_reserve() because that needs to see memory reserved for the Collector + + size_t soft_available() const override; +}; + +#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHYOUNGGENERATION_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 14212d48b09..cd5a2dc6a11 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -1,6 +1,7 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +35,86 @@ range, \ constraint) \ \ + product(uintx, ShenandoahGenerationalHumongousReserve, 0, EXPERIMENTAL, \ + "(Generational mode only) What percent of the heap should be " \ + "reserved for humongous objects if possible. Old-generation " \ + "collections will endeavor to evacuate old-gen regions within " \ + "this reserved area even if these regions do not contain a high " \ + "percentage of garbage. Setting a larger value will cause " \ + "more frequent old-gen collections. A smaller value will " \ + "increase the likelihood that humongous object allocations " \ + "fail, resulting in stop-the-world full GCs.") \ + range(0,100) \ + \ + product(double, ShenandoahMinOldGenGrowthPercent, 12.5, EXPERIMENTAL, \ + "(Generational mode only) If the usage within old generation " \ + "has grown by at least this percent of its live memory size " \ + "at completion of the most recent old-generation marking " \ + "effort, heuristics may trigger the start of a new old-gen " \ + "collection.") \ + range(0.0,100.0) \ + \ + product(uintx, ShenandoahIgnoreOldGrowthBelowPercentage,10, EXPERIMENTAL, \ + "(Generational mode only) If the total usage of the old " \ + "generation is smaller than this percent, we do not trigger " \ + "old gen collections even if old has grown, except when " \ + "ShenandoahDoNotIgnoreGrowthAfterYoungCycles " \ + "consecutive cycles have been completed following the " \ + "preceding old-gen collection.") \ + range(0,100) \ + \ + product(uintx, ShenandoahDoNotIgnoreGrowthAfterYoungCycles, \ + 50, EXPERIMENTAL, \ + "(Generational mode only) Even if the usage of old generation " \ + "is below ShenandoahIgnoreOldGrowthBelowPercentage, " \ + "trigger an old-generation mark if old has grown and this " \ + "many consecutive young-gen collections have been " \ + "completed following the preceding old-gen collection.") \ + \ + product(bool, ShenandoahGenerationalCensusAtEvac, false, EXPERIMENTAL, \ + "(Generational mode only) Object age census at evacuation, " \ + "rather than during marking.") \ + \ + product(bool, ShenandoahGenerationalAdaptiveTenuring, true, EXPERIMENTAL, \ + "(Generational mode only) Dynamically adapt tenuring age.") \ + \ + product(bool, ShenandoahGenerationalCensusIgnoreOlderCohorts, true, \ + EXPERIMENTAL,\ + "(Generational mode only) Ignore mortality rates older than the " \ + "oldest cohort under the tenuring age for the last cycle."
) \ + \ + product(uintx, ShenandoahGenerationalMinTenuringAge, 1, EXPERIMENTAL, \ + "(Generational mode only) Floor for adaptive tenuring age. " \ + "Setting floor and ceiling to the same value fixes the tenuring " \ + "age; setting both to 1 simulates a poor approximation to " \ + "AlwaysTenure, and setting both to 16 simulates NeverTenure.") \ + range(1,16) \ + \ + product(uintx, ShenandoahGenerationalMaxTenuringAge, 15, EXPERIMENTAL, \ + "(Generational mode only) Ceiling for adaptive tenuring age. " \ + "Setting floor and ceiling to the same value fixes the tenuring " \ + "age; setting both to 1 simulates a poor approximation to " \ + "AlwaysTenure, and setting both to 16 simulates NeverTenure.") \ + range(1,16) \ + \ + product(double, ShenandoahGenerationalTenuringMortalityRateThreshold, \ + 0.1, EXPERIMENTAL, \ + "(Generational mode only) Cohort mortality rates below this " \ + "value will be treated as indicative of longevity, leading to " \ + "tenuring. A lower value delays tenuring, a higher value hastens "\ + "it. Used only when ShenandoahGenerationalAdaptiveTenuring is " \ + "enabled.") \ + range(0.001,0.999) \ + \ + product(size_t, ShenandoahGenerationalTenuringCohortPopulationThreshold, \ + 4*K, EXPERIMENTAL, \ + "(Generational mode only) Cohorts whose population is lower than "\ + "this value in the previous census are ignored with respect to " \ + "tenuring decisions. Effectively this makes them tenurable as " \ + "soon as all older cohorts are. Set this value to the largest " \ + "cohort population volume that you are comfortable ignoring " \ + "when making tenuring decisions.") \ + \ product(size_t, ShenandoahRegionSize, 0, EXPERIMENTAL, \ "Static heap region size. Set zero to enable automatic sizing.") \ \ @@ -62,7 +143,8 @@ "barriers are in use. Possible values are:" \ " satb - snapshot-at-the-beginning concurrent GC (three pass mark-evac-update);" \ " iu - incremental-update concurrent GC (three pass mark-evac-update);" \ - " passive - stop the world GC only (either degenerated or full)") \ + " passive - stop the world GC only (either degenerated or full);" \ + " generational - generational concurrent GC") \ \ product(ccstr, ShenandoahGCHeuristics, "adaptive", \ "GC heuristics to use. This fine-tunes the GC mode selected, " \ @@ -76,6 +158,16 @@ " compact - run GC more frequently and with deeper targets to " \ "free up more memory.") \ \ + product(uintx, ShenandoahExpeditePromotionsThreshold, 5, EXPERIMENTAL, \ + "When Shenandoah expects to promote at least this percentage " \ + "of the young generation, trigger a young collection to " \ + "expedite these promotions.") \ + range(0,100) \ + \ + product(uintx, ShenandoahExpediteMixedThreshold, 10, EXPERIMENTAL, \ + "When there are this many old regions waiting to be collected, " \ + "trigger a mixed collection immediately.") \ + \ product(uintx, ShenandoahGarbageThreshold, 25, EXPERIMENTAL, \ "How much garbage a region has to contain before it would be " \ "taken for collection. This is a guideline only, as GC heuristics "\ @@ -84,17 +176,35 @@ "collector accepts. 
In percents of heap region size.") \ range(0,100) \ \ + product(uintx, ShenandoahOldGarbageThreshold, 15, EXPERIMENTAL, \ + "How much garbage an old region has to contain before it would " \ + "be taken for collection.") \ + range(0,100) \ + \ + product(uintx, ShenandoahIgnoreGarbageThreshold, 5, EXPERIMENTAL, \ + "When less than this amount of garbage (as a percentage of " \ + "region size) exists within a region, the region will not be " \ + "added to the collection set, even when the heuristic has " \ + "chosen to aggressively add regions with less than " \ + "ShenandoahGarbageThreshold amount of garbage into the " \ + "collection set.") \ + range(0,100) \ + \ product(uintx, ShenandoahInitFreeThreshold, 70, EXPERIMENTAL, \ - "How much heap should be free before some heuristics trigger the "\ - "initial (learning) cycles. Affects cycle frequency on startup " \ - "and after drastic state changes, e.g. after degenerated/full " \ - "GC cycles. In percents of (soft) max heap size.") \ + "When less than this amount of memory is free within the " \ + "heap or generation, trigger a learning cycle if we are " \ + "in learning mode. Learning mode happens during initialization " \ + "and following a drastic state change, such as following a " \ + "degenerated or Full GC cycle. In percents of soft max " \ + "heap size.") \ range(0,100) \ \ product(uintx, ShenandoahMinFreeThreshold, 10, EXPERIMENTAL, \ - "How much heap should be free before most heuristics trigger the "\ - "collection, even without other triggers. Provides the safety " \ - "margin for many heuristics. In percents of (soft) max heap size.")\ + "Percentage of free heap memory (or young generation, in " \ + "generational mode) below which most heuristics trigger " \ + "collection independent of other triggers. Provides a safety " \ + "margin for many heuristics. In percents of (soft) max heap " \ + "size.") \ range(0,100) \ \ product(uintx, ShenandoahAllocationThreshold, 0, EXPERIMENTAL, \ @@ -110,12 +220,12 @@ "cases. In percents of (soft) max heap size.") \ range(0,100) \ \ - product(uintx, ShenandoahLearningSteps, 5, EXPERIMENTAL, \ + product(uintx, ShenandoahLearningSteps, 10, EXPERIMENTAL, \ "The number of cycles some heuristics take to collect in order " \ "to learn application and GC performance.") \ range(0,100) \ \ - product(uintx, ShenandoahImmediateThreshold, 90, EXPERIMENTAL, \ + product(uintx, ShenandoahImmediateThreshold, 70, EXPERIMENTAL, \ "The cycle may shortcut when enough garbage can be reclaimed " \ "from the immediate garbage (completely garbage regions). " \ "In percents of total garbage found. Setting this threshold " \ @@ -144,12 +254,22 @@ "the heuristic is to allocation spikes. Decreasing this number " \ "increases the sensitivity. ") \ \ - product(double, ShenandoahAdaptiveDecayFactor, 0.5, EXPERIMENTAL, \ + product(double, ShenandoahAdaptiveDecayFactor, 0.1, EXPERIMENTAL, \ "The decay factor (alpha) used for values in the weighted " \ "moving average of cycle time and allocation rate. " \ "Larger values give more weight to recent values.") \ range(0,1.0) \ \ + product(bool, ShenandoahAdaptiveIgnoreShortCycles, true, EXPERIMENTAL, \ + "The adaptive heuristic tracks a moving average of cycle " \ + "times in order to start a GC before memory is exhausted. " \ + "In some cases, Shenandoah may skip the evacuation and update " \ + "reference phases, resulting in a shorter cycle. These may skew " \ + "the average cycle time downward and may cause the heuristic " \ + "to wait too long to start a cycle. 
Disabling this will cause " \ + "the GC to run less often, which will reduce CPU utilization " \ + "but increase the risk of degenerated cycles.") \ + \ product(uintx, ShenandoahGuaranteedGCInterval, 5*60*1000, EXPERIMENTAL, \ "Many heuristics would guarantee a concurrent GC cycle at " \ "least with this interval. This is useful when large idle " \ @@ -157,6 +277,16 @@ "time from active application. Time is in milliseconds. " \ "Setting this to 0 disables the feature.") \ \ + product(uintx, ShenandoahGuaranteedOldGCInterval, 10*60*1000, EXPERIMENTAL, \ + "Run a collection of the old generation at least this often. " \ + "Heuristics may trigger collections more frequently. Time is in " \ + "milliseconds. Setting this to 0 disables the feature.") \ + \ + product(uintx, ShenandoahGuaranteedYoungGCInterval, 5*60*1000, EXPERIMENTAL, \ + "Run a collection of the young generation at least this often. " \ + "Heuristics may trigger collections more frequently. Time is in " \ + "milliseconds. Setting this to 0 disables the feature.") \ + \ product(bool, ShenandoahAlwaysClearSoftRefs, false, EXPERIMENTAL, \ "Unconditionally clear soft references, instead of using any " \ "other cleanup policy. This minimizes footprint at expense of" \ @@ -216,25 +346,105 @@ " 4 = previous level, plus all marked objects") \ \ product(uintx, ShenandoahEvacReserve, 5, EXPERIMENTAL, \ - "How much of heap to reserve for evacuations. Larger values make "\ - "GC evacuate more live objects on every cycle, while leaving " \ - "less headroom for application to allocate in. In percents of " \ - "total heap size.") \ + "How much of (young-generation) heap to reserve for " \ + "(young-generation) evacuations. Larger values allow GC to " \ + "evacuate more live objects on every cycle, while leaving " \ + "less headroom for application to allocate while GC is " \ + "evacuating and updating references. This parameter is " \ + "consulted at the end of marking, before selecting the " \ + "collection set. If available memory at this time is smaller " \ + "than the indicated reserve, the bound on collection set size is "\ + "adjusted downward. The size of a generational mixed " \ + "evacuation collection set (comprised of both young and old " \ + "regions) is also bounded by this parameter. In percents of " \ + "total (young-generation) heap size.") \ range(1,100) \ \ product(double, ShenandoahEvacWaste, 1.2, EXPERIMENTAL, \ "How much waste evacuations produce within the reserved space. " \ "Larger values make evacuations more resilient against " \ "evacuation conflicts, at expense of evacuating less on each " \ - "GC cycle.") \ + "GC cycle. Smaller values increase the risk of evacuation " \ + "failures, which will trigger stop-the-world Full GC passes.") \ range(1.0,100.0) \ \ + product(double, ShenandoahOldEvacWaste, 1.4, EXPERIMENTAL, \ + "How much waste evacuations produce within the reserved space. " \ + "Larger values make evacuations more resilient against " \ + "evacuation conflicts, at expense of evacuating less on each " \ + "GC cycle. Smaller values increase the risk of evacuation " \ + "failures, which will trigger stop-the-world Full GC passes.") \ + range(1.0,100.0) \ + \ + product(double, ShenandoahPromoEvacWaste, 1.2, EXPERIMENTAL, \ + "How much waste promotions produce within the reserved space. " \ + "Larger values make evacuations more resilient against " \ + "evacuation conflicts, at expense of promoting less on each " \ + "GC cycle. 
Smaller values increase the risk of evacuation " \ + "failures, which will trigger stop-the-world Full GC passes.") \ + range(1.0,100.0) \ + \ + product(uintx, ShenandoahMaxEvacLABRatio, 0, EXPERIMENTAL, \ + "Potentially, each running thread maintains a PLAB for " \ + "evacuating objects into old-gen memory and a GCLAB for " \ + "evacuating objects into young-gen memory. Each time a thread " \ + "exhausts its PLAB or GCLAB, a new local buffer is allocated. " \ + "By default, the new buffer is twice the size of the previous " \ + "buffer. The sizes are reset to the minimum at the start of " \ + "each GC pass. This parameter limits the growth of evacuation " \ + "buffer sizes to its value multiplied by the minimum buffer " \ + "size. A higher value allows evacuation allocations to be more " \ + "efficient because less synchronization is required by " \ + "individual threads. However, a larger value increases the " \ + "likelihood of evacuation failures, leading to long " \ + "stop-the-world pauses. This is because a large value " \ + "allows individual threads to consume large percentages of " \ + "the total evacuation budget without necessarily filling " \ + "their local evacuation buffers with evacuated " \ + "objects. A value of zero means no maximum size is enforced.") \ + range(0, 1024) \ + \ product(bool, ShenandoahEvacReserveOverflow, true, EXPERIMENTAL, \ "Allow evacuations to overflow the reserved space. Enabling it " \ "will make evacuations more resilient when evacuation " \ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ + product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \ + "The maximum proportion of evacuation from old-gen memory, " \ + "expressed as a percentage. The default value 75 denotes that " \ + "no more than 75% of the collection set evacuation workload may "\ + "be towards evacuation of old-gen heap regions. This limits " \ + "both the promotion of aged regions and the compaction of " \ + "existing old regions. A value of 75 denotes that the total " \ + "evacuation work may increase to as much as four times the " \ + "young-gen evacuation work. A larger value allows quicker " \ + "promotion and allows a smaller number of mixed evacuations to " \ + "process the entire list of old-gen collection candidates at " \ + "the cost of an increased disruption of the normal cadence of " \ + "young-gen collections. A value of 100 allows a mixed " \ + "evacuation to focus entirely on old-gen memory, allowing no " \ + "young-gen regions to be collected, likely resulting in " \ + "subsequent allocation failures because the allocation pool is " \ + "not replenished. A value of 0 allows a mixed evacuation to " \ + "focus entirely on young-gen memory, allowing no old-gen " \ + "regions to be collected, likely resulting in subsequent " \ + "promotion failures and triggering of stop-the-world full GC " \ + "events.") \ + range(0,100) \ + \ + product(uintx, ShenandoahMinYoungPercentage, 20, EXPERIMENTAL, \ + "The minimum percentage of the heap to use for the young " \ + "generation. Heuristics will not adjust the young generation " \ + "to be less than this.") \ + range(0, 100) \ + \ + product(uintx, ShenandoahMaxYoungPercentage, 100, EXPERIMENTAL, \ + "The maximum percentage of the heap to use for the young " \ + "generation. 
Heuristics will not adjust the young generation " \ + "to be more than this.") \ + range(0, 100) \ + \ product(bool, ShenandoahPacing, true, EXPERIMENTAL, \ "Pace application allocations to give GC chance to start " \ "and complete before allocation failure is reached.") \ @@ -286,6 +496,10 @@ "How many back-to-back Degenerated GCs should happen before " \ "going to a Full GC.") \ \ + product(uintx, ShenandoahOOMGCRetries, 3, EXPERIMENTAL, \ + "How many GCs should happen before we throw OutOfMemoryError " \ + "for an allocation request, including at least one Full GC.") \ + \ product(bool, ShenandoahImplicitGCInvokesConcurrent, false, EXPERIMENTAL, \ "Should internally-caused GC requests invoke concurrent cycles, " \ "should they do the stop-the-world (Degenerated / Full GC)? " \ @@ -304,6 +518,15 @@ product(bool, ShenandoahAllocFailureALot, false, DIAGNOSTIC, \ "Testing: make lots of artificial allocation failures.") \ \ + product(uintx, ShenandoahCoalesceChance, 0, DIAGNOSTIC, \ + "Testing: Abandon remaining mixed collections with this " \ + "likelihood. Following each mixed collection, abandon all " \ + "remaining mixed collection candidate regions with likelihood " \ + "ShenandoahCoalesceChance. Abandoning a mixed collection will " \ + "cause the old regions to be made parsable, rather than being " \ + "evacuated.") \ + range(0, 100) \ + \ product(intx, ShenandoahMarkScanPrefetch, 32, EXPERIMENTAL, \ "How many objects to prefetch ahead when traversing mark bitmaps. "\ "Set to 0 to disable prefetching.") \ @@ -332,6 +555,10 @@ product(bool, ShenandoahIUBarrier, false, DIAGNOSTIC, \ "Turn on/off I-U barriers in Shenandoah") \ \ + product(bool, ShenandoahCardBarrier, false, DIAGNOSTIC, \ + "Turn on/off card-marking post-write barrier in Shenandoah: " \ + " true when ShenandoahGCMode is generational, false otherwise") \ + \ product(bool, ShenandoahCASBarrier, true, DIAGNOSTIC, \ "Turn on/off CAS barriers in Shenandoah") \ \ @@ -347,7 +574,34 @@ develop(bool, ShenandoahVerifyOptoBarriers, trueInDebug, \ "Verify no missing barriers in C2.") \ \ - -// end of GC_SHENANDOAH_FLAGS + product(uintx, ShenandoahOldCompactionReserve, 8, EXPERIMENTAL, \ + "During generational GC, prevent promotions from filling " \ + "this number of heap regions. These regions are reserved " \ + "for the purpose of supporting compaction of old-gen " \ + "memory. Otherwise, old-gen memory cannot be compacted.") \ + range(0, 128) \ + \ + product(bool, ShenandoahAllowOldMarkingPreemption, true, DIAGNOSTIC, \ + "Allow young generation collections to suspend concurrent" \ + " marking in the old generation.") \ + \ + product(uintx, ShenandoahAgingCyclePeriod, 1, EXPERIMENTAL, \ + "With generational mode, increment the age of objects and " \ + "regions each time this many young-gen GC cycles are completed.") \ + \ + notproduct(bool, ShenandoahEnableCardStats, false, \ + "Enable statistics collection related to clean & dirty cards") \ + \ + notproduct(int, ShenandoahCardStatsLogInterval, 50, \ + "Log cumulative card stats every so many remembered set or " \ + "update refs scans") \ + \ + product(uintx, ShenandoahMinimumOldMarkTimeMs, 100, EXPERIMENTAL, \ + "Minimum amount of time in milliseconds to run old marking " \ + "before a young collection is allowed to run. This is intended " \ + "to prevent starvation of the old collector. 
Setting this to " \ + "0 will allow back-to-back young collections to run during old " \ + "marking.") \ + // end of GC_SHENANDOAH_FLAGS #endif // SHARE_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP diff --git a/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp b/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp index 9bc3af5ba9e..376df7f7a0f 100644 --- a/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp +++ b/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp @@ -25,6 +25,8 @@ #define SHARE_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP #include "gc/shenandoah/shenandoahHeap.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" @@ -32,8 +34,9 @@ nonstatic_field(ShenandoahHeap, _num_regions, size_t) \ nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \ nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \ - volatile_nonstatic_field(ShenandoahHeap, _used, size_t) \ + nonstatic_field(ShenandoahHeap, _global_generation, ShenandoahGeneration*) \ volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \ + volatile_nonstatic_field(ShenandoahGeneration, _used, size_t) \ static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \ static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \ nonstatic_field(ShenandoahHeapRegion, _state, ShenandoahHeapRegion::RegionState) \ @@ -58,9 +61,12 @@ declare_toplevel_type, \ declare_integer_type) \ declare_type(ShenandoahHeap, CollectedHeap) \ + declare_type(ShenandoahGenerationalHeap, ShenandoahHeap) \ declare_toplevel_type(ShenandoahHeapRegion) \ declare_toplevel_type(ShenandoahHeap*) \ declare_toplevel_type(ShenandoahHeapRegion*) \ declare_toplevel_type(ShenandoahHeapRegion::RegionState) \ + declare_toplevel_type(ShenandoahGeneration) \ + declare_toplevel_type(ShenandoahGeneration*) \ #endif // SHARE_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGeneration.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGeneration.java new file mode 100644 index 00000000000..bcd59523ae0 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGeneration.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.gc.shenandoah; + +import sun.jvm.hotspot.utilities.Observable; +import sun.jvm.hotspot.utilities.Observer; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.runtime.VMObject; +import sun.jvm.hotspot.types.CIntegerField; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; + +public class ShenandoahGeneration extends VMObject { + private static CIntegerField used; + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("ShenandoahGeneration"); + used = type.getCIntegerField("_used"); + } + + public ShenandoahGeneration(Address addr) { + super(addr); + } + + public long used() { + return used.getValue(addr); + } +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGenerationalHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGenerationalHeap.java new file mode 100644 index 00000000000..4b24c4ecedd --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahGenerationalHeap.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.gc.shenandoah; + +import sun.jvm.hotspot.debugger.Address; + +public class ShenandoahGenerationalHeap extends ShenandoahHeap { + public ShenandoahGenerationalHeap(Address addr) { + super(addr); + } +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java index ca12562ac3e..3109fe22102 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java @@ -43,7 +43,7 @@ public class ShenandoahHeap extends CollectedHeap { private static CIntegerField numRegions; - private static CIntegerField used; + private static AddressField globalGeneration; private static CIntegerField committed; private static AddressField regions; private static CIntegerField logMinObjAlignmentInBytes; @@ -60,7 +60,7 @@ public void update(Observable o, Object data) { private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("ShenandoahHeap"); numRegions = type.getCIntegerField("_num_regions"); - used = type.getCIntegerField("_used"); + globalGeneration = type.getAddressField("_global_generation"); committed = type.getCIntegerField("_committed"); regions = type.getAddressField("_regions"); logMinObjAlignmentInBytes = type.getCIntegerField("_log_min_obj_alignment_in_bytes"); @@ -89,7 +89,9 @@ public long capacity() { @Override public long used() { - return used.getValue(addr); + Address globalGenerationAddress = globalGeneration.getValue(addr); + ShenandoahGeneration global = VMObjectFactory.newObject(ShenandoahGeneration.class, globalGenerationAddress); + return global.used(); } public long committed() { diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahNumberSeq.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahNumberSeq.cpp index 17a4ed642a5..ce0ddf434ac 100644 --- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahNumberSeq.cpp +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahNumberSeq.cpp @@ -1,6 +1,6 @@ /* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,45 +34,130 @@ class ShenandoahNumberSeqTest: public ::testing::Test { protected: - HdrSeq seq; + const double err = 0.5; + + HdrSeq seq1; + HdrSeq seq2; + HdrSeq seq3; + + void print() { + if (seq1.num() > 0) { + print(seq1, "seq1"); + } + if (seq2.num() > 0) { + print(seq2, "seq2"); + } + if (seq3.num() > 0) { + print(seq3, "seq3"); + } + } + + void print(HdrSeq& seq, const char* msg) { + std::cout << "["; + for (int i = 0; i <= 100; i += 10) { + std::cout << "\t" << seq.percentile(i); + } + std::cout << " ] : " << msg << "\n"; + } }; class BasicShenandoahNumberSeqTest: public ShenandoahNumberSeqTest { - protected: - const double err = 0.5; + public: BasicShenandoahNumberSeqTest() { - seq.add(0); - seq.add(1); - seq.add(10); + seq1.add(0); + seq1.add(1); + seq1.add(10); for (int i = 0; i < 7; i++) { - seq.add(100); + seq1.add(100); + } + ShenandoahNumberSeqTest::print(); + } +}; + +class ShenandoahNumberSeqMergeTest: public ShenandoahNumberSeqTest { + public: + ShenandoahNumberSeqMergeTest() { + for (int i = 0; i < 80; i++) { + seq1.add(1); + seq3.add(1); } - std::cout << " p0 = " << seq.percentile(0); - std::cout << " p10 = " << seq.percentile(10); - std::cout << " p20 = " << seq.percentile(20); - std::cout << " p30 = " << seq.percentile(30); - std::cout << " p50 = " << seq.percentile(50); - std::cout << " p80 = " << seq.percentile(80); - std::cout << " p90 = " << seq.percentile(90); - std::cout << " p100 = " << seq.percentile(100); + + for (int i = 0; i < 20; i++) { + seq2.add(100); + seq3.add(100); + } + ShenandoahNumberSeqTest::print(); } }; TEST_VM_F(BasicShenandoahNumberSeqTest, maximum_test) { - EXPECT_EQ(seq.maximum(), 100); + EXPECT_EQ(seq1.maximum(), 100); } TEST_VM_F(BasicShenandoahNumberSeqTest, minimum_test) { - EXPECT_EQ(0, seq.percentile(0)); + EXPECT_EQ(0, seq1.percentile(0)); } TEST_VM_F(BasicShenandoahNumberSeqTest, percentile_test) { - EXPECT_NEAR(0, seq.percentile(10), err); - EXPECT_NEAR(1, seq.percentile(20), err); - EXPECT_NEAR(10, seq.percentile(30), err); - EXPECT_NEAR(100, seq.percentile(40), err); - EXPECT_NEAR(100, seq.percentile(50), err); - EXPECT_NEAR(100, seq.percentile(75), err); - EXPECT_NEAR(100, seq.percentile(90), err); - EXPECT_NEAR(100, seq.percentile(100), err); + EXPECT_NEAR(0, seq1.percentile(10), err); + EXPECT_NEAR(1, seq1.percentile(20), err); + EXPECT_NEAR(10, seq1.percentile(30), err); + EXPECT_NEAR(100, seq1.percentile(40), err); + EXPECT_NEAR(100, seq1.percentile(50), err); + EXPECT_NEAR(100, seq1.percentile(75), err); + EXPECT_NEAR(100, seq1.percentile(90), err); + EXPECT_NEAR(100, seq1.percentile(100), err); +} + +TEST_VM_F(BasicShenandoahNumberSeqTest, clear_test) { + HdrSeq test; + test.add(1); + + EXPECT_NE(test.num(), 0); + EXPECT_NE(test.sum(), 0); + EXPECT_NE(test.maximum(), 0); + EXPECT_NE(test.avg(), 0); + EXPECT_EQ(test.sd(), 0); + EXPECT_NE(test.davg(), 0); + EXPECT_EQ(test.dvariance(), 0); + for (int i = 0; i <= 100; i += 10) { + EXPECT_NE(test.percentile(i), 0); + } + + test.clear(); + + EXPECT_EQ(test.num(), 0); + EXPECT_EQ(test.sum(), 0); + EXPECT_EQ(test.maximum(), 0); + EXPECT_EQ(test.avg(), 0); + EXPECT_EQ(test.sd(), 0); + EXPECT_EQ(test.davg(), 0); + EXPECT_EQ(test.dvariance(), 0); + for (int i = 0; i <= 100; i += 10) { + EXPECT_EQ(test.percentile(i), 0); + } +} + +TEST_VM_F(ShenandoahNumberSeqMergeTest, merge_test) { + EXPECT_EQ(seq1.num(), 80); + EXPECT_EQ(seq2.num(), 20); + EXPECT_EQ(seq3.num(), 100); + + HdrSeq merged; + merged.add(seq1); 
+ merged.add(seq2); + + EXPECT_EQ(merged.num(), seq3.num()); + + EXPECT_EQ(merged.maximum(), seq3.maximum()); + EXPECT_EQ(merged.percentile(0), seq3.percentile(0)); + for (int i = 0; i <= 100; i += 10) { + EXPECT_NEAR(merged.percentile(i), seq3.percentile(i), err); + } + EXPECT_NEAR(merged.avg(), seq3.avg(), err); + EXPECT_NEAR(merged.sd(), seq3.sd(), err); + + // These are not implemented + EXPECT_TRUE(isnan(merged.davg())); + EXPECT_TRUE(isnan(merged.dvariance())); } diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp new file mode 100644 index 00000000000..7db9a3f9adb --- /dev/null +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp @@ -0,0 +1,363 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "unittest.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" +#include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" +#include <cstdarg> + +// These tests will all be skipped (unless Shenandoah becomes the default +// collector). To execute these tests, you must enable Shenandoah, which +// is done with: +// +// % make exploded-test TEST="gtest:ShenandoahOld*" CONF=release TEST_OPTS="JAVA_OPTIONS=-XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCMode=generational" +// +// Please note that these 'unit' tests are really integration tests and rely +// on the JVM being initialized. These tests manipulate the state of the +// collector in ways that are not compatible with a normal collection run. +// If these tests take longer than the minimum time between GC intervals - +// or, more likely, if you have them paused in a debugger longer than this +// interval - you can expect trouble. These tests will also not run in a build +// with asserts enabled because they use APIs that expect to run at a safepoint. 
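+//
+// A usage sketch (illustrative only, not one of the tests): a typical case
+// below manufactures old-gen garbage with the fixture helpers, asks the old
+// heuristic to select candidates, and then asserts collection set membership
+// through the varargs collection_set_is() overloads defined further down:
+//
+//   make_garbage_above_collection_threshold(10);  // region index from a test
+//   _heuristics->prepare_for_old_collections();
+//   _heuristics->prime_collection_set(_collection_set);
+//   EXPECT_TRUE(collection_set_is(10UL));
+//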
+#ifdef ASSERT +#define SKIP_IF_NOT_SHENANDOAH() \ + tty->print_cr("skipped (debug build)" ); \ + return; +#else +#define SKIP_IF_NOT_SHENANDOAH() \ + if (!UseShenandoahGC) { \ + tty->print_cr("skipped"); \ + return; \ + } +#endif + +class ShenandoahResetRegions : public ShenandoahHeapRegionClosure { + public: + virtual void heap_region_do(ShenandoahHeapRegion* region) override { + if (!region->is_empty()) { + region->make_trash(); + region->make_empty(); + } + region->set_affiliation(FREE); + region->clear_live_data(); + region->set_top(region->bottom()); + } +}; + +class ShenandoahOldHeuristicTest : public ::testing::Test { + protected: + ShenandoahHeap* _heap; + ShenandoahOldHeuristics* _heuristics; + ShenandoahCollectionSet* _collection_set; + + ShenandoahOldHeuristicTest() + : _heap(nullptr), + _heuristics(nullptr), + _collection_set(nullptr) { + SKIP_IF_NOT_SHENANDOAH(); + _heap = ShenandoahHeap::heap(); + _heuristics = _heap->old_heuristics(); + _collection_set = _heap->collection_set(); + ShenandoahHeapLocker locker(_heap->lock()); + ShenandoahResetRegions reset; + _heap->heap_region_iterate(&reset); + _heap->set_old_evac_reserve(_heap->old_generation()->soft_max_capacity() / 4); + _heuristics->abandon_collection_candidates(); + _collection_set->clear(); + } + + ShenandoahOldGeneration::State old_generation_state() { + return _heap->old_generation()->state(); + } + + size_t make_garbage(size_t region_idx, size_t garbage_bytes) { + ShenandoahHeapLocker locker(_heap->lock()); + ShenandoahHeapRegion* region = _heap->get_region(region_idx); + region->set_affiliation(OLD_GENERATION); + region->make_regular_allocation(OLD_GENERATION); + size_t live_bytes = ShenandoahHeapRegion::region_size_bytes() - garbage_bytes; + region->increase_live_data_alloc_words(live_bytes / HeapWordSize); + region->set_top(region->end()); + return region->garbage(); + } + + size_t create_too_much_garbage_for_one_mixed_evacuation() { + size_t garbage_target = _heap->old_generation()->soft_max_capacity() / 2; + size_t garbage_total = 0; + size_t region_idx = 0; + while (garbage_total < garbage_target && region_idx < _heap->num_regions()) { + garbage_total += make_garbage_above_collection_threshold(region_idx++); + } + return garbage_total; + } + + void make_pinned(size_t region_idx) { + ShenandoahHeapLocker locker(_heap->lock()); + ShenandoahHeapRegion* region = _heap->get_region(region_idx); + region->record_pin(); + region->make_pinned(); + } + + void make_unpinned(size_t region_idx) { + ShenandoahHeapLocker locker(_heap->lock()); + ShenandoahHeapRegion* region = _heap->get_region(region_idx); + region->record_unpin(); + region->make_unpinned(); + } + + size_t make_garbage_below_collection_threshold(size_t region_idx) { + return make_garbage(region_idx, collection_threshold() - 100); + } + + size_t make_garbage_above_collection_threshold(size_t region_idx) { + return make_garbage(region_idx, collection_threshold() + 100); + } + + size_t collection_threshold() const { + return ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold / 100; + } + + bool collection_set_is(size_t r1) { return _collection_set_is(1, r1); } + bool collection_set_is(size_t r1, size_t r2) { return _collection_set_is(2, r1, r2); } + bool collection_set_is(size_t r1, size_t r2, size_t r3) { return _collection_set_is(3, r1, r2, r3); } + + bool _collection_set_is(size_t count, ...) 
{ + va_list args; + va_start(args, count); + EXPECT_EQ(count, _collection_set->count()); + bool result = true; + for (size_t i = 0; i < count; ++i) { + size_t index = va_arg(args, size_t); + if (!_collection_set->is_in(index)) { + result = false; + break; + } + } + va_end(args); + return result; + } +}; + +TEST_VM_F(ShenandoahOldHeuristicTest, select_no_old_regions) { + SKIP_IF_NOT_SHENANDOAH(); + + _heuristics->prepare_for_old_collections(); + EXPECT_EQ(0U, _heuristics->coalesce_and_fill_candidates_count()); + EXPECT_EQ(0U, _heuristics->last_old_collection_candidate_index()); + EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, select_no_old_region_above_threshold) { + SKIP_IF_NOT_SHENANDOAH(); + + // In this case, we have zero regions to add to the collection set, + // but we will have one region that must still be made parseable. + make_garbage_below_collection_threshold(10); + _heuristics->prepare_for_old_collections(); + EXPECT_EQ(1U, _heuristics->coalesce_and_fill_candidates_count()); + EXPECT_EQ(0U, _heuristics->last_old_collection_candidate_index()); + EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, select_one_old_region_above_threshold) { + SKIP_IF_NOT_SHENANDOAH(); + + make_garbage_above_collection_threshold(10); + _heuristics->prepare_for_old_collections(); + EXPECT_EQ(1U, _heuristics->coalesce_and_fill_candidates_count()); + EXPECT_EQ(1U, _heuristics->last_old_collection_candidate_index()); + EXPECT_EQ(1U, _heuristics->unprocessed_old_collection_candidates()); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) { + SKIP_IF_NOT_SHENANDOAH(); + + size_t garbage = make_garbage_above_collection_threshold(10); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(10UL)); + EXPECT_EQ(garbage, _collection_set->get_old_garbage()); + EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) { + SKIP_IF_NOT_SHENANDOAH(); + + size_t g1 = make_garbage_above_collection_threshold(100); + size_t g2 = make_garbage_above_collection_threshold(101); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(100UL, 101UL)); + EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage()); + EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) { + SKIP_IF_NOT_SHENANDOAH(); + + size_t garbage = create_too_much_garbage_for_one_mixed_evacuation(); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_LT(_collection_set->get_old_garbage(), garbage); + EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) { + SKIP_IF_NOT_SHENANDOAH(); + + // Create three old regions with enough garbage to be collected. + size_t g1 = make_garbage_above_collection_threshold(0); + size_t g2 = make_garbage_above_collection_threshold(1); + size_t g3 = make_garbage_above_collection_threshold(2); + + // A region can be pinned when we choose collection set candidates. + make_pinned(1); + _heuristics->prepare_for_old_collections(); + + // We only exclude pinned regions when we actually add regions to the collection set. 
+ ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates()); + + // Here the region is still pinned, so it cannot be added to the collection set. + _heuristics->prime_collection_set(_collection_set); + + // The two unpinned regions should be added to the collection set and the pinned + // region should be retained at the front of the list of candidates as it would be + // likely to become unpinned by the next mixed collection cycle. + EXPECT_TRUE(collection_set_is(0UL, 2UL)); + EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL); + + // Simulate another mixed collection after making region 1 unpinned. This time, + // the now unpinned region should be added to the collection set. + make_unpinned(1); + _collection_set->clear(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_EQ(_collection_set->get_old_garbage(), g2); + EXPECT_TRUE(collection_set_is(1UL)); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) { + SKIP_IF_NOT_SHENANDOAH(); + + // Create three old regions with enough garbage to be collected. + size_t g1 = make_garbage_above_collection_threshold(0); + size_t g2 = make_garbage_above_collection_threshold(1); + size_t g3 = make_garbage_above_collection_threshold(2); + + make_pinned(0); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(1UL, 2UL)); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL); + + make_unpinned(0); + _collection_set->clear(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(0UL)); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) { + SKIP_IF_NOT_SHENANDOAH(); + + // Create three old regions with enough garbage to be collected. + size_t g1 = make_garbage_above_collection_threshold(0); + size_t g2 = make_garbage_above_collection_threshold(1); + size_t g3 = make_garbage_above_collection_threshold(2); + + make_pinned(2); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(0UL, 1UL)); + EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL); + + make_unpinned(2); + _collection_set->clear(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(2UL)); + EXPECT_EQ(_collection_set->get_old_garbage(), g3); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) { + SKIP_IF_NOT_SHENANDOAH(); + + // Create three old regions with enough garbage to be collected. 
+ size_t g1 = make_garbage_above_collection_threshold(0); + size_t g2 = make_garbage_above_collection_threshold(1); + size_t g3 = make_garbage_above_collection_threshold(2); + + make_pinned(0); + make_pinned(2); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(1UL)); + EXPECT_EQ(_collection_set->get_old_garbage(), g2); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 2UL); + + make_unpinned(0); + make_unpinned(2); + _collection_set->clear(); + _heuristics->prime_collection_set(_collection_set); + + EXPECT_TRUE(collection_set_is(0UL, 2UL)); + EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3); + EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); +} + +TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) { + SKIP_IF_NOT_SHENANDOAH(); + + size_t g1 = make_garbage_above_collection_threshold(0); + size_t g2 = make_garbage_above_collection_threshold(1); + size_t g3 = make_garbage_above_collection_threshold(2); + + make_pinned(0); + make_pinned(1); + make_pinned(2); + _heuristics->prepare_for_old_collections(); + _heuristics->prime_collection_set(_collection_set); + + // In the case when all candidates are pinned, we want to abandon + // this set of mixed collection candidates so that another old collection + // can run. This is meant to defend against "bad" JNI code that permanently + // leaves an old region in the pinned state. + EXPECT_EQ(_collection_set->count(), 0UL); + EXPECT_EQ(old_generation_state(), ShenandoahOldGeneration::FILLING); +} +#undef SKIP_IF_NOT_SHENANDOAH diff --git a/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java b/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java index 35b0cdfa49e..250b2c847d5 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,6 +98,15 @@ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestAllocIntArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestAllocIntArrays */ /* diff --git a/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java index 8eebba4a308..64c78b03a65 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -99,6 +100,35 @@ * TestAllocObjectArrays */ +/* + * @test id=generational + * @summary Acceptance tests: collector can withstand allocation + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahOOMDuringEvacALot + * -XX:+ShenandoahVerify + * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahAllocFailureALot + * -XX:+ShenandoahVerify + * TestAllocObjectArrays + */ + /* * @test id=static * @summary Acceptance tests: collector can withstand allocation @@ -134,6 +164,11 @@ * -XX:+UseShenandoahGC * -XX:-UseTLAB -XX:+ShenandoahVerify * TestAllocObjectArrays + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestAllocObjectArrays */ /* diff --git a/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java index 32178555c9f..23e43117ef9 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,6 +94,21 @@ * TestAllocObjects */ +/* + * @test id=generational + * @summary Acceptance tests: collector can withstand allocation + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestAllocObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestAllocObjects + */ + /* * @test id=static * @summary Acceptance tests: collector can withstand allocation diff --git a/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java index ecfe5f5836e..c370d03a55e 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,11 +24,18 @@ */ /* - * @test + * @test id=default * @requires vm.gc.Shenandoah * * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyCheckCast */ + +/* + * @test id=generational + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyCheckCast + */ public class TestArrayCopyCheckCast { static class Foo {} diff --git a/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java index 6a00b0f51de..fd6e14145e4 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,13 +27,22 @@ import jdk.test.lib.Utils; /* - * @test + * @test id=default * @key randomness * @requires vm.gc.Shenandoah * @library /test/lib * * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyStress */ + +/* + * @test id=generational + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyStress + */ public class TestArrayCopyStress { private static final int ARRAY_SIZE = 1000; diff --git a/test/hotspot/jtreg/gc/shenandoah/TestDynamicSoftMaxHeapSize.java b/test/hotspot/jtreg/gc/shenandoah/TestDynamicSoftMaxHeapSize.java index f34fcd8024d..7a53796c5f0 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestDynamicSoftMaxHeapSize.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestDynamicSoftMaxHeapSize.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +63,17 @@ * TestDynamicSoftMaxHeapSize */ +/* + * @test id=generational + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -Dtarget=10000 + * TestDynamicSoftMaxHeapSize + */ + /* * @test id=static * @requires vm.gc.Shenandoah diff --git a/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java b/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java index 67690d8cad5..ecb73520ca8 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,13 +24,23 @@ */ /* - * @test + * @test id=default * @summary Tests for crash/assert when attaching init thread during shutdown * @requires vm.gc.Shenandoah * @library /test/lib * @modules java.base/jdk.internal.misc * java.management - * @run driver/timeout=480 TestEvilSyncBug + * @run driver/timeout=480 TestEvilSyncBug -XX:ShenandoahGCHeuristics=aggressive + */ + +/* + * @test id=generational + * @summary Tests for crash/assert when attaching init thread during shutdown + * @requires vm.gc.Shenandoah + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver/timeout=480 TestEvilSyncBug -XX:ShenandoahGCMode=generational */ import java.util.*; @@ -46,9 +57,10 @@ public class TestEvilSyncBug { static Thread[] hooks = new MyHook[10000]; public static void main(String[] args) throws Exception { - if (args.length > 0) { + if ("test".equals(args[0])) { test(); } else { + String options = args[0]; // Use 1/4 of available processors to avoid over-saturation. int numJobs = Math.max(1, Runtime.getRuntime().availableProcessors() / 4); ExecutorService pool = Executors.newFixedThreadPool(numJobs); @@ -61,7 +73,7 @@ public static void main(String[] args) throws Exception { "-XX:+UnlockExperimentalVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:+UseShenandoahGC", - "-XX:ShenandoahGCHeuristics=aggressive", + options, "TestEvilSyncBug", "test"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldHaveExitValue(0); diff --git a/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java b/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java index ff1596d6833..eb0d1ec2b46 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,6 +77,24 @@ * TestGCThreadGroups */ +/** + * @test id=generational + * @summary Test Shenandoah GC uses concurrent/parallel threads correctly + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 + * -Dtarget=1000 -XX:ShenandoahGCMode=generational + * TestGCThreadGroups + * + * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC + * -XX:-UseDynamicNumberOfGCThreads + * -Dtarget=1000 -XX:ShenandoahGCMode=generational + * TestGCThreadGroups + */ + /** * @test id=iu * @summary Test Shenandoah GC uses concurrent/parallel threads correctly diff --git a/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java b/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java index 6c54a34dae5..f4639b2d038 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,6 +85,28 @@ * TestHeapUncommit */ +/* + * @test id=generational + * @summary Acceptance tests: collector can withstand allocation + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestHeapUncommit + * + * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestHeapUncommit + */ + /* * @test id=iu * @summary Acceptance tests: collector can withstand allocation diff --git a/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java b/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java index 845a6617ebd..068b69f2855 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -108,6 +109,85 @@ * TestHumongousThreshold */ +/* + * @test id=generational + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify 
-XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + */ + +/* + * @test id=generational-16b + * @key randomness + * @requires vm.gc.Shenandoah + * @requires vm.bits == "64" + * @library /test/lib + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 + * TestHumongousThreshold + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g + * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 
-XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 + * TestHumongousThreshold + */ + import java.util.Random; import jdk.test.lib.Utils; diff --git a/test/hotspot/jtreg/gc/shenandoah/TestJcmdHeapDump.java b/test/hotspot/jtreg/gc/shenandoah/TestJcmdHeapDump.java index cc5bc5d425e..6f6aa63d0a0 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestJcmdHeapDump.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestJcmdHeapDump.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,6 +73,18 @@ * TestJcmdHeapDump */ +/* + * @test id=generational + * @library /test/lib + * @modules jdk.attach/com.sun.tools.attach + * @requires vm.gc.Shenandoah + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -Dtarget=10000 + * TestJcmdHeapDump + */ + /* * @test id=static * @library /test/lib diff --git a/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java b/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java index 893014804e2..7780d57b45e 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /* - * @test + * @test default * @summary Shenandoah crashes with -XX:ObjectAlignmentInBytes=16 * @key randomness * @requires vm.gc.Shenandoah @@ -36,6 +37,20 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=4 TestLargeObjectAlignment */ +/* + * @test generational + * @summary Shenandoah crashes with -XX:ObjectAlignmentInBytes=16 + * @key randomness + * @requires vm.gc.Shenandoah + * @requires vm.bits == "64" + * @library /test/lib + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ObjectAlignmentInBytes=16 -Xint TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ObjectAlignmentInBytes=16 -XX:-TieredCompilation TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=1 TestLargeObjectAlignment + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=4 TestLargeObjectAlignment + */ + import java.util.ArrayList; import java.util.List; import java.util.Random; diff --git a/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java b/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java index 63d9fa08767..db6e520f007 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,6 +72,16 @@ * TestLotsOfCycles */ +/* + * @test id=generational + * @requires vm.gc.Shenandoah + * + * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -Dtarget=10000 + * TestLotsOfCycles + */ + /* * @test id=static * @requires vm.gc.Shenandoah diff --git a/test/hotspot/jtreg/gc/shenandoah/TestObjItrWithHeapDump.java b/test/hotspot/jtreg/gc/shenandoah/TestObjItrWithHeapDump.java index c0161b7a238..151d3308826 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestObjItrWithHeapDump.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestObjItrWithHeapDump.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,9 +57,10 @@ public static void main(String[] args) throws Exception { } String[][][] modeHeuristics = new String[][][] { - {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, - {{"iu"}, {"adaptive", "aggressive"}}, - {{"passive"}, {"passive"}} + {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, + {{"generational"}, {"adaptive"}}, + {{"iu"}, {"adaptive", "aggressive"}}, + {{"passive"}, {"passive"}} }; for (String[][] mh : modeHeuristics) { diff --git a/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java b/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java index b65aa0cd0b4..18552403db3 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,14 +47,39 @@ public static void testWith(String msg, boolean periodic, String... args) throws OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldHaveExitValue(0); - if (periodic && !output.getOutput().contains("Trigger: Time since last GC")) { + if (periodic && !output.getOutput().contains("Trigger (GLOBAL): Time since last GC")) { throw new AssertionError(msg + ": Should have periodic GC in logs"); } - if (!periodic && output.getOutput().contains("Trigger: Time since last GC")) { + if (!periodic && output.getOutput().contains("Trigger (GLOBAL): Time since last GC")) { throw new AssertionError(msg + ": Should not have periodic GC in logs"); } } + public static void testGenerational(boolean periodic, String... 
args) throws Exception { + String[] cmds = Arrays.copyOf(args, args.length + 2); + cmds[args.length] = TestPeriodicGC.class.getName(); + cmds[args.length + 1] = "test"; + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(cmds); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + if (periodic) { + if (!output.getOutput().contains("Trigger (YOUNG): Time since last GC")) { + throw new AssertionError("Generational mode: Should have periodic young GC in logs"); + } + if (!output.getOutput().contains("Trigger (OLD): Time since last GC")) { + throw new AssertionError("Generational mode: Should have periodic old GC in logs"); + } + } else { + if (output.getOutput().contains("Trigger (YOUNG): Time since last GC")) { + throw new AssertionError("Generational mode: Should not have periodic young GC in logs"); + } + if (output.getOutput().contains("Trigger (OLD): Time since last GC")) { + throw new AssertionError("Generational mode: Should not have periodic old GC in logs"); + } + } + } + public static void main(String[] args) throws Exception { if (args.length > 0 && args[0].equals("test")) { Thread.sleep(5000); // stay idle @@ -157,6 +183,26 @@ public static void main(String[] args) throws Exception { "-XX:ShenandoahGCMode=passive", "-XX:ShenandoahGuaranteedGCInterval=1000" ); + + testGenerational(true, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCMode=generational", + "-XX:ShenandoahGuaranteedYoungGCInterval=1000", + "-XX:ShenandoahGuaranteedOldGCInterval=1500" + ); + + testGenerational(false, + "-Xlog:gc", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", + "-XX:ShenandoahGCMode=generational", + "-XX:ShenandoahGuaranteedYoungGCInterval=0", + "-XX:ShenandoahGuaranteedOldGCInterval=0" + ); } } diff --git a/test/hotspot/jtreg/gc/shenandoah/TestReferenceRefersToShenandoah.java b/test/hotspot/jtreg/gc/shenandoah/TestReferenceRefersToShenandoah.java index 8bad2fdbfa4..74c8dec8cc6 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestReferenceRefersToShenandoah.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestReferenceRefersToShenandoah.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,20 @@ * gc.shenandoah.TestReferenceRefersToShenandoah */ -/* @test id=iu +/* @test id=satb-100 + * @requires vm.gc.Shenandoah + * @library /test/lib + * @build jdk.test.whitebox.WhiteBox + * @modules java.base + * @run main jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm + * -Xbootclasspath/a:. + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb -XX:ShenandoahGarbageThreshold=100 -Xmx100m + * gc.shenandoah.TestReferenceRefersToShenandoah + */ + +/* @test id=generational * @requires vm.gc.Shenandoah * @library /test/lib * @build jdk.test.whitebox.WhiteBox @@ -43,11 +56,11 @@ * @run main/othervm * -Xbootclasspath/a:. 
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI - * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational * gc.shenandoah.TestReferenceRefersToShenandoah */ -/* @test id=satb-100 +/* @test id=generational-100 * @requires vm.gc.Shenandoah * @library /test/lib * @build jdk.test.whitebox.WhiteBox @@ -56,7 +69,19 @@ * @run main/othervm * -Xbootclasspath/a:. * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI - * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb -XX:ShenandoahGarbageThreshold=100 -Xmx100m + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ShenandoahGarbageThreshold=100 -Xmx100m + * gc.shenandoah.TestReferenceRefersToShenandoah + */ + +/* @test id=iu + * @requires vm.gc.Shenandoah + * @library /test/lib + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm + * -Xbootclasspath/a:. + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu * gc.shenandoah.TestReferenceRefersToShenandoah */ diff --git a/test/hotspot/jtreg/gc/shenandoah/TestReferenceShortcutCycle.java b/test/hotspot/jtreg/gc/shenandoah/TestReferenceShortcutCycle.java index 1c2c2ed5ca7..bfd82376cd1 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestReferenceShortcutCycle.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestReferenceShortcutCycle.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,19 @@ * gc.shenandoah.TestReferenceShortcutCycle */ +/* @test id=generational-100 + * @requires vm.gc.Shenandoah + * @library /test/lib + * @build jdk.test.whitebox.WhiteBox + * @modules java.base + * @run main jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm + * -Xbootclasspath/a:. + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:ShenandoahGarbageThreshold=100 -Xmx100m + * gc.shenandoah.TestReferenceShortcutCycle + */ + /* @test id=iu-100 * @requires vm.gc.Shenandoah * @library /test/lib diff --git a/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java b/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java index 03f008d10c3..42e87adbbce 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
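[Editorial aside.] The WhiteBox-based descriptors above all follow the same three-step recipe: `@build jdk.test.whitebox.WhiteBox` compiles the API, `ClassFileInstaller` copies the compiled class into the test's working directory, and `-Xbootclasspath/a:.` plus `-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI` let the test JVM load and trust it. A minimal sketch of what a test body can then do (hypothetical class name; the two flag-query calls are the same ones the generational tests later in this patch use):

    import jdk.test.whitebox.WhiteBox;

    public class FlagProbe {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // Queries the values the VM actually settled on, not just what was passed.
            Boolean useShenandoah = wb.getBooleanVMFlag("UseShenandoahGC");
            String mode = wb.getStringVMFlag("ShenandoahGCMode");
            System.out.println("UseShenandoahGC=" + useShenandoah + ", ShenandoahGCMode=" + mode);
        }
    }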
* * This code is free software; you can redistribute it and/or modify it @@ -41,6 +42,21 @@ * TestRefprocSanity */ +/* + * @test id=generational + * @summary Test that null references/referents work fine + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx128m -Xms128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestRefprocSanity + * + * @run main/othervm -Xmx128m -Xms128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestRefprocSanity + */ + /* * @test id=iu * @summary Test that null references/referents work fine diff --git a/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java b/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java index dd98585181c..20be9b7d969 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +47,15 @@ * TestRegionSampling */ +/* + * @test id=generational + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestRegionSampling + */ + /* * @test id=static * @requires vm.gc.Shenandoah diff --git a/test/hotspot/jtreg/gc/shenandoah/TestRegionSamplingLogging.java b/test/hotspot/jtreg/gc/shenandoah/TestRegionSamplingLogging.java new file mode 100644 index 00000000000..0017328b517 --- /dev/null +++ b/test/hotspot/jtreg/gc/shenandoah/TestRegionSamplingLogging.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+/*
+ * @test id=default-rotation
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *      -XX:+ShenandoahRegionSampling
+ *      -Xlog:gc+region=trace:region-snapshots-%p.log::filesize=100,filecount=3
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive
+ *      TestRegionSamplingLogging
+ */
+
+/*
+ * @test id=generational-rotation
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *      -XX:+ShenandoahRegionSampling
+ *      -Xlog:gc+region=trace:region-snapshots-%p.log::filesize=100,filecount=3
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational
+ *      TestRegionSamplingLogging
+ */
+import java.io.File;
+import java.util.Arrays;
+
+public class TestRegionSamplingLogging {
+
+    static final long TARGET_MB = Long.getLong("target", 2_000); // 2 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+
+        File directory = new File(".");
+        File[] files = directory.listFiles((dir, name) -> name.startsWith("region-snapshots") && name.endsWith(".log"));
+        System.out.println(Arrays.toString(files));
+        if (files == null || files.length == 0) {
+            throw new IllegalStateException("Did not find expected snapshot log file.");
+        }
+    }
+}
diff --git a/test/hotspot/jtreg/gc/shenandoah/TestResizeTLAB.java b/test/hotspot/jtreg/gc/shenandoah/TestResizeTLAB.java
index 095f9939569..076adb93f89 100644
--- a/test/hotspot/jtreg/gc/shenandoah/TestResizeTLAB.java
+++ b/test/hotspot/jtreg/gc/shenandoah/TestResizeTLAB.java
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2020, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,6 +99,26 @@
  * TestResizeTLAB
  */
 
+/*
+ * @test id=generational
+ * @key randomness
+ * @summary Test that Shenandoah is able to work with(out) resizeable TLABs
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational
+ *      -XX:+ShenandoahVerify
+ *      -XX:+ResizeTLAB
+ *      TestResizeTLAB
+ *
+ * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational
+ *      -XX:+ShenandoahVerify
+ *      -XX:-ResizeTLAB
+ *      TestResizeTLAB
+ */
+
 /*
  * @test id=static
  * @key randomness
diff --git a/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java b/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java
index 7a28548f610..3adeff94418 100644
--- a/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java
+++ b/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
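[Editorial aside.] The -Xlog selector in the TestRegionSamplingLogging descriptors above packs several pieces into one option: `gc+region=trace` picks the tag set and level, `region-snapshots-%p.log` names the output file with `%p` expanded to the JVM's process id, and `filesize=100,filecount=3` forces rotation after roughly 100 bytes across three rotated files, so even a short run should rotate. Rotated files normally receive numeric suffixes (for example `region-snapshots-pid123.log.0`), which is presumably why the test's filter only matches the active `.log` file. A sketch that would also pick up the rotated ones, assuming the standard unified-logging suffix scheme:

    import java.io.File;
    import java.util.Arrays;

    public class ListRotatedLogs {
        public static void main(String[] args) {
            // Matches region-snapshots-<pid>.log as well as rotated .log.0, .log.1, ...
            File[] logs = new File(".").listFiles((d, name) -> name.startsWith("region-snapshots"));
            System.out.println(logs == null ? "not a directory" : Arrays.toString(logs));
        }
    }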
* * This code is free software; you can redistribute it and/or modify it @@ -83,6 +84,31 @@ * TestRetainObjects */ +/* + * @test id=generational + * @summary Acceptance tests: collector can deal with retained objects + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestRetainObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestRetainObjects + */ + /* * @test id=static * @summary Acceptance tests: collector can deal with retained objects @@ -108,7 +134,7 @@ * @summary Acceptance tests: collector can deal with retained objects * @requires vm.gc.Shenandoah * - * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * @run main/othervm/timeout=300 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions * -XX:+UseShenandoahGC * -XX:-UseTLAB -XX:+ShenandoahVerify * TestRetainObjects diff --git a/test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java b/test/hotspot/jtreg/gc/shenandoah/TestShenandoahRegionLogging.java similarity index 53% rename from test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java rename to test/hotspot/jtreg/gc/shenandoah/TestShenandoahRegionLogging.java index 474831a4771..81e66c9d0cb 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestShenandoahRegionLogging.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,28 +23,27 @@ */ /* - * @test - * @summary Test that reference processing works with both parallel and non-parallel variants. 
+ * @test id=rotation * @requires vm.gc.Shenandoah * - * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestParallelRefprocSanity - * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-ParallelRefProcEnabled TestParallelRefprocSanity - * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:+ParallelRefProcEnabled TestParallelRefprocSanity + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+ShenandoahRegionSampling + * -Xlog:gc+region=trace:region-snapshots-%p.log::filesize=100,filecount=3 + * -XX:+UseShenandoahGC + * TestShenandoahRegionLogging */ +import java.io.File; -import java.lang.ref.*; - -public class TestParallelRefprocSanity { - - static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +public class TestShenandoahRegionLogging { + public static void main(String[] args) throws Exception { + System.gc(); - static volatile Object sink; + File directory = new File("."); + File[] files = directory.listFiles((dir, name) -> name.startsWith("region-snapshots")); - public static void main(String[] args) throws Exception { - long count = TARGET_MB * 1024 * 1024 / 32; - for (long c = 0; c < count; c++) { - sink = new WeakReference(new Object()); + // Expect one or more log files when region logging is enabled + if (files.length == 0) { + throw new Error("Expected at least one log file for region sampling data."); } } - } diff --git a/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java b/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java index 1b16ba4b8d2..c627368ca56 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
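[Editorial aside.] One behavioral difference between the two sampling-log tests: TestRegionSamplingLogging checks `files == null` before dereferencing, while the TestShenandoahRegionLogging check above does not. `File.listFiles` is specified to return null, rather than throw, when the path is not a directory or an I/O error occurs, so the defensive shape looks like this:

    import java.io.File;

    public class SafeListFiles {
        public static void main(String[] args) {
            File[] files = new File(".").listFiles((dir, name) -> name.startsWith("region-snapshots"));
            if (files == null) {
                // Not a directory, or the listing itself failed with an I/O error.
                throw new Error("Could not list the working directory");
            }
            if (files.length == 0) {
                throw new Error("Expected at least one region sampling log file");
            }
        }
    }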
* * This code is free software; you can redistribute it and/or modify it @@ -90,6 +91,28 @@ * */ +/* + * @test id=generational + * @summary Acceptance tests: collector can deal with retained objects + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify + * TestSieveObjects + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestSieveObjects + */ + /* * @test id=static * @summary Acceptance tests: collector can deal with retained objects @@ -121,7 +144,7 @@ * @requires vm.gc.Shenandoah * @library /test/lib * - * @run main/othervm/timeout=240 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * @run main/othervm/timeout=300 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions * -XX:+UseShenandoahGC * -XX:-UseTLAB -XX:+ShenandoahVerify * TestSieveObjects diff --git a/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java b/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java index 56f416790f5..b7540059751 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /* - * @test + * @test default * @requires vm.gc.Shenandoah * * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSmallHeap @@ -34,6 +35,17 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx4m TestSmallHeap */ +/* + * @test generational + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx64m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx32m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx16m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx8m TestSmallHeap + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx4m TestSmallHeap + */ public class TestSmallHeap { public static void main(String[] args) throws Exception { diff --git a/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java b/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java index 94691432f38..f1991c0f50c 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,6 +66,20 @@ * TestStringDedup */ +/* + * @test id=generational + * @summary Test Shenandoah string deduplication implementation + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx256m -Xlog:gc+stats -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:StringDeduplicationAgeThreshold=3 + * TestStringDedup + */ + /* * @test id=iu * @summary Test Shenandoah string deduplication implementation diff --git a/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java b/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java index 90b383d0587..7eb59bdc66f 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,6 +43,22 @@ * TestStringDedupStress */ +/* + * @test id=generational + * @summary Test Shenandoah string deduplication implementation + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * @modules java.base/java.lang:open + * java.management + * + * @run main/othervm -Xmx1g -Xlog:gc+stats -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahDegeneratedGC + * -DtargetStrings=3000000 + * TestStringDedupStress + */ + /* * @test id=default * @summary Test Shenandoah string deduplication implementation @@ -113,7 +130,7 @@ public class TestStringDedupStress { private static final int TARGET_STRINGS = Integer.getInteger("targetStrings", 2_500_000); private static final long MAX_REWRITE_GC_CYCLES = 6; - private static final long MAX_REWRITE_TIME = 30*1000; // ms + private static final long MAX_REWRITE_TIME_NS = 30L * 1_000_000_000L; // 30s in ns private static final int UNIQUE_STRINGS = 20; @@ -211,7 +228,7 @@ public static void main(String[] args) { } long cycleBeforeRewrite = gcCycleMBean.getCollectionCount(); - long timeBeforeRewrite = System.currentTimeMillis(); + long timeBeforeRewriteNanos = System.nanoTime(); long loop = 1; while (true) { @@ -229,7 +246,7 @@ public static void main(String[] args) { } // enough time is spent waiting for GC to happen - if (System.currentTimeMillis() - timeBeforeRewrite >= MAX_REWRITE_TIME) { + if (System.nanoTime() - timeBeforeRewriteNanos >= MAX_REWRITE_TIME_NS) { break; } } diff --git a/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java b/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java index cf663f2329c..4cc410efdb3 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
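[Editorial aside.] The TestStringDedupStress change above, from currentTimeMillis to nanoTime, is more than a unit swap: nanoTime reads a monotonic clock, so the 30-second rewrite budget can no longer be stretched or cut short by wall-clock adjustments (NTP, suspend/resume). The javadoc-recommended pattern for such deadlines compares differences rather than absolute values; a small sketch (the workload is a placeholder):

    public class DeadlineSketch {
        static int work = 1_000_000;
        static boolean doWork() { return --work > 0; } // placeholder workload

        public static void main(String[] args) {
            long deadlineNanos = System.nanoTime() + 30L * 1_000_000_000L; // 30 s budget
            while (doWork()) {
                if (System.nanoTime() - deadlineNanos >= 0) { // difference compare is overflow-safe
                    break;
                }
            }
        }
    }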
* * This code is free software; you can redistribute it and/or modify it @@ -75,6 +76,22 @@ * TestStringInternCleanup */ +/* + * @test id=generational + * @summary Check that Shenandoah cleans up interned strings + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestStringInternCleanup + * + * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark + * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive -XX:ShenandoahGCMode=generational + * TestStringInternCleanup + */ + + /* * @test id=iu * @summary Check that Shenandoah cleans up interned strings diff --git a/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java b/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java index 312ab964f5e..bfc65b11fc7 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java @@ -44,6 +44,7 @@ * @test id=default * @summary Tests that we pass at least one jcstress-like test with all verification turned on * @requires vm.gc.Shenandoah + * @requires !vm.debug * @modules java.base/jdk.internal.misc * java.management * @@ -58,10 +59,25 @@ * TestVerifyJCStress */ +/* + * @test id=iu-debug + * @summary Tests that we pass at least one jcstress-like test with all verification turned on + * @requires vm.gc.Shenandoah + * @requires vm.debug + * @modules java.base/jdk.internal.misc + * java.management + * + * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu + * -XX:+ShenandoahVerify -XX:+ShenandoahVerifyOptoBarriers + * TestVerifyJCStress + */ + /* * @test id=iu * @summary Tests that we pass at least one jcstress-like test with all verification turned on * @requires vm.gc.Shenandoah + * @requires !vm.debug * @modules java.base/jdk.internal.misc * java.management * diff --git a/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java b/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java index 880a965010f..750914a7d36 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
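[Editorial aside.] The TestVerifyJCStress split above gates on build flavor: jtreg evaluates `@requires` against VM properties, so `vm.debug` selects debug builds and `!vm.debug` product builds, letting the expensive -XX:+ShenandoahVerifyOptoBarriers pass run only on debug builds without scheduling the same id twice. Reduced to its skeleton (placeholder class and verification flag):

    /*
     * @test id=fast
     * @requires !vm.debug
     * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC Example
     */
    /*
     * @test id=heavy-verify
     * @requires vm.debug
     * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
     *      -XX:+UnlockDiagnosticVMOptions -XX:+SomeVerifyFlag Example
     */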
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /* - * @test + * @test default * @requires vm.gc.Shenandoah * * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=0 TestVerifyLevels @@ -33,6 +34,16 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=4 TestVerifyLevels */ +/* + * @test generational + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=0 TestVerifyLevels + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=1 TestVerifyLevels + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=2 TestVerifyLevels + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=3 TestVerifyLevels + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=4 TestVerifyLevels + */ public class TestVerifyLevels { static final long TARGET_MB = Long.getLong("target", 1_000); // 1 Gb allocation diff --git a/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java b/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java index b9214aeb53c..9f0fa13d182 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /* - * @test + * @test id=default * @summary Test Shenandoah with different log levels * @requires vm.gc.Shenandoah * @@ -34,6 +35,17 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=trace TestWithLogLevel */ +/* + * @test id=generational + * @summary Test Shenandoah with different log levels + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms256M -Xmx1G -Xlog:gc*=error TestWithLogLevel + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms256M -Xmx1G -Xlog:gc*=warning TestWithLogLevel + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms256M -Xmx1G -Xlog:gc*=info TestWithLogLevel + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms256M -Xmx1G -Xlog:gc*=debug TestWithLogLevel + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms256M -Xmx1G -Xlog:gc*=trace TestWithLogLevel + */ import java.util.*; public class TestWithLogLevel { diff --git a/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java b/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java index ee645d994ab..e688cbbdcd8 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +27,9 @@ * @test * @requires vm.gc.Shenandoah * - * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestWrongArrayMember - * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu TestWrongArrayMember + * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestWrongArrayMember + * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu TestWrongArrayMember + * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational TestWrongArrayMember */ public class TestWrongArrayMember { @@ -54,4 +56,3 @@ public static void main(String... args) throws Exception { } } } - diff --git a/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java b/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java index cb8582ee420..2b1340890e1 100644 --- a/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java +++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2019, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -205,7 +206,131 @@ * TestClone */ +/* + * @test id=generational + * @summary Test clone barriers work correctly + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:TieredStopAtLevel=4 + * TestClone + */ + +/* + * @test id=generational-verify + * @summary Test clone barriers work correctly + * @requires vm.gc.Shenandoah + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=4 + * TestClone + */ + + /* + * @test id=generational-no-coops + * @summary Test clone barriers work correctly + * @requires vm.gc.Shenandoah + * @requires vm.bits == "64" + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * 
-XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:TieredStopAtLevel=4 + * TestClone + */ + /* + * @test id=generational-no-coops-verify + * @summary Test clone barriers work correctly + * @requires vm.gc.Shenandoah + * @requires vm.bits == "64" + * + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -Xint + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:-TieredCompilation + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=1 + * TestClone + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g + * -XX:-UseCompressedOops + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * -XX:TieredStopAtLevel=4 + * TestClone + */ public class TestClone { public static void main(String[] args) throws Exception { diff --git a/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java b/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java index ecfe46377b4..277da9eeb63 100644 --- a/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java +++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -53,6 +54,32 @@ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=4 TestReferenceCAS */ +/* + * @test id=generational + * @summary Shenandoah reference CAS test + * @requires vm.gc.Shenandoah + * @modules java.base/jdk.internal.misc:+open + * + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational TestReferenceCAS + * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xint TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-TieredCompilation TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:TieredStopAtLevel=1 TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:TieredStopAtLevel=4 TestReferenceCAS + */ + +/* + * @test id=generational-no-coops + * @summary Shenandoah reference CAS test + * @requires vm.gc.Shenandoah + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc:+open + * + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-UseCompressedOops TestReferenceCAS + * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-UseCompressedOops -Xint TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-UseCompressedOops -XX:-TieredCompilation TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-UseCompressedOops -XX:TieredStopAtLevel=1 TestReferenceCAS + * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:-UseCompressedOops -XX:TieredStopAtLevel=4 TestReferenceCAS + */ import java.lang.reflect.Field; public class TestReferenceCAS { diff --git a/test/hotspot/jtreg/gc/shenandoah/generational/TestCLIModeGenerational.java b/test/hotspot/jtreg/gc/shenandoah/generational/TestCLIModeGenerational.java new file mode 100644 index 00000000000..183787ced46 --- /dev/null +++ b/test/hotspot/jtreg/gc/shenandoah/generational/TestCLIModeGenerational.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
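[Editorial aside.] TestReferenceCAS (its body is unchanged by this patch, hence not shown) drives reference compareAndSet through jdk.internal.misc.Unsafe, which is why its descriptors open that package. The same Shenandoah cmpxchg-oop barrier sits behind the public VarHandle API as well; a self-contained sketch of the operation under test (class name hypothetical, not part of the patch):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    public class RefCasExample {
        static Object ref = null;

        static final VarHandle REF;
        static {
            try {
                REF = MethodHandles.lookup().findStaticVarHandle(RefCasExample.class, "ref", Object.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        public static void main(String[] args) {
            Object witness = new Object();
            // A reference CAS like this is what the GC's compare-and-swap barrier intercepts.
            boolean swapped = REF.compareAndSet(null, witness);
            if (!swapped || ref != witness) {
                throw new AssertionError("CAS failed unexpectedly");
            }
        }
    }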
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.shenandoah.generational;
+
+import jdk.test.whitebox.WhiteBox;
+
+/*
+ * @test TestCLIModeGenerational
+ * @requires vm.gc.Shenandoah
+ * @summary Test argument processing for -XX:ShenandoahGCMode=generational.
+ * @library /testlibrary /test/lib /
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:.
+ *      -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *      -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
+ *      gc.shenandoah.generational.TestCLIModeGenerational
+ */
+
+public class TestCLIModeGenerational {
+
+    private static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        Boolean using_shenandoah = wb.getBooleanVMFlag("UseShenandoahGC");
+        String gc_mode = wb.getStringVMFlag("ShenandoahGCMode");
+        if (!using_shenandoah || !gc_mode.equals("generational"))
+            throw new IllegalStateException("Command-line options not honored!");
+    }
+}
+
diff --git a/test/hotspot/jtreg/gc/shenandoah/generational/TestConcurrentEvac.java b/test/hotspot/jtreg/gc/shenandoah/generational/TestConcurrentEvac.java
new file mode 100644
index 00000000000..a8879ea31a1
--- /dev/null
+++ b/test/hotspot/jtreg/gc/shenandoah/generational/TestConcurrentEvac.java
@@ -0,0 +1,201 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.shenandoah.generational;
+
+import jdk.test.whitebox.WhiteBox;
+import java.util.Random;
+import java.util.HashMap;
+
+/*
+ * To avoid the risk of false regressions identified by this test, the heap
+ * size is set artificially high.
Though this test is known to run reliably + * in 66 MB heap, the heap size for this test run is currently set to 256 MB. + */ + +/* + * @test TestConcurrentEvac + * @requires vm.gc.Shenandoah + * @summary Confirm that card marking and remembered set scanning do not crash. + * @library /testlibrary /test/lib / + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. + * -Xms256m -Xmx256m + * -XX:+IgnoreUnrecognizedVMOptions + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:NewRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:ShenandoahGuaranteedGCInterval=3000 + * -XX:-UseDynamicNumberOfGCThreads -XX:-ShenandoahPacing + * gc.shenandoah.generational.TestConcurrentEvac + */ + +public class TestConcurrentEvac { + private static WhiteBox wb = WhiteBox.getWhiteBox(); + + static private final int SeedForRandom = 46; + // Sequence of random numbers should end with same value + + // Smaller table will cause creation of more old-gen garbage + // as previous entries in table are overwritten with new values. + static private final int TableSize = 53; + static private final int MaxStringLength = 47; + static private final int SentenceLength = 5; + + static private Random random = new Random(SeedForRandom); + + public static class Node { + static private final int NeighborCount = 48; + static private final int ChildOverwriteCount = 32; + static private final int IntArraySize = 128; + + private String name; + + // Each Node instance holds an array containing all substrings of + // its name + + // This array has entries from 0 .. (name.length() - 1). + // num_substrings[i] represents the number of substrings that + // correspond to a name of length i+1. + private static int [] num_substrings; + + static { + // Initialize num_substrings. + // For a name of length N, there are + // N substrings of length 1 + // N-1 substrings of length 2 + // N-2 substrings of length 3 + // ... + // 1 substring of length N + // Note that: + // num_substrings[0] = 1 + // num_substrings[1] = 3 + // num_substrings[i] = (i+1)+num_substrings[i-1] + + num_substrings = new int[MaxStringLength]; + num_substrings[0] = 1; + for (int i = 1; i < MaxStringLength; i++) + num_substrings[i] = (i+1)+num_substrings[i-1]; + } + + private String [] substrings; + private Node [] neighbors; + + public Node(String name) { + this.name = name; + this.substrings = new String[num_substrings[name.length() - 1]]; + + int index = 0; + for (int substring_length = 1; + substring_length <= name.length(); substring_length++) { + for (int offset = 0; + offset + substring_length <= name.length(); offset++) { + this.substrings[index++] = name.substring(offset, + offset + substring_length); + } + } + } + + public String value() { + return name; + } + + public String arbitrary_substring() { + int index = TestConcurrentEvac.randomUnsignedInt() % substrings.length; + return substrings[index]; + } + } + + + // Return random int between 1 and MaxStringLength inclusive + static int randomStringLength() { + int length = randomUnsignedInt(); + length %= (MaxStringLength - 1); + length += 1; + return length; + } + + static String randomCharacter() { + int index = randomUnsignedInt() % 52; + return ("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ". 
+
+    static String randomString() {
+        int length = randomStringLength();
+        String result = "";    // make the compiler work for this garbage...
+        for (int i = 0; i < length; i++) {
+            result += randomCharacter();
+        }
+        return result;
+    }
+
+    // Return a non-negative pseudo-random int. Note that negating
+    // Integer.MIN_VALUE yields Integer.MIN_VALUE again, hence the
+    // second check.
+    static int randomUnsignedInt() {
+        int result = random.nextInt();
+        if (result < 0) result = -result;
+        if (result < 0) result = 0;
+        return result;
+    }
+
+    static int randomIndex() {
+        int index = randomUnsignedInt();
+        index %= TableSize;
+        return index;
+    }
+
+    public static void main(String[] args) throws Exception {
+        HashMap<Integer, Node> table = new HashMap<>(TableSize);
+
+        if (!Boolean.TRUE.equals(wb.getBooleanVMFlag("UseShenandoahGC")) ||
+            !"generational".equals(wb.getStringVMFlag("ShenandoahGCMode"))) {
+            throw new IllegalStateException("Command-line options not honored!");
+        }
+
+        for (int count = Integer.MAX_VALUE / 1024; count >= 0; count--) {
+            int index = randomIndex();
+            String name = randomString();
+            table.put(index, new Node(name));
+        }
+
+        String conclusion = "";
+
+        for (int i = 0; i < SentenceLength; i++) {
+            Node a_node = table.get(randomIndex());
+            if (a_node == null) {
+                i--;    // empty slot: try another index
+            } else {
+                String a_string = a_node.arbitrary_substring();
+                conclusion += a_string;
+                conclusion += " ";
+            }
+        }
+        conclusion = conclusion.substring(0, conclusion.length() - 1);
+
+        System.out.println("Conclusion is [" + conclusion + "]");
+
+        if (!conclusion.equals("cTy cTykJ kAkKAOWYEHbxFCmRIlyk xjYMdNmtAQXNGdIc sqHKsWnJIP")) {
+            throw new IllegalStateException("Random sequence of words did not end well!");
+        }
+    }
+}
+
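The main loop above is what actually exercises concurrent evacuation: roughly two million insertions (Integer.MAX_VALUE / 1024) land in a 53-slot table, so almost every put() overwrites an earlier entry and turns its whole Node graph (node, substring array, strings) into garbage, much of it after promotion to the old generation. A reduced sketch of that lifecycle, with plain String values standing in for the Node graphs and illustrative sizes:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;

    // Reduced sketch of the churn pattern: each put() to an occupied slot
    // makes the previous value unreachable, feeding the collector garbage
    // while fresh allocation keeps the young generation busy.
    public class ChurnSketch {
        public static void main(String[] args) {
            final int tableSize = 53;                 // mirrors TableSize above
            Map<Integer, String> table = new HashMap<>(tableSize);
            Random random = new Random(46);           // fixed seed, reproducible run
            for (int i = 0; i < 2_000_000; i++) {
                int slot = Math.floorMod(random.nextInt(), tableSize);
                table.put(slot, "payload-" + random.nextInt());
            }
            System.out.println("final table size: " + table.size());
        }
    }
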
diff --git a/test/hotspot/jtreg/gc/shenandoah/generational/TestSimpleGenerational.java b/test/hotspot/jtreg/gc/shenandoah/generational/TestSimpleGenerational.java
new file mode 100644
index 00000000000..a305d00102f
--- /dev/null
+++ b/test/hotspot/jtreg/gc/shenandoah/generational/TestSimpleGenerational.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.shenandoah.generational;
+
+import jdk.test.whitebox.WhiteBox;
+import java.util.Random;
+
+/*
+ * @test TestSimpleGenerational
+ * @requires vm.gc.Shenandoah
+ * @summary Confirm that card marking and remembered set scanning do not crash.
+ * @library /testlibrary /test/lib /
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:.
+ *      -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *      -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
+ *      gc.shenandoah.generational.TestSimpleGenerational
+ */
+public class TestSimpleGenerational {
+    private static final WhiteBox wb = WhiteBox.getWhiteBox();
+
+    // Fixed seed: the sequence of random numbers must end with the same
+    // value on every run.
+    private static final int SeedForRandom = 46;
+    private static final int ExpectedLastRandom = 272454100;
+
+    public static class Node {
+        private static final int NeighborCount = 5;
+        private static final int IntArraySize = 8;
+        private static Random random = new Random(SeedForRandom);
+
+        private int val;
+        private Object field_o;
+
+        // Each Node instance holds references to two "private" arrays.
+        // One array holds raw bits (primitive integers) and the other
+        // holds references.
+        private int[] field_ints;
+        private Node[] neighbors;
+
+        public Node(int val) {
+            this.val = val;
+            this.field_o = new Object();
+            this.field_ints = new int[IntArraySize];
+            this.field_ints[0] = 0xca;
+            this.field_ints[1] = 0xfe;
+            this.field_ints[2] = 0xba;
+            this.field_ints[3] = 0xbe;
+            this.field_ints[4] = 0xba;
+            this.field_ints[5] = 0xad;
+            this.field_ints[6] = 0xba;
+            this.field_ints[7] = 0xbe;
+
+            this.neighbors = new Node[NeighborCount];
+        }
+
+        public int value() {
+            return val;
+        }
+
+        // Copy each neighbor of n into a new node's neighbor array.
+        // Then overwrite an arbitrarily selected neighbor with a newly
+        // allocated leaf node.
+        public static Node upheaval(Node n) {
+            int first_val = random.nextInt();
+            if (first_val < 0) first_val = -first_val;
+            if (first_val < 0) first_val = 0;
+            Node result = new Node(first_val);
+            if (n != null) {
+                for (int i = 0; i < NeighborCount; i++) {
+                    result.neighbors[i] = n.neighbors[i];
+                }
+            }
+            int second_val = random.nextInt();
+            if (second_val < 0) second_val = -second_val;
+            if (second_val < 0) second_val = 0;
+
+            int overwrite_index = first_val % NeighborCount;
+            result.neighbors[overwrite_index] = new Node(second_val);
+            return result;
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        Node n = null;
+
+        if (!Boolean.TRUE.equals(wb.getBooleanVMFlag("UseShenandoahGC")) ||
+            !"generational".equals(wb.getStringVMFlag("ShenandoahGCMode"))) {
+            throw new IllegalStateException("Command-line options not honored!");
+        }
+
+        for (int count = 10000; count > 0; count--) {
+            n = Node.upheaval(n);
+        }
+
+        if (n.value() != ExpectedLastRandom) {
+            throw new IllegalStateException("Random number sequence ended badly!");
+        }
+    }
+}
+
diff --git a/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java
index c5bcc623d45..62d9addeaa4 100644
--- a/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +23,7 @@ * */ -/* @test +/* @test id=default * @summary test JNI critical arrays support in Shenandoah * @key randomness * @requires vm.gc.Shenandoah @@ -32,6 +33,16 @@ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestJNICritical */ + /* @test id=generational + * @summary test JNI critical arrays support in Shenandoah + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -XX:+ShenandoahVerify TestJNICritical + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational TestJNICritical + */ + import java.util.Arrays; import java.util.Random; import jdk.test.lib.Utils; diff --git a/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java index 64520391f25..6f211edf343 100644 --- a/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java +++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +42,25 @@ * TestJNIGlobalRefs */ +/* @test id=generational-verify + * @summary Test JNI Global Refs with Shenandoah + * @requires vm.gc.Shenandoah + * + * @run main/othervm/native -Xmx1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestJNIGlobalRefs + */ + +/* @test id=generational + * @summary Test JNI Global Refs with Shenandoah + * @requires vm.gc.Shenandoah + * + * @run main/othervm/native -Xmx1g -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestJNIGlobalRefs + */ + import java.util.Arrays; import java.util.Random; @@ -49,7 +69,7 @@ public class TestJNIGlobalRefs { System.loadLibrary("TestJNIGlobalRefs"); } - private static final int TIME_MSEC = 120000; + private static final long TIME_NSEC = 120L * 1_000_000_000L; private static final int ARRAY_SIZE = 10000; private static native void makeGlobalRef(Object o); @@ -60,13 +80,13 @@ public class TestJNIGlobalRefs { public static void main(String[] args) throws Throwable { seedGlobalRef(); seedWeakGlobalRef(); - long start = System.currentTimeMillis(); - long current = start; - while (current - start < TIME_MSEC) { + long startNanos = System.nanoTime(); + long currentNanos = startNanos; + while (currentNanos - startNanos < TIME_NSEC) { testGlobal(); testWeakGlobal(); Thread.sleep(1); - current = System.currentTimeMillis(); + currentNanos = System.nanoTime(); } } diff --git a/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java b/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java index a6b53a0ee2b..507772f9b18 100644 --- a/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java +++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +62,22 @@ * TestPinnedGarbage */ +/* @test id=generational + * @summary Test that garbage in the pinned region does not crash VM + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx128m + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestPinnedGarbage + * + * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx128m + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:+ShenandoahVerify + * TestPinnedGarbage + */ + import java.util.Arrays; import java.util.Random; import jdk.test.lib.Utils; diff --git a/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java b/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java index b570dad875f..dec5efd1855 100644 --- a/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java +++ b/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +64,46 @@ * -XX:+UseStringDeduplication TestHeapDump */ +/** + * @test id=generational + * @summary Tests JVMTI heap dumps + * @requires vm.gc.Shenandoah + * @requires vm.jvmti + * @compile TestHeapDump.java + * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump + * -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -Xmx128m + * -XX:ShenandoahGCMode=generational + * TestHeapDump + * + */ + +/** + * @test id=no-coops-generational + * @summary Tests JVMTI heap dumps + * @requires vm.gc.Shenandoah + * @requires vm.jvmti + * @requires vm.bits == "64" + * @compile TestHeapDump.java + * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump + * -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -Xmx128m + * -XX:ShenandoahGCMode=generational + * -XX:-UseCompressedOops TestHeapDump + */ + +/** + * @test id=generational-strdedup + * @summary Tests JVMTI heap dumps + * @requires vm.gc.Shenandoah + * @requires vm.jvmti + * @compile TestHeapDump.java + * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump + * -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -Xmx128m + * -XX:ShenandoahGCMode=generational + * -XX:+UseStringDeduplication TestHeapDump + */ import java.lang.ref.Reference; public class TestHeapDump { diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java index e83b85c62b8..6944bc5adcd 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -104,6 +105,18 @@ * TestChurnNotifications */ +/* + * @test id=generational + * @summary Check that MX notifications are reported for all cycles + * @library /test/lib / + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -Dprecise=false -Dmem.pool=Young + * TestChurnNotifications + */ + import java.util.*; import java.util.concurrent.atomic.*; import javax.management.*; @@ -128,8 +141,10 @@ public class TestChurnNotifications { static volatile Object sink; + private static final String POOL_NAME = "Young".equals(System.getProperty("mem.pool")) ? "Shenandoah Young Gen" : "Shenandoah"; + public static void main(String[] args) throws Exception { - final long startTime = System.currentTimeMillis(); + final long startTimeNanos = System.nanoTime(); final AtomicLong churnBytes = new AtomicLong(); @@ -141,8 +156,8 @@ public void handleNotification(Notification n, Object o) { Map mapBefore = info.getGcInfo().getMemoryUsageBeforeGc(); Map mapAfter = info.getGcInfo().getMemoryUsageAfterGc(); - MemoryUsage before = mapBefore.get("Shenandoah"); - MemoryUsage after = mapAfter.get("Shenandoah"); + MemoryUsage before = mapBefore.get(POOL_NAME); + MemoryUsage after = mapAfter.get(POOL_NAME); if ((before != null) && (after != null)) { long diff = before.getUsed() - after.getUsed(); @@ -176,8 +191,8 @@ public void handleNotification(Notification n, Object o) { // Look at test timeout to figure out how long we can wait without breaking into timeout. // Default to 1/4 of the remaining time in 1s steps. final long STEP_MS = 1000; - long spentTime = System.currentTimeMillis() - startTime; - long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - spentTime) / STEP_MS / 4; + long spentTimeNanos = System.nanoTime() - startTimeNanos; + long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - (spentTimeNanos / 1_000_000L)) / STEP_MS / 4; // Wait until enough notifications are accrued to match minimum boundary. long tries = 0; diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java index 56988e2f993..ac350448ba4 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /** - * @test + * @test id=default * @summary Test JMX memory beans * @requires vm.gc.Shenandoah * @modules java.base/jdk.internal.misc @@ -35,6 +36,19 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 128 1024 */ +/** + * @test id=generational + * @summary Test JMX memory beans + * @requires vm.gc.Shenandoah + * @modules java.base/jdk.internal.misc + * java.management + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g TestMemoryMXBeans -1 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms1g -Xmx1g TestMemoryMXBeans 1024 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms128m -Xmx1g TestMemoryMXBeans 128 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms1g -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 1024 1024 + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xms128m -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 128 1024 + */ + import java.lang.management.*; import java.util.*; diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java index 7c7cbe67384..50f710a92c0 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +24,7 @@ */ /** - * @test + * @test id=default * @summary Test JMX memory pools * @requires vm.gc.Shenandoah * @modules java.base/jdk.internal.misc @@ -31,6 +32,15 @@ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestMemoryPools */ +/** + * @test id=generational + * @summary Test JMX memory pools + * @requires vm.gc.Shenandoah + * @modules java.base/jdk.internal.misc + * java.management + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational -Xmx1g -Xms1g TestMemoryPools + */ + import java.lang.management.*; import java.util.*; diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java index 796806569b5..99b1c02e0db 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -98,6 +99,17 @@ * TestPauseNotifications */ +/* + * @test id=generational + * @summary Check that MX notifications are reported for all cycles + * @library /test/lib / + * @requires vm.gc.Shenandoah + * + * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * TestPauseNotifications + */ + import java.util.*; import java.util.concurrent.atomic.*; import javax.management.*; @@ -122,7 +134,7 @@ private static boolean isExpectedPauseAction(String action) { } public static void main(String[] args) throws Exception { - final long startTime = System.currentTimeMillis(); + final long startTimeNanos = System.nanoTime(); final AtomicLong pausesDuration = new AtomicLong(); final AtomicLong cyclesDuration = new AtomicLong(); @@ -173,8 +185,8 @@ public void handleNotification(Notification n, Object o) { // Look at test timeout to figure out how long we can wait without breaking into timeout. // Default to 1/4 of the remaining time in 1s steps. final long STEP_MS = 1000; - long spentTime = System.currentTimeMillis() - startTime; - long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - spentTime) / STEP_MS / 4; + long spentTimeNanos = System.nanoTime() - startTimeNanos; + long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - (spentTimeNanos / 1_000_000L)) / STEP_MS / 4; long actualPauses = 0; long actualCycles = 0; diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java deleted file mode 100644 index 1057eb4a977..00000000000 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2018, Red Hat, Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -/** - * @test - * @summary Test allocation of small object to result OOM, but not to crash JVM - * @requires vm.gc.Shenandoah - * @library /test/lib - * @run driver TestAllocLargeObj - */ - -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.process.ProcessTools; - -public class TestAllocLargeObj { - - static final int SIZE = 1 * 1024 * 1024; - static final int COUNT = 16; - - static volatile Object sink; - - public static void work() throws Exception { - Object[] root = new Object[COUNT]; - sink = root; - for (int c = 0; c < COUNT; c++) { - root[c] = new Object[SIZE]; - } - } - - public static void main(String[] args) throws Exception { - if (args.length > 0) { - work(); - return; - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx16m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocLargeObj.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(1); - analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx1g", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocLargeObj.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(0); - analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); - } - } -} diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java deleted file mode 100644 index 1567e3d05da..00000000000 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2018, Red Hat, Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -/** - * @test - * @summary Test that allocation of the object larger than heap fails predictably - * @requires vm.gc.Shenandoah - * @library /test/lib - * @run driver TestAllocLargerThanHeap - */ - -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.process.ProcessTools; - -public class TestAllocLargerThanHeap { - - static final int SIZE = 16 * 1024 * 1024; - - static volatile Object sink; - - public static void work() throws Exception { - sink = new Object[SIZE]; - } - - public static void main(String[] args) throws Exception { - if (args.length > 0) { - work(); - return; - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx16m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocLargerThanHeap.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(1); - analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx1g", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocLargerThanHeap.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(0); - analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); - } - } -} diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocOutOfMemory.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocOutOfMemory.java new file mode 100644 index 00000000000..d1a0c81c4d4 --- /dev/null +++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocOutOfMemory.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+/**
+ * @test id=large
+ * @summary Test that allocation of large objects results in OOM but does not crash the JVM
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestAllocOutOfMemory large
+ */
+
+/**
+ * @test id=heap
+ * @summary Test that allocation of a heap-sized object results in OOM but does not crash the JVM
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestAllocOutOfMemory heap
+ */
+
+/**
+ * @test id=small
+ * @summary Test that allocation of small objects results in OOM but does not crash the JVM
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestAllocOutOfMemory small
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocOutOfMemory {
+
+    static volatile Object sink;
+
+    public static void work(int size, int count) throws Exception {
+        Object[] root = new Object[count];
+        sink = root;
+        for (int c = 0; c < count; c++) {
+            root[c] = new Object[size];
+        }
+    }
+
+    private static void allocate(String size, int multiplier) throws Exception {
+        switch (size) {
+            case "large":
+                work(1024 * 1024, 16 * multiplier);
+                break;
+            case "heap":
+                work(16 * 1024 * 1024, multiplier);
+                break;
+            case "small":
+                work(1, 16 * 1024 * 1024 * multiplier);
+                break;
+            default:
+                throw new IllegalArgumentException("Usage: test [large|heap|small]");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 2) {
+            // Called from the test: size is the second argument, the heap
+            // size requested on the command line is the third.
+            String size = args[1];
+            long specHeap = Integer.parseInt(args[2]);
+
+            // The actual heap we get may be larger than the one we asked for
+            // (particularly in the generational case).
+            final long actualHeap = Runtime.getRuntime().maxMemory();
+
+            // Scale the allocation up to match the actual heap size, rounding
+            // up, so the intended OOM condition is still reached.
+            int multiplier = 1;
+            if (actualHeap > specHeap) {
+                multiplier = (int) ((actualHeap + specHeap - 1) / specHeap);
+            }
+
+            allocate(size, multiplier);
+            return;
+        }
+
+        // Called from jtreg: size is the first argument
+        String size = args[0];
+        {
+            int heap = 16 * 1024 * 1024; // -Xmx16m
+            expectFailure("-Xmx16m",
+                          "-XX:+UnlockExperimentalVMOptions",
+                          "-XX:+UseShenandoahGC",
+                          TestAllocOutOfMemory.class.getName(),
+                          "test", size, Integer.toString(heap));
+
+            expectFailure("-Xmx16m",
+                          "-XX:+UnlockExperimentalVMOptions",
+                          "-XX:+UseShenandoahGC", "-XX:ShenandoahGCMode=generational",
+                          TestAllocOutOfMemory.class.getName(),
+                          "test", size, Integer.toString(heap));
+        }
+
+        {
+            int heap = 1 * 1024 * 1024 * 1024; // -Xmx1g
+            expectSuccess("-Xmx1g",
+                          "-XX:+UnlockExperimentalVMOptions",
+                          "-XX:+UseShenandoahGC",
+                          TestAllocOutOfMemory.class.getName(),
+                          "test", size, Integer.toString(heap));
+
+            expectSuccess("-Xmx1g",
+                          "-XX:+UnlockExperimentalVMOptions",
+                          "-XX:+UseShenandoahGC", "-XX:ShenandoahGCMode=generational",
+                          TestAllocOutOfMemory.class.getName(),
+                          "test", size, Integer.toString(heap));
+        }
+    }
+
+    private static void expectSuccess(String... args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(args);
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+        analyzer.shouldHaveExitValue(0);
+        analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+    }
+
+    private static void expectFailure(String...
args) throws Exception { + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(args); + OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); + analyzer.shouldHaveExitValue(1); + analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); + } +} diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java deleted file mode 100644 index bc32c1f0aa0..00000000000 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2018, Red Hat, Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/** - * @test - * @summary Test allocation of small object to result OOM, but not to crash JVM - * @requires vm.gc.Shenandoah - * @library /test/lib - * @run driver TestAllocSmallObj - */ - -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.process.ProcessTools; - -public class TestAllocSmallObj { - - static final int COUNT = 16 * 1024 * 1024; - - static volatile Object sink; - - public static void work() throws Exception { - Object[] root = new Object[COUNT]; - sink = root; - for (int c = 0; c < COUNT; c++) { - root[c] = new Object(); - } - } - - public static void main(String[] args) throws Exception { - if (args.length > 0) { - work(); - return; - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx16m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocSmallObj.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(1); - analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); - } - - { - ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( - "-Xmx1g", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - TestAllocSmallObj.class.getName(), - "test"); - - OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); - analyzer.shouldHaveExitValue(0); - analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); - } - } -} diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java index beb57b0b401..d30bc00c00d 100644 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java +++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,9 +126,10 @@ public static void main(String[] args) throws Exception { } String[][][] modeHeuristics = new String[][][] { - {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, - {{"iu"}, {"adaptive", "aggressive"}}, - {{"passive"}, {"passive"}} + {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, + {{"iu"}, {"adaptive", "aggressive"}}, + {{"passive"}, {"passive"}}, + {{"generational"}, {"adaptive"}} }; for (String[][] mh : modeHeuristics) { diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java index 75cb2d5c31a..56d9235548f 100644 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java +++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,5 +75,19 @@ public static void main(String[] args) throws Exception { analyzer.shouldContain("java.lang.OutOfMemoryError"); analyzer.shouldContain("All good"); } + + { + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-Xmx32m", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseShenandoahGC", "-XX:ShenandoahGCMode=generational", + TestThreadFailure.class.getName(), + "test"); + + OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); + analyzer.shouldHaveExitValue(0); + analyzer.shouldContain("java.lang.OutOfMemoryError"); + analyzer.shouldContain("All good"); + } } } diff --git a/test/hotspot/jtreg/gc/shenandoah/options/TestModeUnlock.java b/test/hotspot/jtreg/gc/shenandoah/options/TestModeUnlock.java index 0c8fea7f993..57eaa2f4ae0 100644 --- a/test/hotspot/jtreg/gc/shenandoah/options/TestModeUnlock.java +++ b/test/hotspot/jtreg/gc/shenandoah/options/TestModeUnlock.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2020, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,9 +45,10 @@ enum Mode { } public static void main(String[] args) throws Exception { - testWith("-XX:ShenandoahGCMode=satb", Mode.PRODUCT); - testWith("-XX:ShenandoahGCMode=iu", Mode.EXPERIMENTAL); - testWith("-XX:ShenandoahGCMode=passive", Mode.DIAGNOSTIC); + testWith("-XX:ShenandoahGCMode=satb", Mode.PRODUCT); + testWith("-XX:ShenandoahGCMode=iu", Mode.EXPERIMENTAL); + testWith("-XX:ShenandoahGCMode=passive", Mode.DIAGNOSTIC); + testWith("-XX:ShenandoahGCMode=generational", Mode.EXPERIMENTAL); } private static void testWith(String h, Mode mode) throws Exception {
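The generational configurations added to TestThreadFailure and TestModeUnlock above reuse the launch-and-inspect pattern that runs throughout this patch: spawn a fresh JVM with the flags under test and assert on its output and exit code. A minimal sketch under the same jdk.test.lib API used above; MainPayload is a hypothetical stand-in for the real test classes:

    import jdk.test.lib.process.OutputAnalyzer;
    import jdk.test.lib.process.ProcessTools;

    // Launch a child JVM in generational mode and check that it exits cleanly.
    // ShenandoahGCMode=generational is experimental here, hence the unlock flag.
    public class LaunchSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(
                    "-XX:+UnlockExperimentalVMOptions",
                    "-XX:+UseShenandoahGC", "-XX:ShenandoahGCMode=generational",
                    "MainPayload");
            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
            analyzer.shouldHaveExitValue(0);
        }
    }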