diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index 40d63419e7c..7054bd39bad 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -78,86 +78,71 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { return size; } -CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) : - _code_begin(layout.code_begin()), - _code_end(layout.code_end()), - _content_begin(layout.content_begin()), - _data_end(layout.data_end()), - _relocation_begin(layout.relocation_begin()), - _relocation_end(layout.relocation_end()), - _oop_maps(oop_maps), +CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CompilerType type, CodeBuffer* cb, int size, uint16_t header_size, + int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) : + _oop_maps(nullptr), // will be set by set_oop_maps() call _name(name), - _size(layout.size()), - _header_size(layout.header_size()), - _frame_complete_offset(frame_complete_offset), - _data_offset(layout.data_offset()), + _size(size), + _relocation_size(align_up(cb->total_relocation_size(), oopSize)), + _content_offset(CodeBlob::align_code_offset(header_size + _relocation_size)), + _code_offset(_content_offset + cb->total_offset_of(cb->insts())), + _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)), _frame_size(frame_size), - _caller_must_gc_arguments(caller_must_gc_arguments), - _is_compiled(compiled), - _type(type) -{ - assert(is_aligned(layout.size(), oopSize), "unaligned size"); - assert(is_aligned(layout.header_size(), oopSize), "unaligned size"); - assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size"); - assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()"); -#ifdef COMPILER1 - // probably 
wrong for tiered - assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); -#endif // COMPILER1 - S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields -} - -CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) : - _code_begin(layout.code_begin()), - _code_end(layout.code_end()), - _content_begin(layout.content_begin()), - _data_end(layout.data_end()), - _relocation_begin(layout.relocation_begin()), - _relocation_end(layout.relocation_end()), - _name(name), - _size(layout.size()), - _header_size(layout.header_size()), + S390_ONLY(_ctable_offset(0) COMMA) + _header_size(header_size), _frame_complete_offset(frame_complete_offset), - _data_offset(layout.data_offset()), - _frame_size(frame_size), + _kind(kind), _caller_must_gc_arguments(caller_must_gc_arguments), - _is_compiled(compiled), _type(type) { - assert(is_aligned(_size, oopSize), "unaligned size"); - assert(is_aligned(_header_size, oopSize), "unaligned size"); - assert(_data_offset <= _size, "codeBlob is too small"); - assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()"); - - set_oop_maps(oop_maps); + assert(is_aligned(_size, oopSize), "unaligned size"); + assert(is_aligned(header_size, oopSize), "unaligned size"); + assert(is_aligned(_relocation_size, oopSize), "unaligned size"); + assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size); + assert(code_end() == content_end(), "must be the same - see code_end()"); #ifdef COMPILER1 // probably wrong for tiered assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); #endif // COMPILER1 - S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields + + set_oop_maps(oop_maps); } - -// Creates a simple CodeBlob. Sets up the size of the different regions. 
-RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) - : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */) +// Simple CodeBlob used for simple BufferBlob. +CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) : + _oop_maps(nullptr), + _name(name), + _size(size), + _relocation_size(0), + _content_offset(CodeBlob::align_code_offset(header_size)), + _code_offset(_content_offset), + _data_offset(size), + _frame_size(0), + S390_ONLY(_ctable_offset(0) COMMA) + _header_size(header_size), + _frame_complete_offset(CodeOffsets::frame_never_safe), + _kind(kind), + _caller_must_gc_arguments(false), + _type(compiler_none) { - assert(is_aligned(locs_size, oopSize), "unaligned size"); + assert(is_aligned(size, oopSize), "unaligned size"); + assert(is_aligned(header_size, oopSize), "unaligned size"); } - // Creates a RuntimeBlob from a CodeBuffer // and copy code and relocation info. 
RuntimeBlob::RuntimeBlob( const char* name, + CodeBlobKind kind, CodeBuffer* cb, - int header_size, int size, - int frame_complete, + uint16_t header_size, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments -) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) { +) : CodeBlob(name, kind, compiler_none, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) +{ cb->copy_code_and_locs_to(this); } @@ -245,8 +230,8 @@ void CodeBlob::print_code() { // Implementation of BufferBlob -BufferBlob::BufferBlob(const char* name, int size) -: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0) +BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size) +: RuntimeBlob(name, kind, size, sizeof(BufferBlob)) {} BufferBlob* BufferBlob::create(const char* name, int buffer_size) { @@ -260,7 +245,7 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) { assert(name != nullptr, "must provide a name"); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size); + blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -269,10 +254,11 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) { } -BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb) - : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, nullptr) +BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size) + : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr) {} +// Used by gtest BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { ThreadInVMfromUnknown __tiv; // get to VM 
state in case we block on CodeCache_lock @@ -281,7 +267,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { assert(name != nullptr, "must provide a name"); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size, cb); + blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -302,7 +288,7 @@ void BufferBlob::free(BufferBlob *blob) { // Implementation of AdapterBlob AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : - BufferBlob("I2C/C2I adapters", size, cb) { + BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) { CodeCache::commit(this); } @@ -334,7 +320,7 @@ void* VtableBlob::operator new(size_t s, unsigned size) throw() { } VtableBlob::VtableBlob(const char* name, int size) : - BufferBlob(name, size) { + BufferBlob(name, CodeBlobKind::Vtable, size) { } VtableBlob* VtableBlob::create(const char* name, int buffer_size) { @@ -400,18 +386,19 @@ RuntimeStub::RuntimeStub( const char* name, CodeBuffer* cb, int size, - int frame_complete, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments ) -: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) +: RuntimeBlob(name, CodeBlobKind::Runtime_Stub, cb, size, sizeof(RuntimeStub), + frame_complete, frame_size, oop_maps, caller_must_gc_arguments) { } RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, CodeBuffer* cb, - int frame_complete, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) @@ -456,7 +443,8 @@ DeoptimizationBlob::DeoptimizationBlob( int unpack_with_reexecution_offset, int frame_size ) -: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps) +: SingletonBlob("DeoptimizationBlob", 
CodeBlobKind::Deoptimization, cb, + size, sizeof(DeoptimizationBlob), frame_size, oop_maps) { _unpack_offset = unpack_offset; _unpack_with_exception = unpack_with_exception_offset; @@ -505,7 +493,8 @@ UncommonTrapBlob::UncommonTrapBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps) +: SingletonBlob("UncommonTrapBlob", CodeBlobKind::Uncommon_Trap, cb, + size, sizeof(UncommonTrapBlob), frame_size, oop_maps) {} @@ -541,7 +530,8 @@ ExceptionBlob::ExceptionBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps) +: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb, + size, sizeof(ExceptionBlob), frame_size, oop_maps) {} @@ -576,7 +566,8 @@ SafepointBlob::SafepointBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps) +: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb, + size, sizeof(SafepointBlob), frame_size, oop_maps) {} @@ -736,7 +727,8 @@ void DeoptimizationBlob::print_value_on(outputStream* st) const { UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, intptr_t exception_handler_offset, jobject receiver, ByteSize frame_data_offset) : - RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */, + RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub), + CodeOffsets::frame_never_safe, 0 /* no frame size */, /* oop maps = */ nullptr, /* caller must gc arguments = */ false), _exception_handler_offset(exception_handler_offset), _receiver(receiver), diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index c1c34a06c75..553b5bf022d 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -75,10 +75,24 @@ enum class CodeBlobType { // - instruction space // - data space +enum 
class CodeBlobKind : u1 { + None, + Nmethod, + Buffer, + Adapter, + Vtable, + MH_Adapter, + Runtime_Stub, + Deoptimization, + Exception, + Safepoint, + Uncommon_Trap, + Upcall, + Number_Of_Kinds +}; -class CodeBlobLayout; -class UpcallStub; // for as_upcall_stub() -class RuntimeStub; // for as_runtime_stub() +class UpcallStub; // for as_upcall_stub() +class RuntimeStub; // for as_runtime_stub() class JavaFrameAnchor; // for UpcallStub::jfa_for_frame class CodeBlob { @@ -89,44 +103,37 @@ class CodeBlob { protected: // order fields from large to small to minimize padding between fields - address _code_begin; - address _code_end; - address _content_begin; // address to where content region begins (this includes consts, insts, stubs) - // address _content_end - not required, for all CodeBlobs _code_end == _content_end for now - address _data_end; - address _relocation_begin; - address _relocation_end; + ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob + const char* _name; - ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob + int _size; // total size of CodeBlob in bytes + int _relocation_size; // size of relocation + int _content_offset; // offset to where content region begins (this includes consts, insts, stubs) + int _code_offset; // offset to where instructions region begins (this includes insts, stubs) + int _data_offset; // offset to where data region begins + int _frame_size; // size of stack frame in words (NOT slots. On x64 these are 64bit words) - const char* _name; S390_ONLY(int _ctable_offset;) + uint16_t _header_size; // size of header (depends on subclass) + int16_t _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have + // not finished setting up their frame. Beware of pc's in + // that range. There is a similar range(s) on returns + // which we don't detect. 
- int _size; // total size of CodeBlob in bytes - int _header_size; // size of header (depends on subclass) - int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have - // not finished setting up their frame. Beware of pc's in - // that range. There is a similar range(s) on returns - // which we don't detect. - int _data_offset; // offset to where data region begins - int _frame_size; // size of stack frame in words (NOT slots. On x64 these are 64bit words) - + CodeBlobKind _kind; // Kind of this code blob bool _caller_must_gc_arguments; - - bool _is_compiled; const CompilerType _type; // CompilerType #ifndef PRODUCT AsmRemarks _asm_remarks; DbgStrings _dbg_strings; -#endif // not PRODUCT +#endif - CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, - int frame_size, ImmutableOopMapSet* oop_maps, - bool caller_must_gc_arguments, bool compiled = false); - CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, - int frame_size, OopMapSet* oop_maps, - bool caller_must_gc_arguments, bool compiled = false); + CodeBlob(const char* name, CodeBlobKind kind, CompilerType type, CodeBuffer* cb, int size, uint16_t header_size, + int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments); + + // Simple CodeBlob used for simple BufferBlob. 
+ CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size); void operator delete(void* p) { } @@ -146,19 +153,18 @@ class CodeBlob { virtual void purge(bool free_code_cache_data, bool unregister_nmethod); // Typing - virtual bool is_buffer_blob() const { return false; } - virtual bool is_nmethod() const { return false; } - virtual bool is_runtime_stub() const { return false; } - virtual bool is_deoptimization_stub() const { return false; } - virtual bool is_uncommon_trap_stub() const { return false; } - virtual bool is_exception_stub() const { return false; } - virtual bool is_safepoint_stub() const { return false; } - virtual bool is_adapter_blob() const { return false; } - virtual bool is_vtable_blob() const { return false; } - virtual bool is_method_handles_adapter_blob() const { return false; } - virtual bool is_upcall_stub() const { return false; } - bool is_compiled() const { return _is_compiled; } - const bool* is_compiled_addr() const { return &_is_compiled; } + bool is_nmethod() const { return _kind == CodeBlobKind::Nmethod; } + bool is_buffer_blob() const { return _kind == CodeBlobKind::Buffer; } + bool is_runtime_stub() const { return _kind == CodeBlobKind::Runtime_Stub; } + bool is_deoptimization_stub() const { return _kind == CodeBlobKind::Deoptimization; } + bool is_uncommon_trap_stub() const { return _kind == CodeBlobKind::Uncommon_Trap; } + bool is_exception_stub() const { return _kind == CodeBlobKind::Exception; } + bool is_safepoint_stub() const { return _kind == CodeBlobKind::Safepoint; } + bool is_adapter_blob() const { return _kind == CodeBlobKind::Adapter; } + bool is_vtable_blob() const { return _kind == CodeBlobKind::Vtable; } + bool is_method_handles_adapter_blob() const { return _kind == CodeBlobKind::MH_Adapter; } + bool is_upcall_stub() const { return _kind == CodeBlobKind::Upcall; } + bool is_compiled() const { return _kind == CodeBlobKind::Nmethod || _type != compiler_none; } inline bool is_compiled_by_c1() const { 
return _type == compiler_c1; }; inline bool is_compiled_by_c2() const { return _type == compiler_c2; }; @@ -176,14 +182,22 @@ class CodeBlob { RuntimeStub* as_runtime_stub() const { assert(is_runtime_stub(), "must be runtime blob"); return (RuntimeStub*) this; } // Boundaries - address header_begin() const { return (address) this; } - relocInfo* relocation_begin() const { return (relocInfo*) _relocation_begin; }; - relocInfo* relocation_end() const { return (relocInfo*) _relocation_end; } - address content_begin() const { return _content_begin; } - address content_end() const { return _code_end; } // _code_end == _content_end is true for all types of blobs for now, it is also checked in the constructor - address code_begin() const { return _code_begin; } - address code_end() const { return _code_end; } - address data_end() const { return _data_end; } + address header_begin() const { return (address) this; } + address header_end() const { return ((address) this) + _header_size; } + relocInfo* relocation_begin() const { return (relocInfo*) header_end(); } + relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); } + address content_begin() const { return (address) header_begin() + _content_offset; } + address content_end() const { return (address) header_begin() + _data_offset; } + address code_begin() const { return (address) header_begin() + _code_offset; } + // code_end == content_end is true for all types of blobs for now, it is also checked in the constructor + address code_end() const { return (address) header_begin() + _data_offset; } + address data_begin() const { return (address) header_begin() + _data_offset; } + address data_end() const { return (address) header_begin() + _size; } + + // Offsets + int content_offset() const { return _content_offset; } + int code_offset() const { return _code_offset; } + int data_offset() const { return _data_offset; } // This field holds the beginning of the const section in the old code 
buffer. // It is needed to fix relocations of pc-relative loads when resizing the @@ -201,8 +215,6 @@ class CodeBlob { void adjust_size(size_t used) { _size = (int)used; _data_offset = (int)used; - _code_end = (address)this + used; - _data_end = (address)this + used; } // Containment @@ -260,97 +272,8 @@ class CodeBlob { #endif }; -class CodeBlobLayout : public StackObj { -private: - int _size; - int _header_size; - int _relocation_size; - int _content_offset; - int _code_offset; - int _data_offset; - address _code_begin; - address _code_end; - address _content_begin; - address _content_end; - address _data_end; - address _relocation_begin; - address _relocation_end; - -public: - CodeBlobLayout(address code_begin, address code_end, address content_begin, address content_end, address data_end, address relocation_begin, address relocation_end) : - _size(0), - _header_size(0), - _relocation_size(0), - _content_offset(0), - _code_offset(0), - _data_offset(0), - _code_begin(code_begin), - _code_end(code_end), - _content_begin(content_begin), - _content_end(content_end), - _data_end(data_end), - _relocation_begin(relocation_begin), - _relocation_end(relocation_end) - { - } - - CodeBlobLayout(const address start, int size, int header_size, int relocation_size, int data_offset) : - _size(size), - _header_size(header_size), - _relocation_size(relocation_size), - _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)), - _code_offset(_content_offset), - _data_offset(data_offset) - { - assert(is_aligned(_relocation_size, oopSize), "unaligned size"); - - _code_begin = (address) start + _code_offset; - _code_end = (address) start + _data_offset; - - _content_begin = (address) start + _content_offset; - _content_end = (address) start + _data_offset; - - _data_end = (address) start + _size; - _relocation_begin = (address) start + _header_size; - _relocation_end = _relocation_begin + _relocation_size; - } - - CodeBlobLayout(const address start, int size, int 
header_size, const CodeBuffer* cb) : - _size(size), - _header_size(header_size), - _relocation_size(align_up(cb->total_relocation_size(), oopSize)), - _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)), - _code_offset(_content_offset + cb->total_offset_of(cb->insts())), - _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)) - { - assert(is_aligned(_relocation_size, oopSize), "unaligned size"); - - _code_begin = (address) start + _code_offset; - _code_end = (address) start + _data_offset; - - _content_begin = (address) start + _content_offset; - _content_end = (address) start + _data_offset; - - _data_end = (address) start + _size; - _relocation_begin = (address) start + _header_size; - _relocation_end = _relocation_begin + _relocation_size; - } - - int size() const { return _size; } - int header_size() const { return _header_size; } - int relocation_size() const { return _relocation_size; } - int content_offset() const { return _content_offset; } - int code_offset() const { return _code_offset; } - int data_offset() const { return _data_offset; } - address code_begin() const { return _code_begin; } - address code_end() const { return _code_end; } - address data_end() const { return _data_end; } - address relocation_begin() const { return _relocation_begin; } - address relocation_end() const { return _relocation_end; } - address content_begin() const { return _content_begin; } - address content_end() const { return _content_end; } -}; - +//---------------------------------------------------------------------------------------------------- +// RuntimeBlob: used for non-compiled method code (adapters, stubs, blobs) class RuntimeBlob : public CodeBlob { friend class VMStructs; @@ -358,17 +281,20 @@ class RuntimeBlob : public CodeBlob { // Creation // a) simple CodeBlob - // frame_complete is the offset from the beginning of the instructions - // to where the frame setup (from stackwalk viewpoint) is complete. 
- RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size); + RuntimeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) + : CodeBlob(name, kind, size, header_size) + {} // b) full CodeBlob + // frame_complete is the offset from the beginning of the instructions + // to where the frame setup (from stackwalk viewpoint) is complete. RuntimeBlob( const char* name, + CodeBlobKind kind, CodeBuffer* cb, - int header_size, int size, - int frame_complete, + uint16_t header_size, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments = false @@ -403,8 +329,8 @@ class BufferBlob: public RuntimeBlob { private: // Creation support - BufferBlob(const char* name, int size); - BufferBlob(const char* name, int size, CodeBuffer* cb); + BufferBlob(const char* name, CodeBlobKind kind, int size); + BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size); void* operator new(size_t s, unsigned size) throw(); @@ -415,9 +341,6 @@ class BufferBlob: public RuntimeBlob { static void free(BufferBlob* buf); - // Typing - virtual bool is_buffer_blob() const { return true; } - // GC/Verification support void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } @@ -437,9 +360,6 @@ class AdapterBlob: public BufferBlob { public: // Creation static AdapterBlob* create(CodeBuffer* cb); - - // Typing - virtual bool is_adapter_blob() const { return true; } }; //--------------------------------------------------------------------------------------------------- @@ -452,9 +372,6 @@ class VtableBlob: public BufferBlob { public: // Creation static VtableBlob* create(const char* name, int buffer_size); - - // Typing - virtual bool is_vtable_blob() const { return true; } }; //---------------------------------------------------------------------------------------------------- @@ -462,14 +379,11 @@ class VtableBlob: public BufferBlob { class 
MethodHandlesAdapterBlob: public BufferBlob { private: - MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", size) {} + MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", CodeBlobKind::MH_Adapter, size) {} public: // Creation static MethodHandlesAdapterBlob* create(int buffer_size); - - // Typing - virtual bool is_method_handles_adapter_blob() const { return true; } }; @@ -484,7 +398,7 @@ class RuntimeStub: public RuntimeBlob { const char* name, CodeBuffer* cb, int size, - int frame_complete, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments @@ -497,7 +411,7 @@ class RuntimeStub: public RuntimeBlob { static RuntimeStub* new_runtime_stub( const char* stub_name, CodeBuffer* cb, - int frame_complete, + int16_t frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments @@ -505,9 +419,6 @@ class RuntimeStub: public RuntimeBlob { static void free(RuntimeStub* stub) { RuntimeBlob::free(stub); } - // Typing - bool is_runtime_stub() const { return true; } - address entry_point() const { return code_begin(); } // GC/Verification support @@ -530,14 +441,15 @@ class SingletonBlob: public RuntimeBlob { public: SingletonBlob( - const char* name, - CodeBuffer* cb, - int header_size, - int size, - int frame_size, - OopMapSet* oop_maps + const char* name, + CodeBlobKind kind, + CodeBuffer* cb, + int size, + uint16_t header_size, + int frame_size, + OopMapSet* oop_maps ) - : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps) + : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, frame_size, oop_maps) {}; address entry_point() { return code_begin(); } @@ -591,9 +503,6 @@ class DeoptimizationBlob: public SingletonBlob { int frame_size ); - // Typing - bool is_deoptimization_stub() const { return true; } - // GC for args void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { 
/* Nothing to do */ } @@ -658,9 +567,6 @@ class UncommonTrapBlob: public SingletonBlob { // GC for args void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_uncommon_trap_stub() const { return true; } }; @@ -688,9 +594,6 @@ class ExceptionBlob: public SingletonBlob { // GC for args void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_exception_stub() const { return true; } }; #endif // COMPILER2 @@ -719,9 +622,6 @@ class SafepointBlob: public SingletonBlob { // GC for args void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_safepoint_stub() const { return true; } }; //---------------------------------------------------------------------------------------------------- @@ -764,9 +664,6 @@ class UpcallStub: public RuntimeBlob { JavaFrameAnchor* jfa_for_frame(const frame& frame) const; - // Typing - virtual bool is_upcall_stub() const override { return true; } - // GC/Verification support void oops_do(OopClosure* f, const frame& frame); virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override; diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp index 4da319644de..b79df8ff66a 100644 --- a/src/hotspot/share/code/compiledMethod.cpp +++ b/src/hotspot/share/code/compiledMethod.cpp @@ -50,23 +50,11 @@ #include "runtime/mutexLocker.hpp" #include "runtime/sharedRuntime.hpp" -CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, - int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, - bool caller_must_gc_arguments, bool compiled) - : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), - 
_deoptimization_status(not_marked), - _deoptimization_generation(0), - _method(method), - _gc_data(nullptr) -{ - init_defaults(); -} - CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, - OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) - : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, - frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), + OopMapSet* oop_maps, bool caller_must_gc_arguments) + : CodeBlob(name, CodeBlobKind::Nmethod, type, cb, size, header_size, + frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments), _deoptimization_status(not_marked), _deoptimization_generation(0), _method(method), diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp index ca441d9ae64..c07ac24cceb 100644 --- a/src/hotspot/share/code/compiledMethod.hpp +++ b/src/hotspot/share/code/compiledMethod.hpp @@ -182,8 +182,7 @@ class CompiledMethod : public CodeBlob { } protected: - CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); - CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); + CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments); public: // Only used by unit test. 
diff --git a/src/hotspot/share/code/debugInfoRec.cpp b/src/hotspot/share/code/debugInfoRec.cpp index 15353bf2872..ab7be142051 100644 --- a/src/hotspot/share/code/debugInfoRec.cpp +++ b/src/hotspot/share/code/debugInfoRec.cpp @@ -244,14 +244,11 @@ static struct dir_stats_struct { int chunks_queried; int chunks_shared; - int chunks_reshared; int chunks_elided; void print() { - tty->print_cr("Debug Data Chunks: %d, shared %d+%d, non-SP's elided %d", - chunks_queried, - chunks_shared, chunks_reshared, - chunks_elided); + tty->print_cr("Debug Data Chunks: %d, shared %d, non-SP's elided %d", + chunks_queried, chunks_shared, chunks_elided); } } dir_stats; #endif //PRODUCT diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp index 52ba939d6da..0f164573f87 100644 --- a/src/hotspot/share/code/dependencies.cpp +++ b/src/hotspot/share/code/dependencies.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -387,9 +387,7 @@ void Dependencies::copy_to(nmethod* nm) { address beg = nm->dependencies_begin(); address end = nm->dependencies_end(); guarantee(end - beg >= (ptrdiff_t) size_in_bytes(), "bad sizing"); - Copy::disjoint_words((HeapWord*) content_bytes(), - (HeapWord*) beg, - size_in_bytes() / sizeof(HeapWord)); + (void)memcpy(beg, content_bytes(), size_in_bytes()); assert(size_in_bytes() % sizeof(HeapWord) == 0, "copy by words"); } diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index c07b5e28c17..b1ff0c8a0ec 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -110,6 +110,11 @@ #endif +// Cast from int value to narrow type +#define CHECKED_CAST(result, T, thing) \ + result = static_cast<T>(thing); \ + assert(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing); + //--------------------------------------------------------------------------------- // NMethod statistics // They are printed under various flags, including: @@ -121,26 +126,28 @@ // and make it simpler to print from the debugger. 
struct java_nmethod_stats_struct { int nmethod_count; - int total_size; + uint total_nm_size; + uint total_immut_size; int relocation_size; int consts_size; int insts_size; int stub_size; - int scopes_data_size; - int scopes_pcs_size; + uint oops_size; + uint metadata_size; int dependencies_size; - int handler_table_size; int nul_chk_table_size; + uint handler_table_size; + uint scopes_pcs_size; + uint scopes_data_size; #if INCLUDE_JVMCI int speculations_size; int jvmci_data_size; #endif - int oops_size; - int metadata_size; void note_nmethod(nmethod* nm) { nmethod_count += 1; - total_size += nm->size(); + total_nm_size += nm->size(); + total_immut_size += nm->immutable_data_size(); relocation_size += nm->relocation_size(); consts_size += nm->consts_size(); insts_size += nm->insts_size(); @@ -160,22 +167,60 @@ struct java_nmethod_stats_struct { void print_nmethod_stats(const char* name) { if (nmethod_count == 0) return; tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name); - if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); - if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod)); - if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); - if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); - if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); - if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); - if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); - if (metadata_size != 0) tty->print_cr(" metadata = %d", metadata_size); - if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); - if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); - if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); - if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); - if (nul_chk_table_size != 0) tty->print_cr(" 
nul chk table = %d", nul_chk_table_size); + uint total_size = total_nm_size + total_immut_size; + if (total_nm_size != 0) { + tty->print_cr(" total size = %u (100%%)", total_size); + tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size); + } + uint header_size = (uint)(nmethod_count * sizeof(nmethod)); + if (nmethod_count != 0) { + tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size); + } + if (relocation_size != 0) { + tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_nm_size); + } + if (consts_size != 0) { + tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size); + } + if (insts_size != 0) { + tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size); + } + if (stub_size != 0) { + tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size); + } + if (oops_size != 0) { + tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size); + } + if (metadata_size != 0) { + tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_nm_size); + } +#if INCLUDE_JVMCI + if (jvmci_data_size != 0) { + tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_nm_size); + } +#endif + if (total_immut_size != 0) { + tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size); + } + if (dependencies_size != 0) { + tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size); + } + if (nul_chk_table_size != 0) { + tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size); + } + if (handler_table_size != 0) { + tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size); + } + if (scopes_pcs_size != 0) { + 
tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size); + } + if (scopes_data_size != 0) { + tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size); + } #if INCLUDE_JVMCI - if (speculations_size != 0) tty->print_cr(" speculations = %d", speculations_size); - if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %d", jvmci_data_size); + if (speculations_size != 0) { + tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size); + } #endif } }; @@ -440,19 +485,6 @@ const char* nmethod::compile_kind() const { return nullptr; } -// Fill in default values for various flag fields -void nmethod::init_defaults() { - _state = not_installed; - _has_flushed_dependencies = 0; - _load_reported = false; // jvmti state - - _oops_do_mark_link = nullptr; - _osr_link = nullptr; -#if INCLUDE_RTM_OPT - _rtm_state = NoRTM; -#endif -} - #ifdef ASSERT class CheckForOopsClosure : public OopClosure { bool _found_oop = false; @@ -560,31 +592,40 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, code_buffer->finalize_oop_references(method); // create nmethod nmethod* nm = nullptr; + int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)); #if INCLUDE_JVMCI - int jvmci_data_size = compiler->is_jvmci() ? 
jvmci_data->size() : 0; + if (compiler->is_jvmci()) { + nmethod_size += align_up(jvmci_data->size(), oopSize); + } #endif - int nmethod_size = - CodeBlob::allocation_size(code_buffer, sizeof(nmethod)) - + adjust_pcs_size(debug_info->pcs_size()) + + int immutable_data_size = + adjust_pcs_size(debug_info->pcs_size()) + align_up((int)dependencies->size_in_bytes(), oopSize) + align_up(handler_table->size_in_bytes() , oopSize) + align_up(nul_chk_table->size_in_bytes() , oopSize) #if INCLUDE_JVMCI + align_up(speculations_len , oopSize) - + align_up(jvmci_data_size , oopSize) #endif + align_up(debug_info->data_size() , oopSize); + + // First, allocate space for immutable data in C heap. + address immutable_data = nullptr; + if (immutable_data_size > 0) { + immutable_data = (address)os::malloc(immutable_data_size, mtCode); + if (immutable_data == nullptr) { + vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data"); + return nullptr; + } + } { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); nm = new (nmethod_size, comp_level) - nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets, - orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, - oop_maps, - handler_table, - nul_chk_table, - compiler, - comp_level + nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, + compile_id, entry_bci, immutable_data, offsets, orig_pc_offset, + debug_info, dependencies, code_buffer, frame_size, oop_maps, + handler_table, nul_chk_table, compiler, comp_level #if INCLUDE_JVMCI , speculations, speculations_len, @@ -627,6 +668,51 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, return nm; } +// Fill in default values for various fields +void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) { + // avoid uninitialized fields, even for short time periods + _exception_cache = nullptr; + _gc_data = nullptr; + _oops_do_mark_link = nullptr; + +#if 
INCLUDE_RTM_OPT + _rtm_state = NoRTM; +#endif + _is_unloading_state = 0; + _state = not_installed; + + _has_unsafe_access = 0; + _has_method_handle_invokes = 0; + _has_wide_vectors = 0; + _has_monitors = 0; + _has_flushed_dependencies = 0; + _load_reported = 0; // jvmti state + + _deoptimization_status = not_marked; + + // SECT_CONSTS is first in code buffer so the offset should be 0. + int consts_offset = code_buffer->total_offset_of(code_buffer->consts()); + assert(consts_offset == 0, "const_offset: %d", consts_offset); + + _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); + + CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry))); + CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry))); + CHECKED_CAST(_skipped_instructions_size, uint16_t, (code_buffer->total_skipped_instructions_size())); +} + +// Post initialization +void nmethod::post_init() { + clear_unloading_state(); + + finalize_relocations(); + + Universe::heap()->register_nmethod(this); + debug_only(Universe::heap()->verify_nmethod(this)); + + CodeCache::commit(this); +} + // For native wrappers nmethod::nmethod( Method* method, @@ -639,70 +725,67 @@ nmethod::nmethod( ByteSize basic_lock_owner_sp_offset, ByteSize basic_lock_sp_offset, OopMapSet* oop_maps ) - : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), - _is_unlinked(false), + : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), + _gc_epoch(CodeCache::gc_epoch()), _native_receiver_sp_offset(basic_lock_owner_sp_offset), _native_basic_lock_sp_offset(basic_lock_sp_offset), - _is_unloading_state(0) + _is_unlinked(false) { { - int scopes_data_offset = 0; int deoptimize_offset = 0; int deoptimize_mh_offset = 0; 
debug_only(NoSafepointVerifier nsv;) assert_locked_or_safepoint(CodeCache_lock); - init_defaults(); - _comp_level = CompLevel_none; + init_defaults(code_buffer, offsets); + + _osr_entry_point = nullptr; _entry_bci = InvocationEntryBci; - // We have no exception handler or deopt handler make the - // values something that will never match a pc like the nmethod vtable entry - _exception_offset = 0; + _compile_id = compile_id; + _comp_level = CompLevel_none; _orig_pc_offset = 0; - _gc_epoch = CodeCache::gc_epoch(); - - _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); - _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); - _oops_offset = data_offset(); - _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); - scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); - _scopes_pcs_offset = scopes_data_offset; - _dependencies_offset = _scopes_pcs_offset; - _handler_table_offset = _dependencies_offset; - _nul_chk_table_offset = _handler_table_offset; - _skipped_instructions_size = code_buffer->total_skipped_instructions_size(); + + if (offsets->value(CodeOffsets::Exceptions) != -1) { + // Continuation enter intrinsic + _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); + } else { + _exception_offset = 0; + } + // Native wrappers do not have deopt handlers. 
Make the values + // something that will never match a pc like the nmethod vtable entry + _unwind_handler_offset = 0; + + CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize))); + int data_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); #if INCLUDE_JVMCI - _speculations_offset = _nul_chk_table_offset; - _jvmci_data_offset = _speculations_offset; - _nmethod_end_offset = _jvmci_data_offset; -#else - _nmethod_end_offset = _nul_chk_table_offset; + // jvmci_data_size is 0 in native wrapper but we need to set offset + // to correctly calculate metadata_end address + CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset); +#endif + assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset)); + + // native wrapper does not have read-only data but we need unique not null address + _immutable_data = data_end(); + _immutable_data_size = 0; + _nul_chk_table_offset = 0; + _handler_table_offset = 0; + _scopes_pcs_offset = 0; + _scopes_data_offset = 0; +#if INCLUDE_JVMCI + _speculations_offset = 0; #endif - _compile_id = compile_id; - _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); - _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); - _osr_entry_point = nullptr; - _exception_cache = nullptr; - _pc_desc_container.reset_to(nullptr); - _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); + _pc_desc_container.reset_to(nullptr); - _scopes_data_begin = (address) this + scopes_data_offset; - _deopt_handler_begin = (address) this + deoptimize_offset; - _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset; + _scopes_data_begin = this->immutable_data_begin() + _scopes_data_offset; + _deopt_handler_begin = (address)this + deoptimize_offset; + _deopt_mh_handler_begin = (address)this + deoptimize_mh_offset; 
code_buffer->copy_code_and_locs_to(this); code_buffer->copy_values_to(this); - clear_unloading_state(); - - Universe::heap()->register_nmethod(this); - debug_only(Universe::heap()->verify_nmethod(this)); - - CodeCache::commit(this); - - finalize_relocations(); + post_init(); } if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { @@ -760,12 +843,15 @@ void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod); } +// For normal JIT compiled code nmethod::nmethod( Method* method, CompilerType type, int nmethod_size, + int immutable_data_size, int compile_id, int entry_bci, + address immutable_data, CodeOffsets* offsets, int orig_pc_offset, DebugInformationRecorder* debug_info, @@ -783,11 +869,10 @@ nmethod::nmethod( JVMCINMethodData* jvmci_data #endif ) - : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), - _is_unlinked(false), - _native_receiver_sp_offset(in_ByteSize(-1)), - _native_basic_lock_sp_offset(in_ByteSize(-1)), - _is_unloading_state(0) + : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), + _gc_epoch(CodeCache::gc_epoch()), + _osr_link(nullptr), + _is_unlinked(false) { assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); { @@ -797,18 +882,17 @@ nmethod::nmethod( _deopt_handler_begin = (address) this; _deopt_mh_handler_begin = (address) this; - init_defaults(); - _entry_bci = entry_bci; - _compile_id = compile_id; - _comp_level = comp_level; - _orig_pc_offset = orig_pc_offset; - _gc_epoch = CodeCache::gc_epoch(); + + init_defaults(code_buffer, offsets); + + _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); + _entry_bci = entry_bci; + _compile_id = compile_id; + 
_comp_level = comp_level; + _orig_pc_offset = orig_pc_offset; // Section offsets - _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); - _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); - set_ctable_begin(header_begin() + _consts_offset); - _skipped_instructions_size = code_buffer->total_skipped_instructions_size(); + set_ctable_begin(header_begin() + content_offset()); #if INCLUDE_JVMCI if (compiler->is_jvmci()) { @@ -844,33 +928,53 @@ nmethod::nmethod( } } if (offsets->value(CodeOffsets::UnwindHandler) != -1) { - _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); + // C1 generates UnwindHandler at the end of instructions section. + // Calculate positive offset as distance between the start of stubs section + // (which is also the end of instructions section) and the start of the handler. + int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); + CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset)); } else { _unwind_handler_offset = -1; } - _oops_offset = data_offset(); - _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); - int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); + CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize))); + int metadata_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); + +#if INCLUDE_JVMCI + CHECKED_CAST(_jvmci_data_offset, uint16_t, metadata_end_offset); + int jvmci_data_size = compiler->is_jvmci() ? 
jvmci_data->size() : 0; + DEBUG_ONLY( int data_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); ) +#else + DEBUG_ONLY( int data_end_offset = metadata_end_offset; ) +#endif + assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d > %d", + (data_offset() + data_end_offset), nmethod_size); + + _immutable_data_size = immutable_data_size; + if (immutable_data_size > 0) { + assert(immutable_data != nullptr, "required"); + _immutable_data = immutable_data; + } else { + // We need unique not null address + _immutable_data = data_end(); + } + CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize))); + CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize))); + _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); + _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); - _scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize); - _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); - _handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize); - _nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); #if INCLUDE_JVMCI - _speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); - _jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize); - int jvmci_data_size = compiler->is_jvmci() ? 
jvmci_data->size() : 0; - _nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); + _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); + DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); ) #else - _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); + DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); ) #endif - _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); - _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); - _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); - _exception_cache = nullptr; - _scopes_data_begin = (address) this + scopes_data_offset; + assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d", + immutable_data_end_offset, immutable_data_size); + + _scopes_data_begin = _immutable_data + _scopes_data_offset; + // after _scopes_pcs_offset is set _pc_desc_container.reset_to(scopes_pcs_begin()); code_buffer->copy_code_and_locs_to(this); @@ -878,7 +982,6 @@ nmethod::nmethod( code_buffer->copy_values_to(this); debug_info->copy_to(this); dependencies->copy_to(this); - clear_unloading_state(); #if INCLUDE_JVMCI if (compiler->is_jvmci()) { @@ -887,13 +990,6 @@ nmethod::nmethod( } #endif - Universe::heap()->register_nmethod(this); - debug_only(Universe::heap()->verify_nmethod(this)); - - CodeCache::commit(this); - - finalize_relocations(); - // Copy contents of ExceptionHandlerTable to nmethod handler_table->copy_to(this); nul_chk_table->copy_to(this); @@ -905,10 +1001,12 @@ nmethod::nmethod( } #endif + post_init(); + // we use the information of entry points to find out if a method is // static or non static assert(compiler->is_c2() || compiler->is_jvmci() || - _method->is_static() == (entry_point() == _verified_entry_point), + 
_method->is_static() == (entry_point() == verified_entry_point()), " entry points must be same for static methods and vice versa"); } } @@ -1466,10 +1564,13 @@ void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) { ec = next; } + if (_immutable_data != data_end()) { + os::free(_immutable_data); + _immutable_data = data_end(); // Valid not null address + } if (unregister_nmethod) { Universe::heap()->unregister_nmethod(this); } - CodeCache::unregister_old_nmethod(this); CodeBlob::purge(free_code_cache_data, unregister_nmethod); @@ -2450,35 +2551,41 @@ void nmethod::print(outputStream* st) const { p2i(metadata_begin()), p2i(metadata_end()), metadata_size()); - if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - p2i(scopes_data_begin()), - p2i(scopes_data_end()), - scopes_data_size()); - if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - p2i(scopes_pcs_begin()), - p2i(scopes_pcs_end()), - scopes_pcs_size()); +#if INCLUDE_JVMCI + if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + p2i(jvmci_data_begin()), + p2i(jvmci_data_end()), + jvmci_data_size()); +#endif + if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + p2i(immutable_data_begin()), + p2i(immutable_data_end()), + immutable_data_size()); if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", p2i(dependencies_begin()), p2i(dependencies_end()), dependencies_size()); - if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - p2i(handler_table_begin()), - p2i(handler_table_end()), - handler_table_size()); if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", p2i(nul_chk_table_begin()), p2i(nul_chk_table_end()), nul_chk_table_size()); + if 
(handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + p2i(handler_table_begin()), + p2i(handler_table_end()), + handler_table_size()); + if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + p2i(scopes_pcs_begin()), + p2i(scopes_pcs_end()), + scopes_pcs_size()); + if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + p2i(scopes_data_begin()), + p2i(scopes_data_end()), + scopes_data_size()); #if INCLUDE_JVMCI if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", p2i(speculations_begin()), p2i(speculations_end()), speculations_size()); - if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - p2i(jvmci_data_begin()), - p2i(jvmci_data_end()), - jvmci_data_size()); #endif } diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index f428aa4ef3d..3d91b487e5c 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -74,8 +74,26 @@ class nmethod : public CompiledMethod { uint64_t _gc_epoch; - // To support simple linked-list chaining of nmethods: - nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head + // To reduce header size union fields which usages do not overlap. + union { + // To support simple linked-list chaining of nmethods: + nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head + struct { + // These are used for compiled synchronized native methods to + // locate the owner and stack slot for the BasicLock. They are + // needed because there is no debug information for compiled native + // wrappers and the oop maps are insufficient to allow + // frame::retrieve_receiver() to work. Currently they are expected + // to be byte offsets from the Java stack pointer for maximum code + // sharing between platforms. 
JVMTI's GetLocalInstance() uses these + // offsets to find the receiver for non-static native wrapper frames. + ByteSize _native_receiver_sp_offset; + ByteSize _native_basic_lock_sp_offset; + }; + }; + + // nmethod's read-only data + address _immutable_data; // STW two-phase nmethod root processing helpers. // @@ -192,34 +210,37 @@ class nmethod : public CompiledMethod { oops_do_mark_link* volatile _oops_do_mark_link; // offsets for entry points - address _entry_point; // entry point with class check - address _verified_entry_point; // entry point without class check - address _osr_entry_point; // entry point for on stack replacement + address _osr_entry_point; // entry point for on stack replacement + uint16_t _entry_offset; // entry point with class check + uint16_t _verified_entry_offset; // entry point without class check + int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method + int _immutable_data_size; - bool _is_unlinked; + // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer + int _stub_offset; + // Offsets for different stubs section parts + int _exception_offset; + // Offset (from insts_end) of the unwind handler if it exists + int16_t _unwind_handler_offset; - // Shared fields for all nmethod's - int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method + uint16_t _skipped_instructions_size; - // Offsets for different nmethod parts - int _exception_offset; - // Offset of the unwind handler if it exists - int _unwind_handler_offset; + // Offsets in mutable data section + // _oops_offset == _data_offset, offset where embedded oop table begins (inside data) + uint16_t _metadata_offset; // embedded meta data table +#if INCLUDE_JVMCI + uint16_t _jvmci_data_offset; +#endif - int _consts_offset; - int _stub_offset; - int _oops_offset; // offset to where embedded oop table begins (inside data) - int _metadata_offset; // embedded meta data table - int 
_scopes_data_offset; - int _scopes_pcs_offset; - int _dependencies_offset; - int _handler_table_offset; - int _nul_chk_table_offset; + // Offset in immutable data section + // _dependencies_offset == 0 + uint16_t _nul_chk_table_offset; + uint16_t _handler_table_offset; // This table could be big in C1 code + int _scopes_pcs_offset; + int _scopes_data_offset; #if INCLUDE_JVMCI - int _speculations_offset; - int _jvmci_data_offset; + int _speculations_offset; #endif - int _nmethod_end_offset; int code_offset() const { return (address) code_begin() - header_begin(); } @@ -227,7 +248,8 @@ class nmethod : public CompiledMethod { // pc during a deopt. int _orig_pc_offset; - int _compile_id; // which compilation made this nmethod + int _compile_id; // which compilation made this nmethod + CompLevel _comp_level; // compilation level (s1) #if INCLUDE_RTM_OPT // RTM state at compile time. Used during deoptimization to decide @@ -235,32 +257,24 @@ class nmethod : public CompiledMethod { RTMState _rtm_state; #endif - // These are used for compiled synchronized native methods to - // locate the owner and stack slot for the BasicLock. They are - // needed because there is no debug information for compiled native - // wrappers and the oop maps are insufficient to allow - // frame::retrieve_receiver() to work. Currently they are expected - // to be byte offsets from the Java stack pointer for maximum code - // sharing between platforms. JVMTI's GetLocalInstance() uses these - // offsets to find the receiver for non-static native wrapper frames. 
- ByteSize _native_receiver_sp_offset; - ByteSize _native_basic_lock_sp_offset; - - CompLevel _comp_level; // compilation level - // Local state used to keep track of whether unloading is happening or not volatile uint8_t _is_unloading_state; // protected by CodeCache_lock bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) + bool _is_unlinked; // used by jvmti to track if an event has been posted for this nmethod. bool _load_reported; // Protected by CompiledMethod_lock volatile signed char _state; // {not_installed, in_use, not_used, not_entrant} - int _skipped_instructions_size; + // Initialize fields to their default values + void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets); + + // Post initialization + void post_init(); // For native wrappers nmethod(Method* method, @@ -274,12 +288,14 @@ class nmethod : public CompiledMethod { ByteSize basic_lock_sp_offset, /* synchronized natives only */ OopMapSet* oop_maps); - // Creation support + // For normal JIT compiled code nmethod(Method* method, CompilerType type, int nmethod_size, + int immutable_data_size, int compile_id, int entry_bci, + address immutable_data, CodeOffsets* offsets, int orig_pc_offset, DebugInformationRecorder *recorder, @@ -317,15 +333,6 @@ class nmethod : public CompiledMethod { // Inform external interfaces that a compiled method has been unloaded void post_compiled_method_unload(); - // Initialize fields to their default values - void init_defaults(); - - // Offsets - int content_offset() const { return content_begin() - header_begin(); } - int data_offset() const { return _data_offset; } - - address header_end() const { return (address) header_begin() + header_size(); } - public: // create nmethod with entry_bci static nmethod* new_nmethod(const methodHandle& method, @@ -373,67 +380,95 @@ class nmethod : public CompiledMethod { bool is_osr_method() const { return _entry_bci != InvocationEntryBci; } // boundaries for different parts - 
address consts_begin () const { return header_begin() + _consts_offset ; } - address consts_end () const { return code_begin() ; } - address stub_begin () const { return header_begin() + _stub_offset ; } - address stub_end () const { return header_begin() + _oops_offset ; } - address exception_begin () const { return header_begin() + _exception_offset ; } - address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; } - oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; } - oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; } - - Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; } - Metadata** metadata_end () const { return (Metadata**) _scopes_data_begin; } - - address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } - PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); } - PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; } - address dependencies_begin () const { return header_begin() + _dependencies_offset ; } - address dependencies_end () const { return header_begin() + _handler_table_offset ; } - address handler_table_begin () const { return header_begin() + _handler_table_offset ; } - address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; } - address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; } - - int skipped_instructions_size () const { return _skipped_instructions_size ; } + address consts_begin () const { return content_begin(); } + address consts_end () const { return code_begin() ; } + address insts_begin () const { return code_begin() ; } + address insts_end () const { return header_begin() + _stub_offset ; } + address stub_begin () const { return header_begin() + _stub_offset ; } + address stub_end () const { return 
data_begin() ; } + address exception_begin () const { return header_begin() + _exception_offset ; } + address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; } + + // mutable data + oop* oops_begin () const { return (oop*) data_begin(); } + oop* oops_end () const { return (oop*) (data_begin() + _metadata_offset) ; } + + Metadata** metadata_begin () const { return (Metadata**) (data_begin() + _metadata_offset) ; } + +#if INCLUDE_JVMCI + Metadata** metadata_end () const { return (Metadata**) (data_begin() + _jvmci_data_offset) ; } + address jvmci_data_begin () const { return data_begin() + _jvmci_data_offset ; } + address jvmci_data_end () const { return data_end(); } +#else + Metadata** metadata_end () const { return (Metadata**) data_end(); } +#endif + + // immutable data + address immutable_data_begin () const { return _immutable_data; } + address immutable_data_end () const { return _immutable_data + _immutable_data_size ; } + address dependencies_begin () const { return _immutable_data; } + address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; } + address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; } + address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; } + address handler_table_begin () const { return _immutable_data + _handler_table_offset; } + address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; } + PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; } + PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; } + address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; } #if INCLUDE_JVMCI - address nul_chk_table_end () const { return header_begin() + _speculations_offset ; } - address speculations_begin () const { return header_begin() + _speculations_offset ; 
} - address speculations_end () const { return header_begin() + _jvmci_data_offset ; } - address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; } - address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; } + address scopes_data_end () const { return _immutable_data + _speculations_offset ; } + address speculations_begin () const { return _immutable_data + _speculations_offset ; } + address speculations_end () const { return immutable_data_end(); } #else - address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } + address scopes_data_end () const { return immutable_data_end(); } #endif // Sizes - int oops_size () const { return (address) oops_end () - (address) oops_begin (); } - int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); } - int dependencies_size () const { return dependencies_end () - dependencies_begin (); } + int immutable_data_size() const { return _immutable_data_size; } + int consts_size () const { return int( consts_end () - consts_begin ()); } + int insts_size () const { return int( insts_end () - insts_begin ()); } + int stub_size () const { return int( stub_end () - stub_begin ()); } + int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); } + int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); } + int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); } + int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); } + int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); } + int handler_table_size () const { return int( handler_table_end() - handler_table_begin()); } + int nul_chk_table_size () const { return int( nul_chk_table_end() - nul_chk_table_begin()); } #if INCLUDE_JVMCI - int speculations_size () const { return speculations_end () - 
speculations_begin (); } - int jvmci_data_size () const { return jvmci_data_end () - jvmci_data_begin (); } + int speculations_size () const { return int( speculations_end () - speculations_begin ()); } + int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); } #endif int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; } int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; } + int skipped_instructions_size () const { return _skipped_instructions_size; } int total_size () const; // Containment - bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } - bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } - bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } - bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } + bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } + // Returns true if a given address is in the 'insts' section. The method + // insts_contains_inclusive() is end-inclusive. 
+ bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); } + bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); } + bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); } + bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } + bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } + bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } + bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } + bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); } + bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); } // entry points - address entry_point() const { return _entry_point; } // normal entry point - address verified_entry_point() const { return _verified_entry_point; } // if klass is correct + address entry_point() const { return code_begin() + _entry_offset; } // normal entry point + address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct // flag accessing and manipulation - bool is_not_installed() const { return _state == not_installed; } - bool is_in_use() const { return _state <= in_use; } - bool is_not_entrant() const { return _state == not_entrant; } + bool is_not_installed() const { return _state == not_installed; } + bool is_in_use() const { return _state <= in_use; } + bool is_not_entrant() const { return _state == not_entrant; } void clear_unloading_state(); // Heuristically deduce an nmethod isn't worth keeping around @@ -509,11 +544,11 @@ class nmethod : public CompiledMethod { void fix_oop_relocations() { 
fix_oop_relocations(nullptr, nullptr, false); } // On-stack replacement support - int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } - address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } - void invalidate_osr_method(); - nmethod* osr_link() const { return _osr_link; } - void set_osr_link(nmethod *n) { _osr_link = n; } + int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } + address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } + nmethod* osr_link() const { return _osr_link; } + void set_osr_link(nmethod *n) { _osr_link = n; } + void invalidate_osr_method(); // Verify calls to dead methods have been cleaned. void verify_clean_inline_caches(); @@ -692,16 +727,17 @@ class nmethod : public CompiledMethod { // JVMTI's GetLocalInstance() support ByteSize native_receiver_sp_offset() { + assert(is_native_method(), "sanity"); return _native_receiver_sp_offset; } ByteSize native_basic_lock_sp_offset() { + assert(is_native_method(), "sanity"); return _native_basic_lock_sp_offset; } // support for code generation - static ByteSize verified_entry_point_offset() { return byte_offset_of(nmethod, _verified_entry_point); } - static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); } - static ByteSize state_offset() { return byte_offset_of(nmethod, _state); } + static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); } + static ByteSize state_offset() { return byte_offset_of(nmethod, _state); } virtual void metadata_do(MetadataClosure* f); diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp index 4e8b55f1a36..135606cfaa2 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.hpp +++ b/src/hotspot/share/compiler/compilerDefinitions.hpp @@ -1,5 +1,5 
@@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,7 +100,7 @@ inline bool is_compile(int comp_level) { // States of Restricted Transactional Memory usage. -enum RTMState { +enum RTMState: u1 { NoRTM = 0x2, // Don't use RTM UseRTM = 0x1, // Use RTM ProfileRTM = 0x0 // Use RTM with abort ratio calculation diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index a4195a04f18..b69bc01305d 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -281,7 +281,7 @@ nonstatic_field(MethodData, _backedge_mask, int) \ nonstatic_field(MethodData, _jvmci_ir_size, int) \ \ - nonstatic_field(nmethod, _verified_entry_point, address) \ + nonstatic_field(nmethod, _verified_entry_offset, u2) \ nonstatic_field(nmethod, _comp_level, CompLevel) \ \ nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \ diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp index da02eecae49..1d7e5f409c1 100644 --- a/src/hotspot/share/memory/heap.hpp +++ b/src/hotspot/share/memory/heap.hpp @@ -38,8 +38,8 @@ class HeapBlock { public: struct Header { - size_t _length; // the length in segments - bool _used; // Used bit + uint32_t _length; // the length in segments + bool _used; // Used bit }; protected: @@ -51,9 +51,11 @@ class HeapBlock { public: // Initialization - void initialize(size_t length) { _header._length = length; set_used(); } + void initialize(size_t length) { set_length(length); set_used(); } // Merging/splitting - void set_length(size_t length) { + _header._length = checked_cast<uint32_t>(length); + } // Accessors void* allocated_space() const { 
return (void*)(this + 1); } diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 4a7e69ca8b4..f694c522b72 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -1591,7 +1591,7 @@ CodeBlob* WhiteBox::allocate_code_blob(int size, CodeBlobType blob_type) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type); if (blob != nullptr) { - ::new (blob) BufferBlob("WB::DummyBlob", full_size); + ::new (blob) BufferBlob("WB::DummyBlob", CodeBlobKind::Buffer, full_size); } } // Track memory usage statistic after releasing CodeCache_lock diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index fd6968634da..0dac83f2b6f 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -501,7 +501,7 @@ nonstatic_field(CodeHeap, _segmap, VirtualSpace) \ nonstatic_field(CodeHeap, _log2_segment_size, int) \ nonstatic_field(HeapBlock, _header, HeapBlock::Header) \ - nonstatic_field(HeapBlock::Header, _length, size_t) \ + nonstatic_field(HeapBlock::Header, _length, uint32_t) \ nonstatic_field(HeapBlock::Header, _used, bool) \ \ /**********************************/ \ @@ -612,19 +612,18 @@ \ nonstatic_field(CodeBlob, _name, const char*) \ nonstatic_field(CodeBlob, _size, int) \ - nonstatic_field(CodeBlob, _header_size, int) \ - nonstatic_field(CodeBlob, _frame_complete_offset, int) \ + nonstatic_field(CodeBlob, _header_size, u2) \ + nonstatic_field(CodeBlob, _relocation_size, int) \ + nonstatic_field(CodeBlob, _content_offset, int) \ + nonstatic_field(CodeBlob, _code_offset, int) \ + nonstatic_field(CodeBlob, _frame_complete_offset, int16_t) \ nonstatic_field(CodeBlob, _data_offset, int) \ nonstatic_field(CodeBlob, _frame_size, int) \ nonstatic_field(CodeBlob, _oop_maps, ImmutableOopMapSet*) \ - nonstatic_field(CodeBlob, _code_begin, address) \ - 
nonstatic_field(CodeBlob, _code_end, address) \ - nonstatic_field(CodeBlob, _content_begin, address) \ - nonstatic_field(CodeBlob, _data_end, address) \ + nonstatic_field(CodeBlob, _caller_must_gc_arguments, bool) \ \ nonstatic_field(DeoptimizationBlob, _unpack_offset, int) \ \ - nonstatic_field(RuntimeStub, _caller_must_gc_arguments, bool) \ \ /********************************************************/ \ /* CompiledMethod (NOTE: incomplete, but only a little) */ \ @@ -646,17 +645,16 @@ nonstatic_field(nmethod, _exception_offset, int) \ nonstatic_field(nmethod, _orig_pc_offset, int) \ nonstatic_field(nmethod, _stub_offset, int) \ - nonstatic_field(nmethod, _consts_offset, int) \ - nonstatic_field(nmethod, _oops_offset, int) \ - nonstatic_field(nmethod, _metadata_offset, int) \ - nonstatic_field(nmethod, _scopes_pcs_offset, int) \ - nonstatic_field(nmethod, _dependencies_offset, int) \ - nonstatic_field(nmethod, _handler_table_offset, int) \ - nonstatic_field(nmethod, _nul_chk_table_offset, int) \ - nonstatic_field(nmethod, _nmethod_end_offset, int) \ - nonstatic_field(nmethod, _entry_point, address) \ - nonstatic_field(nmethod, _verified_entry_point, address) \ + nonstatic_field(nmethod, _metadata_offset, u2) \ + nonstatic_field(nmethod, _scopes_pcs_offset, int) \ + nonstatic_field(nmethod, _scopes_data_offset, int) \ + nonstatic_field(nmethod, _handler_table_offset, u2) \ + nonstatic_field(nmethod, _nul_chk_table_offset, u2) \ + nonstatic_field(nmethod, _entry_offset, u2) \ + nonstatic_field(nmethod, _verified_entry_offset, u2) \ nonstatic_field(nmethod, _osr_entry_point, address) \ + nonstatic_field(nmethod, _immutable_data, address) \ + nonstatic_field(nmethod, _immutable_data_size, int) \ nonstatic_field(nmethod, _compile_id, int) \ nonstatic_field(nmethod, _comp_level, CompLevel) \ \ @@ -1199,6 +1197,7 @@ declare_integer_type(ssize_t) \ declare_integer_type(intx) \ declare_integer_type(intptr_t) \ + declare_integer_type(int16_t) \ declare_integer_type(int64_t) 
\ declare_unsigned_integer_type(uintx) \ declare_unsigned_integer_type(uintptr_t) \ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java index 976af5bac20..3786a8fc51e 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java @@ -25,10 +25,12 @@ import sun.jvm.hotspot.compiler.ImmutableOopMap; import sun.jvm.hotspot.compiler.ImmutableOopMapSet; import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.oops.CIntField; import sun.jvm.hotspot.runtime.VM; import sun.jvm.hotspot.runtime.VMObject; import sun.jvm.hotspot.types.AddressField; import sun.jvm.hotspot.types.CIntegerField; +import sun.jvm.hotspot.types.JShortField; import sun.jvm.hotspot.types.Type; import sun.jvm.hotspot.types.TypeDataBase; import sun.jvm.hotspot.utilities.Assert; @@ -41,12 +43,11 @@ public class CodeBlob extends VMObject { private static AddressField nameField; private static CIntegerField sizeField; - private static CIntegerField headerSizeField; - private static AddressField contentBeginField; - private static AddressField codeBeginField; - private static AddressField codeEndField; - private static AddressField dataEndField; - private static CIntegerField frameCompleteOffsetField; + private static CIntegerField relocationSizeField; + private static CIntField headerSizeField; + private static CIntegerField contentOffsetField; + private static CIntegerField codeOffsetField; + private static CIntField frameCompleteOffsetField; private static CIntegerField dataOffsetField; private static CIntegerField frameSizeField; private static AddressField oopMapsField; @@ -62,12 +63,11 @@ private static void initialize(TypeDataBase db) { nameField = type.getAddressField("_name"); sizeField = type.getCIntegerField("_size"); - headerSizeField = type.getCIntegerField("_header_size"); - 
frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset"); - contentBeginField = type.getAddressField("_content_begin"); - codeBeginField = type.getAddressField("_code_begin"); - codeEndField = type.getAddressField("_code_end"); - dataEndField = type.getAddressField("_data_end"); + relocationSizeField = type.getCIntegerField("_relocation_size"); + headerSizeField = new CIntField(type.getCIntegerField("_header_size"), 0); + contentOffsetField = type.getCIntegerField("_content_offset"); + codeOffsetField = type.getCIntegerField("_code_offset"); + frameCompleteOffsetField = new CIntField(type.getCIntegerField("_frame_complete_offset"), 0); dataOffsetField = type.getCIntegerField("_data_offset"); frameSizeField = type.getCIntegerField("_frame_size"); oopMapsField = type.getAddressField("_oop_maps"); @@ -90,17 +90,22 @@ public void update(Observable o, Object data) { public Address headerEnd() { return getAddress().addOffsetTo(getHeaderSize()); } - public Address contentBegin() { return contentBeginField.getValue(addr); } + public Address contentBegin() { return headerBegin().addOffsetTo(getContentOffset()); } public Address contentEnd() { return headerBegin().addOffsetTo(getDataOffset()); } - public Address codeBegin() { return codeBeginField.getValue(addr); } + public Address codeBegin() { return headerBegin().addOffsetTo(getCodeOffset()); } - public Address codeEnd() { return codeEndField.getValue(addr); } + public Address codeEnd() { return headerBegin().addOffsetTo(getDataOffset()); } public Address dataBegin() { return headerBegin().addOffsetTo(getDataOffset()); } - public Address dataEnd() { return dataEndField.getValue(addr); } + public Address dataEnd() { return headerBegin().addOffsetTo(getSize()); } + + // Offsets + public int getContentOffset() { return (int) contentOffsetField.getValue(addr); } + + public int getCodeOffset() { return (int) codeOffsetField.getValue(addr); } public long getFrameCompleteOffset() { return 
frameCompleteOffsetField.getValue(addr); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java index 6263f8aa22c..c8c95fd0c1b 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java @@ -40,30 +40,30 @@ public class NMethod extends CompiledMethod { private static CIntegerField entryBCIField; /** To support simple linked-list chaining of nmethods */ private static AddressField osrLinkField; + private static AddressField immutableDataField; + private static CIntegerField immutableDataSizeField; /** Offsets for different nmethod parts */ private static CIntegerField exceptionOffsetField; private static CIntegerField origPCOffsetField; private static CIntegerField stubOffsetField; - private static CIntegerField oopsOffsetField; - private static CIntegerField metadataOffsetField; + private static CIntField metadataOffsetField; + private static CIntField handlerTableOffsetField; + private static CIntField nulChkTableOffsetField; private static CIntegerField scopesPCsOffsetField; - private static CIntegerField dependenciesOffsetField; - private static CIntegerField handlerTableOffsetField; - private static CIntegerField nulChkTableOffsetField; - private static CIntegerField nmethodEndOffsetField; + private static CIntegerField scopesDataOffsetField; /** Offsets for entry points */ /** Entry point with class check */ - private static AddressField entryPointField; + private static CIntField entryOffsetField; /** Entry point without class check */ - private static AddressField verifiedEntryPointField; + private static CIntField verifiedEntryOffsetField; /** Entry point for on stack replacement */ private static AddressField osrEntryPointField; // FIXME: add access to flags (how?) 
- private static CIntegerField compLevelField; + private static CIntField compLevelField; static { VM.registerVMInitializedObserver(new Observer() { @@ -78,21 +78,21 @@ private static void initialize(TypeDataBase db) { entryBCIField = type.getCIntegerField("_entry_bci"); osrLinkField = type.getAddressField("_osr_link"); + immutableDataField = type.getAddressField("_immutable_data"); + immutableDataSizeField = type.getCIntegerField("_immutable_data_size"); exceptionOffsetField = type.getCIntegerField("_exception_offset"); origPCOffsetField = type.getCIntegerField("_orig_pc_offset"); stubOffsetField = type.getCIntegerField("_stub_offset"); - oopsOffsetField = type.getCIntegerField("_oops_offset"); - metadataOffsetField = type.getCIntegerField("_metadata_offset"); + metadataOffsetField = new CIntField(type.getCIntegerField("_metadata_offset"), 0); scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset"); - dependenciesOffsetField = type.getCIntegerField("_dependencies_offset"); - handlerTableOffsetField = type.getCIntegerField("_handler_table_offset"); - nulChkTableOffsetField = type.getCIntegerField("_nul_chk_table_offset"); - nmethodEndOffsetField = type.getCIntegerField("_nmethod_end_offset"); - entryPointField = type.getAddressField("_entry_point"); - verifiedEntryPointField = type.getAddressField("_verified_entry_point"); + scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset"); + handlerTableOffsetField = new CIntField(type.getCIntegerField("_handler_table_offset"), 0); + nulChkTableOffsetField = new CIntField(type.getCIntegerField("_nul_chk_table_offset"), 0); + entryOffsetField = new CIntField(type.getCIntegerField("_entry_offset"), 0); + verifiedEntryOffsetField = new CIntField(type.getCIntegerField("_verified_entry_offset"), 0); osrEntryPointField = type.getAddressField("_osr_entry_point"); - compLevelField = type.getCIntegerField("_comp_level"); + compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0); pcDescSize = 
db.lookupType("PcDesc").getSize(); } @@ -113,26 +113,32 @@ public Address getAddress() { /** Boundaries for different parts */ public Address constantsBegin() { return contentBegin(); } - public Address constantsEnd() { return getEntryPoint(); } + public Address constantsEnd() { return codeBegin(); } public Address instsBegin() { return codeBegin(); } public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); } public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); } public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); } - public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); } - public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); } - public Address oopsEnd() { return headerBegin().addOffsetTo(getMetadataOffset()); } - public Address metadataBegin() { return headerBegin().addOffsetTo(getMetadataOffset()); } - public Address metadataEnd() { return scopesDataBegin(); } - public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); } - public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); } - public Address scopesPCsEnd() { return headerBegin().addOffsetTo(getDependenciesOffset()); } - public Address dependenciesBegin() { return headerBegin().addOffsetTo(getDependenciesOffset()); } - public Address dependenciesEnd() { return headerBegin().addOffsetTo(getHandlerTableOffset()); } - public Address handlerTableBegin() { return headerBegin().addOffsetTo(getHandlerTableOffset()); } - public Address handlerTableEnd() { return headerBegin().addOffsetTo(getNulChkTableOffset()); } - public Address nulChkTableBegin() { return headerBegin().addOffsetTo(getNulChkTableOffset()); } - public Address nulChkTableEnd() { return headerBegin().addOffsetTo(getNMethodEndOffset()); } - + public Address stubEnd() { return dataBegin(); } + public Address oopsBegin() { return dataBegin(); } + public 
Address oopsEnd() { return dataBegin().addOffsetTo(getMetadataOffset()); } + public Address metadataBegin() { return dataBegin().addOffsetTo(getMetadataOffset()); } + + public Address metadataEnd() { return dataEnd(); } + + public Address immutableDataBegin() { return immutableDataField.getValue(addr); } + public Address immutableDataEnd() { return immutableDataBegin().addOffsetTo(getImmutableDataSize()); } + public Address dependenciesBegin() { return immutableDataBegin(); } + public Address dependenciesEnd() { return immutableDataBegin().addOffsetTo(getHandlerTableOffset()); } + public Address handlerTableBegin() { return immutableDataBegin().addOffsetTo(getHandlerTableOffset()); } + public Address handlerTableEnd() { return immutableDataBegin().addOffsetTo(getNulChkTableOffset()); } + public Address nulChkTableBegin() { return immutableDataBegin().addOffsetTo(getNulChkTableOffset()); } + public Address nulChkTableEnd() { return immutableDataBegin().addOffsetTo(getScopesDataOffset()); } + public Address scopesDataBegin() { return immutableDataBegin().addOffsetTo(getScopesDataOffset()); } + public Address scopesDataEnd() { return immutableDataBegin().addOffsetTo(getScopesPCsOffset()); } + public Address scopesPCsBegin() { return immutableDataBegin().addOffsetTo(getScopesPCsOffset()); } + public Address scopesPCsEnd() { return immutableDataEnd(); } + + public int getImmutableDataSize() { return (int) immutableDataSizeField.getValue(addr); } public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); } public int instsSize() { return (int) instsEnd() .minus(instsBegin()); } public int stubSize() { return (int) stubEnd() .minus(stubBegin()); } @@ -149,7 +155,10 @@ public int totalSize() { return constantsSize() + instsSize() + - stubSize() + + stubSize(); + } + public int immutableDataSize() { + return scopesDataSize() + scopesPCsSize() + dependenciesSize() + @@ -171,8 +180,8 @@ public int totalSize() { public int getMetadataLength() { return 
(int) (metadataSize() / VM.getVM().getOopSize()); } /** Entry points */ - public Address getEntryPoint() { return entryPointField.getValue(addr); } - public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); } + public Address getEntryPoint() { return codeBegin().addOffsetTo(getEntryPointOffset()); } + public Address getVerifiedEntryPoint() { return codeBegin().addOffsetTo(getVerifiedEntryPointOffset()); } /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */ public OopHandle getOopAt(int index) { @@ -416,10 +425,10 @@ public Map getSafepoints() { // FIXME: add isPatchableAt() /** Support for code generation. Only here for proof-of-concept. */ - public static int getEntryPointOffset() { return (int) entryPointField.getOffset(); } - public static int getVerifiedEntryPointOffset() { return (int) verifiedEntryPointField.getOffset(); } - public static int getOSREntryPointOffset() { return (int) osrEntryPointField.getOffset(); } - public static int getEntryBCIOffset() { return (int) entryBCIField.getOffset(); } + public int getEntryPointOffset() { return (int) entryOffsetField.getValue(addr); } + public int getVerifiedEntryPointOffset() { return (int) verifiedEntryOffsetField.getValue(addr);} + public static int getOSREntryPointOffset() { return (int) osrEntryPointField.getOffset(); } + public static int getEntryBCIOffset() { return (int) entryBCIField.getOffset(); } public void print() { printOn(System.out); @@ -498,12 +507,10 @@ public void dumpReplayData(PrintStream out) { private int getEntryBCI() { return (int) entryBCIField .getValue(addr); } private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); } private int getStubOffset() { return (int) stubOffsetField .getValue(addr); } - private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); } private int getMetadataOffset() { return (int) metadataOffsetField .getValue(addr); } + private int getScopesDataOffset() { 
return (int) scopesDataOffsetField .getValue(addr); } private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); } - private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); } private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); } private int getNulChkTableOffset() { return (int) nulChkTableOffsetField .getValue(addr); } - private int getNMethodEndOffset() { return (int) nmethodEndOffsetField .getValue(addr); } private int getCompLevel() { return (int) compLevelField .getValue(addr); } } diff --git a/test/hotspot/jtreg/compiler/c1/TestLinearScanOrderMain.java b/test/hotspot/jtreg/compiler/c1/TestLinearScanOrderMain.java index 4165fa4d0e1..5b262f5cfec 100644 --- a/test/hotspot/jtreg/compiler/c1/TestLinearScanOrderMain.java +++ b/test/hotspot/jtreg/compiler/c1/TestLinearScanOrderMain.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ * @bug 8207355 * @compile TestLinearScanOrder.jasm * @run main/othervm -Xcomp -XX:+TieredCompilation -XX:TieredStopAtLevel=1 + * -XX:+IgnoreUnrecognizedVMOptions -XX:NMethodSizeLimit=655360 * -XX:CompileCommand=compileonly,compiler.c1.TestLinearScanOrder::test * compiler.c1.TestLinearScanOrderMain */