Merge upstream-jdk
corretto-github-robot committed Oct 2, 2023
2 parents d8a1974 + eeb63cd commit a8ebe1c
Showing 38 changed files with 725 additions and 282 deletions.
6 changes: 0 additions & 6 deletions make/autoconf/libraries.m4
@@ -108,12 +108,6 @@ AC_DEFUN([LIB_SETUP_JVM_LIBS],
BASIC_JVM_LIBS_$1="$BASIC_JVM_LIBS_$1 -latomic"
fi
fi
# Because RISC-V only has word-sized atomics, it requires libatomic where
# other common architectures do not, so link libatomic by default.
if test "x$OPENJDK_$1_OS" = xlinux && test "x$OPENJDK_$1_CPU" = xriscv64; then
BASIC_JVM_LIBS_$1="$BASIC_JVM_LIBS_$1 -latomic"
fi
])

################################################################################
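For context (not part of this commit): on riscv64, GCC releases without full inline support for sub-word atomics lower the corresponding __atomic builtins to out-of-line calls in libatomic, which is why -latomic was linked unconditionally here. With the LR/SC fallbacks added to atomic_linux_riscv.hpp in this change, and with compilers that do expand these builtins inline (clang, or GCC 13.2 and later per the header below), the unconditional link is no longer needed. A minimal stand-alone illustration, with hypothetical names, of the kind of call that previously pulled in libatomic:

#include <stdint.h>

// Hypothetical example; on riscv64 an older GCC typically lowers this to a
// call to __atomic_compare_exchange_1 in libatomic, while clang and newer
// GCC expand it inline.
bool cas_byte(volatile uint8_t* dest, uint8_t expected, uint8_t desired) {
  return __atomic_compare_exchange_n(dest, &expected, desired,
                                     /* weak */ false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}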
5 changes: 1 addition & 4 deletions make/conf/jib-profiles.js
@@ -945,10 +945,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: input.build_os,
target_cpu: input.build_cpu,
dependencies: [ "jtreg", "gnumake", "boot_jdk", "devkit", "jib" ],
labels: "test",
environment: {
"JT_JAVA": common.boot_jdk_home
}
labels: "test"
}
};
profiles = concatObjects(profiles, testOnlyProfiles);
110 changes: 75 additions & 35 deletions src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
@@ -33,10 +33,23 @@
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

#if defined(__clang_major__)
#define FULL_COMPILER_ATOMIC_SUPPORT
#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ >= 2))
#define FULL_COMPILER_ATOMIC_SUPPORT
#endif

template<size_t byte_size>
struct Atomic::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {

#ifndef FULL_COMPILER_ATOMIC_SUPPORT
// If sub-word add-then-fetch is ever needed with an older compiler,
// it must be implemented here, since libatomic is no longer linked.
STATIC_ASSERT(byte_size >= 4);
#endif

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
@@ -55,76 +68,101 @@ struct Atomic::PlatformAdd {
}
};

template<size_t byte_size>
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
template<>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}

T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
uint32_t volatile* aligned_dst = (uint32_t volatile*)(((uintptr_t)dest) & (~((uintptr_t)0x3)));
int shift = 8 * (((uintptr_t)dest) - ((uintptr_t)aligned_dst)); // 0, 8, 16, 24

uint64_t mask = 0xfful << shift; // 0x00000000..FF..
uint64_t remask = ~mask; // 0xFFFFFFFF..00..

uint64_t w_cv = ((uint64_t)(unsigned char)compare_value) << shift; // widen to 64-bit 0x00000000..CC..
uint64_t w_ev = ((uint64_t)(unsigned char)exchange_value) << shift; // widen to 64-bit 0x00000000..EE..

uint64_t old_value;
uint64_t rc_temp;

__asm__ __volatile__ (
"1: lr.w %0, %2 \n\t"
" and %1, %0, %5 \n\t" // ignore unrelated bytes and widen to 64-bit 0x00000000..XX..
" bne %1, %3, 2f \n\t" // compare 64-bit w_cv
" and %1, %0, %6 \n\t" // remove old byte
" or %1, %1, %4 \n\t" // add new byte
" sc.w %1, %1, %2 \n\t" // store new word
" bnez %1, 1b \n\t"
"2: \n\t"
: /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc_temp), /*%2*/"+A" (*aligned_dst)
: /*%3*/"r" (w_cv), /*%4*/"r" (w_ev), /*%5*/"r" (mask), /*%6*/"r" (remask)
: "memory" );

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
return res;

return (T)((old_value & mask) >> shift);
}
#endif
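The LR/SC sequence above is the standard widen-to-word trick for emulating a byte-sized compare-and-exchange on hardware whose reservations are word-sized. A rough C++ equivalent expressed with the compiler builtins (a sketch with hypothetical names, assuming a little-endian target; it is not code from this commit):

// Sketch: emulate a 1-byte CAS with a relaxed 32-bit CAS on the enclosing
// aligned word, using the same mask/shift bookkeeping as the assembly above.
static inline uint8_t cmpxchg_byte_sketch(volatile uint8_t* dest,
                                          uint8_t compare_value,
                                          uint8_t exchange_value) {
  volatile uint32_t* aligned = (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)0x3);
  const int shift = 8 * ((uintptr_t)dest - (uintptr_t)aligned);  // 0, 8, 16, 24
  const uint32_t mask = 0xffu << shift;

  uint32_t old_word = __atomic_load_n(aligned, __ATOMIC_RELAXED);
  for (;;) {
    uint8_t old_byte = (uint8_t)((old_word & mask) >> shift);
    if (old_byte != compare_value) {
      return old_byte;                      // observed byte differs: CAS fails
    }
    uint32_t new_word = (old_word & ~mask) | ((uint32_t)exchange_value << shift);
    if (__atomic_compare_exchange_n(aligned, &old_word, new_word, /* weak */ false,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
      return old_byte;                      // success: return the previous byte
    }
    // On failure old_word now holds the current word contents; retry.
  }
}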

// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
// If sub-word xchg is ever needed with an older compiler,
// it must be implemented here, since libatomic is no longer linked.
STATIC_ASSERT(byte_size >= 4);
#endif

STATIC_ASSERT(byte_size == sizeof(T));
T value = compare_value;

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}

__atomic_compare_exchange(dest, &value, &exchange_value, /* weak */ false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
return value;
return res;
}

template<>
// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {

T old_value;
long rc;
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
STATIC_ASSERT(byte_size >= 4);
#endif

STATIC_ASSERT(byte_size == sizeof(T));
if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}

__asm__ __volatile__ (
"1: sext.w %1, %3 \n\t" // sign-extend compare_value
" lr.w %0, %2 \n\t"
" bne %0, %1, 2f \n\t"
" sc.w %1, %4, %2 \n\t"
" bnez %1, 1b \n\t"
"2: \n\t"
: /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc), /*%2*/"+A" (*dest)
: /*%3*/"r" (compare_value), /*%4*/"r" (exchange_value)
: "memory" );
__atomic_compare_exchange(dest, &compare_value, &exchange_value, /* weak */ false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);

if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
return old_value;
return compare_value;
}
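These platform templates are not called directly; HotSpot code reaches them through the Atomic front end, which defaults to memory_order_conservative, and that conservative ordering is what the FULL_MEM_BARRIER pairs above implement around the relaxed builtins. A small usage sketch (assuming the usual Atomic::cmpxchg entry point with the destination first; the field and function names are made up):

volatile int _claimed = 0;

bool try_claim() {
  // With this change, a 4-byte cmpxchg goes through the generic
  // PlatformCmpxchg above: full barrier, relaxed __atomic_compare_exchange,
  // full barrier.
  return Atomic::cmpxchg(&_claimed, 0, 1) == 0;
}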

template<size_t byte_size>
@@ -148,4 +186,6 @@ struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};

#undef FULL_COMPILER_ATOMIC_SUPPORT

#endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP