diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h
new file mode 100644
index 000000000000000..bbc0b4c2f6e1243
--- /dev/null
+++ b/Include/cpython/pyatomic.h
@@ -0,0 +1,397 @@
+// This header provides cross-platform low-level atomic operations
+// similar to C11 atomics.
+//
+// Operations are sequentially consistent unless they have a suffix indicating
+// otherwise. If in doubt, prefer the sequentially consistent operations.
+//
+// The "_relaxed" suffix for load and store operations indicates the "relaxed"
+// memory order. They don't provide synchronization, but (roughly speaking)
+// guarantee somewhat sane behavior for races instead of undefined behavior.
+// In practice, they correspond to "normal" hardware load and store
+// instructions, so they are almost as inexpensive as plain loads and stores
+// in C.
+//
+// Note that atomic read-modify-write operations like _Py_atomic_add_* return
+// the previous value of the atomic variable, not the new value.
+//
+// See https://en.cppreference.com/w/c/atomic for more information on C11
+// atomics.
+// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
+// "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage
+// of relaxed atomics.
+ +#ifndef Py_ATOMIC_H +#define Py_ATOMIC_H + +// Atomically adds `value` to `address` and returns the previous value +static inline int +_Py_atomic_add_int(int *address, int value); + +static inline int8_t +_Py_atomic_add_int8(int8_t *address, int8_t value); + +static inline int16_t +_Py_atomic_add_int16(int16_t *address, int16_t value); + +static inline int32_t +_Py_atomic_add_int32(int32_t *address, int32_t value); + +static inline int64_t +_Py_atomic_add_int64(int64_t *address, int64_t value); + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *address, intptr_t value); + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *address, unsigned int value); + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *address, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *address, Py_ssize_t value); + +// Performs an atomic compare-and-exchange. If `*address` and `expected` are equal, +// then `value` is stored in `*address`. Returns 1 on success and 0 on failure. +// These correspond to the "strong" variations of the C11 atomic_compare_exchange_* functions. 
+static inline int +_Py_atomic_compare_exchange_int(int *address, int expected, int value); + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *address, int8_t expected, int8_t value); + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *address, int16_t expected, int16_t value); + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *address, int32_t expected, int32_t value); + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *address, int64_t expected, int64_t value); + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *address, intptr_t expected, intptr_t value); + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *address, unsigned int expected, unsigned int value); + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *address, uint8_t expected, uint8_t value); + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *address, uint16_t expected, uint16_t value); + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *address, uint32_t expected, uint32_t value); + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *address, uint64_t expected, uint64_t value); + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *address, uintptr_t expected, uintptr_t value); + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value); + +static inline int +_Py_atomic_compare_exchange_ptr(void *address, void *expected, void *value); + +// Atomically replaces `*address` with `value` and returns the previous value of `*address`. 
+static inline int +_Py_atomic_exchange_int(int *address, int value); + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *address, int8_t value); + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *address, int16_t value); + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *address, int32_t value); + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *address, int64_t value); + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *address, intptr_t value); + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *address, unsigned int value); + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *address, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *address, Py_ssize_t value); + +static inline void * +_Py_atomic_exchange_ptr(void *address, void *value); + +// Performs `*address &= value` atomically and returns the previous value of `*address`. +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *address, uintptr_t value); + +// Performs `*address |= value` atomically and returns the previous value of `*address`. 
+static inline uint8_t +_Py_atomic_or_uint8(uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *address, uintptr_t value); + +// Atomically loads `*address` (sequential consistency) +static inline int +_Py_atomic_load_int(const int *address); + +static inline int8_t +_Py_atomic_load_int8(const int8_t *address); + +static inline int16_t +_Py_atomic_load_int16(const int16_t *address); + +static inline int32_t +_Py_atomic_load_int32(const int32_t *address); + +static inline int64_t +_Py_atomic_load_int64(const int64_t *address); + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *address); + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *address); + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *address); + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *address); + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *address); + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *address); + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *address); + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *address); + +static inline void * +_Py_atomic_load_ptr(const void *address); + +// Loads `*address` (relaxed consistency, i.e., no ordering) +static inline int +_Py_atomic_load_int_relaxed(const int *address); + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *address); + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *address); + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *address); + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *address); + +static inline intptr_t 
+_Py_atomic_load_intptr_relaxed(const intptr_t *address); + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *address); + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *address); + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *address); + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *address); + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *address); + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *address); + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *address); + +static inline void * +_Py_atomic_load_ptr_relaxed(const void *address); + +// Atomically performs `*address = value` (sequential consistency) +static inline void +_Py_atomic_store_int(int *address, int value); + +static inline void +_Py_atomic_store_int8(int8_t *address, int8_t value); + +static inline void +_Py_atomic_store_int16(int16_t *address, int16_t value); + +static inline void +_Py_atomic_store_int32(int32_t *address, int32_t value); + +static inline void +_Py_atomic_store_int64(int64_t *address, int64_t value); + +static inline void +_Py_atomic_store_intptr(intptr_t *address, intptr_t value); + +static inline void +_Py_atomic_store_uint8(uint8_t *address, uint8_t value); + +static inline void +_Py_atomic_store_uint16(uint16_t *address, uint16_t value); + +static inline void +_Py_atomic_store_uint32(uint32_t *address, uint32_t value); + +static inline void +_Py_atomic_store_uint64(uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_uintptr(uintptr_t *address, uintptr_t value); + +static inline void +_Py_atomic_store_uint(unsigned int *address, unsigned int value); + +static inline void +_Py_atomic_store_ptr(void *address, void *value); + +static inline void +_Py_atomic_store_ssize(Py_ssize_t* address, Py_ssize_t value); + +// Stores `*address = value` (relaxed consistency, i.e., no 
ordering) +static inline void +_Py_atomic_store_int_relaxed(int *address, int value); + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *address, int8_t value); + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *address, int16_t value); + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *address, int32_t value); + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *address, int64_t value); + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *address, intptr_t value); + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t* address, uint8_t value); + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *address, uint16_t value); + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *address, uint32_t value); + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *address, uintptr_t value); + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *address, unsigned int value); + +static inline void +_Py_atomic_store_ptr_relaxed(void *address, void *value); + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *address, Py_ssize_t value); + +// Stores `*address = value` (release operation) +static inline void +_Py_atomic_store_uint64_release(uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_ptr_release(void *address, void *value); + + +// Sequential consistency fence +static inline void _Py_atomic_fence_seq_cst(void); + +// Release fence +static inline void _Py_atomic_fence_release(void); + + +#ifndef _Py_USE_GCC_BUILTIN_ATOMICS +# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +# define _Py_USE_GCC_BUILTIN_ATOMICS 1 +# elif defined(__clang__) +# if __has_builtin(__atomic_load) +# define _Py_USE_GCC_BUILTIN_ATOMICS 1 +# endif +# endif +#endif + +#if _Py_USE_GCC_BUILTIN_ATOMICS +# define Py_ATOMIC_GCC_H +# 
include "cpython/pyatomic_gcc.h"
+#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+# define Py_ATOMIC_STD_H
+# include "cpython/pyatomic_std.h"
+#elif defined(_MSC_VER)
+# define Py_ATOMIC_MSC_H
+# include "cpython/pyatomic_msc.h"
+#else
+# error "no available pyatomic implementation for this platform/compiler"
+#endif
+
+#endif /* Py_ATOMIC_H */
+
diff --git a/Include/cpython/pyatomic_gcc.h b/Include/cpython/pyatomic_gcc.h
new file mode 100644
index 000000000000000..aff732ecfe61fdd
--- /dev/null
+++ b/Include/cpython/pyatomic_gcc.h
@@ -0,0 +1,679 @@
+// This is the implementation of Python atomic operations using GCC's built-in
+// functions that match the C11 memory model. This implementation is preferred
+// for GCC compatible compilers, such as Clang. These functions are available in
+// GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11.
+
+#ifndef Py_ATOMIC_GCC_H
+# error "this header file must not be included directly"
+#endif
+
+static inline int
+_Py_atomic_add_int(int *address, int value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline int8_t
+_Py_atomic_add_int8(int8_t *address, int8_t value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline int16_t
+_Py_atomic_add_int16(int16_t *address, int16_t value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline int32_t
+_Py_atomic_add_int32(int32_t *address, int32_t value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline int64_t
+_Py_atomic_add_int64(int64_t *address, int64_t value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline intptr_t
+_Py_atomic_add_intptr(intptr_t *address, intptr_t value)
+{
+    return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);
+}
+
+static inline unsigned int
+_Py_atomic_add_uint(unsigned int *address, unsigned int value)
+{
+    return __atomic_fetch_add(address,
value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *address, uint8_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *address, uint16_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *address, uint32_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *address, uint64_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *address, Py_ssize_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_compare_exchange_int(int *address, int expected, int value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *address, int8_t expected, int8_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *address, int16_t expected, int16_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *address, int32_t expected, int32_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *address, int64_t expected, int64_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, 
__ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *address, intptr_t expected, intptr_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *address, unsigned int expected, unsigned int value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *address, uint8_t expected, uint8_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *address, uint16_t expected, uint16_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *address, uint32_t expected, uint32_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *address, uint64_t expected, uint64_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_ptr(void *address, void *expected, void *value) +{ + void *e = expected; + return __atomic_compare_exchange_n((void **)address, &e, value, 0, 
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_exchange_int(int *address, int value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *address, int8_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *address, int16_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *address, int32_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *address, int64_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *address, intptr_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *address, unsigned int value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *address, uint8_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *address, uint16_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *address, uint32_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *address, uint64_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *address, uintptr_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *address, 
Py_ssize_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void * +_Py_atomic_exchange_ptr(void *address, void *value) +{ + return __atomic_exchange_n((void **)address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *address, uint8_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *address, uint16_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *address, uint32_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *address, uint64_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *address, uint8_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *address, uint16_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *address, uint32_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *address, uint64_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_load_int(const int *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int8_t +_Py_atomic_load_int8(const int8_t *address) +{ + return __atomic_load_n(address, 
__ATOMIC_SEQ_CST); +} + +static inline int16_t +_Py_atomic_load_int16(const int16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int32_t +_Py_atomic_load_int32(const int32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int64_t +_Py_atomic_load_int64(const int64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline void * +_Py_atomic_load_ptr(const void *address) +{ + return (void *)__atomic_load_n((void **)address, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_load_int_relaxed(const int *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *address) +{ + return 
__atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline void * +_Py_atomic_load_ptr_relaxed(const void *address) +{ + return (void *)__atomic_load_n((const void **)address, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int(int *address, int value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int8(int8_t *address, int8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int16(int16_t *address, int16_t value) +{ + 
__atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int32(int32_t *address, int32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int64(int64_t *address, int64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_intptr(intptr_t *address, intptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint8(uint8_t *address, uint8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint16(uint16_t *address, uint16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint32(uint32_t *address, uint32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint64(uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uintptr(uintptr_t *address, uintptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint(unsigned int *address, unsigned int value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_ptr(void *address, void *value) +{ + __atomic_store_n((void **)address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *address, Py_ssize_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int_relaxed(int *address, int value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *address, int8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *address, int16_t value) +{ + 
__atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *address, int32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *address, int64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *address, intptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *address, uint8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *address, uint16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *address, uint32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *address, uintptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *address, unsigned int value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_ptr_relaxed(void *address, void *value) +{ + __atomic_store_n((void **)address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *address, Py_ssize_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + + +static inline void +_Py_atomic_store_uint64_release(uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELEASE); +} + +static inline void +_Py_atomic_store_ptr_release(void *address, void *value) +{ + __atomic_store_n((void **)address, value, 
__ATOMIC_RELEASE);
+}
+
+static inline void
+_Py_atomic_fence_seq_cst(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+static inline void
+_Py_atomic_fence_release(void)
+{
+    __atomic_thread_fence(__ATOMIC_RELEASE);
+}
diff --git a/Include/cpython/pyatomic_msc.h b/Include/cpython/pyatomic_msc.h
new file mode 100644
index 000000000000000..6da4903717ad932
--- /dev/null
+++ b/Include/cpython/pyatomic_msc.h
@@ -0,0 +1,860 @@
+// This is the implementation of Python atomic operations for MSVC if the
+// compiler does not support C11 or C++11 atomics.
+
+#ifndef Py_ATOMIC_MSC_H
+# error "this header file must not be included directly"
+#endif
+
+#include <intrin.h>
+
+
+static inline int
+_Py_atomic_add_int(int *address, int value)
+{
+    return (int)_InterlockedExchangeAdd((volatile long*)address, (long)value);
+}
+
+static inline int8_t
+_Py_atomic_add_int8(int8_t *address, int8_t value)
+{
+    return (int8_t)_InterlockedExchangeAdd8((volatile char*)address, (char)value);
+}
+
+static inline int16_t
+_Py_atomic_add_int16(int16_t *address, int16_t value)
+{
+    return (int16_t)_InterlockedExchangeAdd16((volatile short*)address, (short)value);
+}
+
+static inline int32_t
+_Py_atomic_add_int32(int32_t *address, int32_t value)
+{
+    return (int32_t)_InterlockedExchangeAdd((volatile long*)address, (long)value);
+}
+
+static inline int64_t
+_Py_atomic_add_int64(int64_t *address, int64_t value)
+{
+#if defined(_M_X64) || defined(_M_ARM64)
+    return (int64_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value);
+#else
+    for (;;) {
+        __int64 old_value = *(volatile __int64*)address;
+        __int64 new_value = old_value + (__int64)value;
+        if (old_value == _InterlockedCompareExchange64((volatile __int64*)address, new_value, old_value)) {
+            return old_value;
+        }
+    }
+#endif
+}
+
+static inline intptr_t
+_Py_atomic_add_intptr(intptr_t *address, intptr_t value)
+{
+#if SIZEOF_VOID_P == 8
+    return (intptr_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value);
+#else + return (intptr_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *address, unsigned int value) +{ + return (unsigned int)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedExchangeAdd8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedExchangeAdd16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *address, uint64_t value) +{ + return (uint64_t)_Py_atomic_add_int64((int64_t*)address, (int64_t)value); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (uintptr_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (Py_ssize_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + return (Py_ssize_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + + +static inline int +_Py_atomic_compare_exchange_int(int *address, int expected, int value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *address, int8_t expected, int8_t value) +{ + return (char)expected == _InterlockedCompareExchange8((volatile char*)address, (char)value, 
(char)expected); +} + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *address, int16_t expected, int16_t value) +{ + return (short)expected == _InterlockedCompareExchange16((volatile short*)address, (short)value, (short)expected); +} + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *address, int32_t expected, int32_t value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *address, int64_t expected, int64_t value) +{ + return (__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +} + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *address, intptr_t expected, intptr_t value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *address, uint8_t expected, uint8_t value) +{ + return (char)expected == _InterlockedCompareExchange8((volatile char*)address, (char)value, (char)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *address, uint16_t expected, uint16_t value) +{ + return (short)expected == _InterlockedCompareExchange16((volatile short*)address, (short)value, (short)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *address, unsigned int expected, unsigned int value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *address, uint32_t expected, uint32_t value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *address, uint64_t expected, uint64_t value) +{ + return 
(__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +#else + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +#endif +} + +static inline int +_Py_atomic_compare_exchange_ptr(void *address, void *expected, void *value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int +_Py_atomic_exchange_int(int *address, int value) +{ + return (int)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *address, int8_t value) +{ + return (int8_t)_InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *address, int16_t value) +{ + return (int16_t)_InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *address, int32_t value) +{ + return (int32_t)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *address, int64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (int64_t)_InterlockedExchange64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + __int64 old_value = *(volatile __int64*)address; + __int64 new_value = (__int64)value; + if (old_value == 
_InterlockedCompareExchange64((volatile __int64*)address, new_value, old_value)) { + return old_value; + } + } +#endif +} + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *address, intptr_t value) +{ + return (intptr_t)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *address, unsigned int value) +{ + return (unsigned int)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *address, uint64_t value) +{ + return (uint64_t)_Py_atomic_exchange_int64((int64_t *)address, (int64_t)value); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *address, uintptr_t value) +{ + return (uintptr_t)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (Py_ssize_t)_InterlockedExchange64((volatile __int64*)address, (__int64)value); +#else + return (Py_ssize_t)_InterlockedExchange((volatile long*)address, (long)value); +#endif +} + +static inline void * +_Py_atomic_exchange_ptr(void *address, void *value) +{ + return (void *)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedAnd8((volatile 
char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedAnd16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedAnd((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (uint64_t)_InterlockedAnd64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + uint64_t old_value = *(volatile uint64_t*)address; + uint64_t new_value = old_value & value; + if ((__int64)old_value == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value)) { + return old_value; + } + } +#endif +} + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (uintptr_t)_InterlockedAnd64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedAnd((volatile long*)address, (long)value); +#endif +} + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedOr8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedOr16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedOr((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (uint64_t)_InterlockedOr64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + uint64_t old_value = *(volatile uint64_t *)address; + uint64_t new_value = old_value | value; + if (old_value == 
_InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value)) { + return old_value; + } + } +#endif +} + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (uintptr_t)_InterlockedOr64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedOr((volatile long*)address, (long)value); +#endif +} + +static inline int +_Py_atomic_load_int(const int *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile int *)address; +#elif defined(_M_ARM64) + return (int)__ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int +#endif +} + +static inline int8_t +_Py_atomic_load_int8(const int8_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile int8_t *)address; +#elif defined(_M_ARM64) + return (int8_t)__ldar8((unsigned __int8 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int8 +#endif +} + +static inline int16_t +_Py_atomic_load_int16(const int16_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile int16_t *)address; +#elif defined(_M_ARM64) + return (int16_t)__ldar16((unsigned __int16 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int16 +#endif +} + +static inline int32_t +_Py_atomic_load_int32(const int32_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile int32_t *)address; +#elif defined(_M_ARM64) + return (int32_t)__ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int32 +#endif +} + +static inline int64_t +_Py_atomic_load_int64(const int64_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile int64_t *)address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int64 +#endif +} + +static inline intptr_t 
+_Py_atomic_load_intptr(const intptr_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile intptr_t *)address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_intptr +#endif +} + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint8_t *)address; +#elif defined(_M_ARM64) + return __ldar8((unsigned __int8 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint8 +#endif +} + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint16_t *)address; +#elif defined(_M_ARM64) + return __ldar16((unsigned __int16 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint16 +#endif +} + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint32_t *)address; +#elif defined(_M_ARM64) + return __ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint32 +#endif +} + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uint64_t *)address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint64 +#endif +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile uintptr_t *)address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uintptr +#endif +} + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile unsigned int 
*)address; +#elif defined(_M_ARM64) + return __ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint +#endif +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile Py_ssize_t *)address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_ssize +#endif +} + +static inline void * +_Py_atomic_load_ptr(const void *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(void * volatile *)address; +#elif defined(_M_ARM64) + return (void *)__ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_ptr +#endif +} + +static inline int +_Py_atomic_load_int_relaxed(const int *address) +{ + return *(volatile int *)address; +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *address) +{ + return *(volatile int8_t *)address; +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *address) +{ + return *(volatile int16_t *)address; +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *address) +{ + return *(volatile int32_t *)address; +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *address) +{ + return *(volatile int64_t *)address; +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *address) +{ + return *(volatile intptr_t *)address; +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *address) +{ + return *(volatile uint8_t *)address; +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *address) +{ + return *(volatile uint16_t *)address; +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *address) +{ + return *(volatile uint32_t *)address; +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *address) +{ + return 
*(volatile uint64_t *)address; +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const uintptr_t *address) +{ + return *(volatile uintptr_t *)address; +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *address) +{ + return *(volatile unsigned int *)address; +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *address) +{ + return *(volatile Py_ssize_t *)address; +} + +static inline void* +_Py_atomic_load_ptr_relaxed(const void *address) +{ + return *(void * volatile *)address; +} + + +static inline void +_Py_atomic_store_int(int *address, int value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_int8(int8_t *address, int8_t value) +{ + _InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline void +_Py_atomic_store_int16(int16_t *address, int16_t value) +{ + _InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline void +_Py_atomic_store_int32(int32_t *address, int32_t value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_int64(int64_t *address, int64_t value) +{ + _Py_atomic_exchange_int64(address, value); +} + +static inline void +_Py_atomic_store_intptr(intptr_t *address, intptr_t value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_uint8(uint8_t *address, uint8_t value) +{ + _InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline void +_Py_atomic_store_uint16(uint16_t *address, uint16_t value) +{ + _InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline void +_Py_atomic_store_uint32(uint32_t *address, uint32_t value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_uint64(uint64_t *address, uint64_t value) +{ + 
_Py_atomic_exchange_int64((int64_t *)address, (int64_t)value); +} + +static inline void +_Py_atomic_store_uintptr(uintptr_t *address, uintptr_t value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_uint(unsigned int *address, unsigned int value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_ptr(void *address, void *value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + _InterlockedExchange64((volatile __int64 *)address, (__int64)value); +#else + _InterlockedExchange((volatile long*)address, (long)value); +#endif +} + + +static inline void +_Py_atomic_store_int_relaxed(int *address, int value) +{ + *(volatile int *)address = value; +} + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *address, int8_t value) +{ + *(volatile int8_t *)address = value; +} + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *address, int16_t value) +{ + *(volatile int16_t *)address = value; +} + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *address, int32_t value) +{ + *(volatile int32_t *)address = value; +} + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *address, int64_t value) +{ + *(volatile int64_t *)address = value; +} + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *address, intptr_t value) +{ + *(volatile intptr_t *)address = value; +} + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *address, uint8_t value) +{ + *(volatile uint8_t *)address = value; +} + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *address, uint16_t value) +{ + *(volatile uint16_t *)address = value; +} + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *address, uint32_t value) +{ + *(volatile uint32_t *)address = value; +} + 
+static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *address, uint64_t value) +{ + *(volatile uint64_t *)address = value; +} + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *address, uintptr_t value) +{ + *(volatile uintptr_t *)address = value; +} + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *address, unsigned int value) +{ + *(volatile unsigned int *)address = value; +} + +static inline void +_Py_atomic_store_ptr_relaxed(void *address, void* value) +{ + *(void * volatile *)address = value; +} + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *address, Py_ssize_t value) +{ + *(volatile Py_ssize_t *)address = value; +} + +static inline void +_Py_atomic_store_uint64_release(uint64_t *address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *(volatile uint64_t *)address = value; +#elif defined(_M_ARM64) + __stlr64(address, value); +#else +#error no implementation of _Py_atomic_store_uint64_release +#endif +} + +static inline void +_Py_atomic_store_ptr_release(void *address, void *value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *(void * volatile *)address = value; +#elif defined(_M_ARM64) + __stlr64(address, (uintptr_t)value); +#else +#error no implementation of _Py_atomic_store_ptr_release +#endif +} + + static inline void +_Py_atomic_fence_seq_cst(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) + __faststorefence(); +#elif defined(_M_IX86) + _mm_mfence(); +#else +#error no implementation of _Py_atomic_fence_seq_cst +#endif +} + + static inline void +_Py_atomic_fence_release(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) || defined(_M_IX86) + _ReadWriteBarrier(); +#else +#error no implementation of _Py_atomic_fence_release +#endif +} diff --git a/Include/cpython/pyatomic_std.h b/Include/cpython/pyatomic_std.h new file mode 100644 index 000000000000000..ae6980577958299 --- /dev/null +++ 
b/Include/cpython/pyatomic_std.h @@ -0,0 +1,799 @@ +// This is the implementation of Python atomic operations using C++11 or C11 +// atomics. Note that the pyatomic_gcc.h implementation is preferred for GCC +// compatible compilers, even if they support C++11 atomics. + +#ifndef Py_ATOMIC_STD_H +# error "this header file must not be included directly" +#endif + +#ifdef __cplusplus +extern "C++" { +#include +} +#define _Py_USING_STD using namespace std; +#define _Atomic(tp) atomic +#else +#include +#define _Py_USING_STD +#endif + + +static inline int +_Py_atomic_add_int(int *address, int value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(int)*)address, value); +} + +static inline int8_t +_Py_atomic_add_int8(int8_t *address, int8_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(int8_t)*)address, value); +} + +static inline int16_t +_Py_atomic_add_int16(int16_t *address, int16_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(int16_t)*)address, value); +} + +static inline int32_t +_Py_atomic_add_int32(int32_t *address, int32_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(int32_t)*)address, value); +} + +static inline int64_t +_Py_atomic_add_int64(int64_t *address, int64_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(int64_t)*)address, value); +} + +static inline intptr_t +_Py_atomic_add_intptr(intptr_t *address, intptr_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(intptr_t)*)address, value); +} + +static inline unsigned int +_Py_atomic_add_uint(unsigned int *address, unsigned int value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(unsigned int)*)address, value); +} + +static inline uint8_t +_Py_atomic_add_uint8(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_add_uint16(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return 
atomic_fetch_add((_Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_add_uint32(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_add_uint64(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(uintptr_t)*)address, value); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + return atomic_fetch_add((_Atomic(Py_ssize_t)*)address, value); +} + +static inline int +_Py_atomic_compare_exchange_int(int *address, int expected, int value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(int)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int8(int8_t *address, int8_t expected, int8_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(int8_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int16(int16_t *address, int16_t expected, int16_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(int16_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int32(int32_t *address, int32_t expected, int32_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(int32_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int64(int64_t *address, int64_t expected, int64_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(int64_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_intptr(intptr_t *address, intptr_t expected, intptr_t value) +{ + _Py_USING_STD + return 
atomic_compare_exchange_strong((_Atomic(intptr_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint(unsigned int *address, unsigned int expected, unsigned int value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(unsigned int)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint8(uint8_t *address, uint8_t expected, uint8_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(uint8_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint16(uint16_t *address, uint16_t expected, uint16_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(uint16_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint32(uint32_t *address, uint32_t expected, uint32_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(uint32_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint64(uint64_t *address, uint64_t expected, uint64_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(uint64_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_ssize(Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_ptr(void *address, void *expected, void *value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((_Atomic(void *)*)address, &expected, value); +} + + +static inline int +_Py_atomic_exchange_int(int *address, int value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(int)*)address, 
value); +} + +static inline int8_t +_Py_atomic_exchange_int8(int8_t *address, int8_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(int8_t)*)address, value); +} + +static inline int16_t +_Py_atomic_exchange_int16(int16_t *address, int16_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(int16_t)*)address, value); +} + +static inline int32_t +_Py_atomic_exchange_int32(int32_t *address, int32_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(int32_t)*)address, value); +} + +static inline int64_t +_Py_atomic_exchange_int64(int64_t *address, int64_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(int64_t)*)address, value); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(intptr_t *address, intptr_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(intptr_t)*)address, value); +} + +static inline unsigned int +_Py_atomic_exchange_uint(unsigned int *address, unsigned int value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(unsigned int)*)address, value); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(uintptr_t)*)address, value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(Py_ssize_t *address, Py_ssize_t value) +{ 
+ _Py_USING_STD + return atomic_exchange((_Atomic(Py_ssize_t)*)address, value); +} + +static inline void * +_Py_atomic_exchange_ptr(void *address, void *value) +{ + _Py_USING_STD + return atomic_exchange((_Atomic(void *)*)address, value); +} + +static inline uint8_t +_Py_atomic_and_uint8(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_and((_Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_and_uint16(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_fetch_and((_Atomic(uint16_t)*)address, value); +} + + +static inline uint32_t +_Py_atomic_and_uint32(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_and((_Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_and_uint64(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_and((_Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_and_uintptr(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_and((_Atomic(uintptr_t)*)address, value); +} + +static inline uint8_t +_Py_atomic_or_uint8(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_or((_Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_or_uint16(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_fetch_or((_Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_or_uint32(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_or((_Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_or_uint64(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_or((_Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_or_uintptr(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_or((_Atomic(uintptr_t)*)address, value); +} + +static inline int +_Py_atomic_load_int(const int *address) +{ + 
_Py_USING_STD + return atomic_load((const _Atomic(int)*)address); +} + +static inline int8_t +_Py_atomic_load_int8(const int8_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(int8_t)*)address); +} + +static inline int16_t +_Py_atomic_load_int16(const int16_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(int16_t)*)address); +} + +static inline int32_t +_Py_atomic_load_int32(const int32_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(int32_t)*)address); +} + +static inline int64_t +_Py_atomic_load_int64(const int64_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(int64_t)*)address); +} + +static inline intptr_t +_Py_atomic_load_intptr(const intptr_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(intptr_t)*)address); +} + +static inline uint8_t +_Py_atomic_load_uint8(const uint8_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(uint8_t)*)address); +} + +static inline uint16_t +_Py_atomic_load_uint16(const uint16_t *address) +{ + _Py_USING_STD + // BUG FIX: was _Atomic(uint32_t), which read 4 bytes through a 2-byte + // object (out-of-bounds read / wrong result); must match uint16_t. + return atomic_load((const _Atomic(uint16_t)*)address); +} + +static inline uint32_t +_Py_atomic_load_uint32(const uint32_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(uint32_t)*)address); +} + +static inline uint64_t +_Py_atomic_load_uint64(const uint64_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(uint64_t)*)address); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const uintptr_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(uintptr_t)*)address); +} + +static inline unsigned int +_Py_atomic_load_uint(const unsigned int *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(unsigned int)*)address); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const Py_ssize_t *address) +{ + _Py_USING_STD + return atomic_load((const _Atomic(Py_ssize_t)*)address); +} + +static inline void * +_Py_atomic_load_ptr(const void *address) +{ + _Py_USING_STD +
return atomic_load((const _Atomic(void*)*)address); +} + + +static inline int +_Py_atomic_load_int_relaxed(const int *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(int)*)address, memory_order_relaxed); +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const int8_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(int8_t)*)address, memory_order_relaxed); +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const int16_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(int16_t)*)address, memory_order_relaxed); +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const int32_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(int32_t)*)address, memory_order_relaxed); +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const int64_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(int64_t)*)address, memory_order_relaxed); +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const intptr_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(intptr_t)*)address, memory_order_relaxed); +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const uint8_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(uint8_t)*)address, memory_order_relaxed); +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const uint16_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(uint16_t)*)address, memory_order_relaxed); +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const uint32_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(uint32_t)*)address, memory_order_relaxed); +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const uint64_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(uint64_t)*)address, memory_order_relaxed); +} + +static inline uintptr_t 
+_Py_atomic_load_uintptr_relaxed(const uintptr_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(uintptr_t)*)address, memory_order_relaxed); +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const unsigned int *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(unsigned int)*)address, memory_order_relaxed); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const Py_ssize_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(Py_ssize_t)*)address, memory_order_relaxed); +} + +static inline void * +_Py_atomic_load_ptr_relaxed(const void *address) +{ + _Py_USING_STD + return atomic_load_explicit((const _Atomic(void*)*)address, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int(int *address, int value) +{ + _Py_USING_STD + atomic_store((_Atomic(int)*)address, value); +} + +static inline void +_Py_atomic_store_int8(int8_t *address, int8_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(int8_t)*)address, value); +} + +static inline void +_Py_atomic_store_int16(int16_t *address, int16_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(int16_t)*)address, value); +} + +static inline void +_Py_atomic_store_int32(int32_t *address, int32_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(int32_t)*)address, value); +} + +static inline void +_Py_atomic_store_int64(int64_t *address, int64_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(int64_t)*)address, value); +} + +static inline void +_Py_atomic_store_intptr(intptr_t *address, intptr_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(intptr_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint8(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(uint8_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint16(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(uint16_t)*)address, value); +} + +static inline void 
+_Py_atomic_store_uint32(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(uint32_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint64(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(uint64_t)*)address, value); +} + +static inline void +_Py_atomic_store_uintptr(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(uintptr_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint(unsigned int *address, unsigned int value) +{ + _Py_USING_STD + atomic_store((_Atomic(unsigned int)*)address, value); +} + +static inline void +_Py_atomic_store_ptr(void *address, void *value) +{ + _Py_USING_STD + atomic_store((_Atomic(void*)*)address, value); +} + +static inline void +_Py_atomic_store_ssize(Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + atomic_store((_Atomic(Py_ssize_t)*)address, value); +} + +static inline void +_Py_atomic_store_int_relaxed(int *address, int value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(int)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int8_relaxed(int8_t *address, int8_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(int8_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int16_relaxed(int16_t *address, int16_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(int16_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int32_relaxed(int32_t *address, int32_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(int32_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int64_relaxed(int64_t *address, int64_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(int64_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_intptr_relaxed(intptr_t *address, intptr_t value) +{ + _Py_USING_STD + 
atomic_store_explicit((_Atomic(intptr_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint8_relaxed(uint8_t *address, uint8_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uint8_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint16_relaxed(uint16_t *address, uint16_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uint16_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint32_relaxed(uint32_t *address, uint32_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uint32_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint64_relaxed(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uint64_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uintptr_relaxed(uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uintptr_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint_relaxed(unsigned int *address, unsigned int value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(unsigned int)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ptr_relaxed(void *address, void *value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(void*)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ssize_relaxed(Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(Py_ssize_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint64_release(uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(uint64_t)*)address, value, memory_order_release); +} + +static inline void +_Py_atomic_store_ptr_release(void *address, void *value) +{ + _Py_USING_STD + atomic_store_explicit((_Atomic(void*)*)address, value, 
memory_order_release); +} + +static inline void +_Py_atomic_fence_seq_cst(void) +{ + _Py_USING_STD + atomic_thread_fence(memory_order_seq_cst); +} + +static inline void +_Py_atomic_fence_release(void) +{ + _Py_USING_STD + atomic_thread_fence(memory_order_release); +} diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h index 48d246ea08f3d98..22ce971a64f3dfd 100644 --- a/Include/internal/pycore_atomic.h +++ b/Include/internal/pycore_atomic.h @@ -1,5 +1,5 @@ -#ifndef Py_ATOMIC_H -#define Py_ATOMIC_H +#ifndef Py_INTERNAL_ATOMIC_H +#define Py_INTERNAL_ATOMIC_H #ifdef __cplusplus extern "C" { #endif @@ -554,4 +554,4 @@ typedef struct _Py_atomic_int { #ifdef __cplusplus } #endif -#endif /* Py_ATOMIC_H */ +#endif /* Py_INTERNAL_ATOMIC_H */ diff --git a/Lib/test/test_capi/test_pyatomic.py b/Lib/test/test_capi/test_pyatomic.py new file mode 100644 index 000000000000000..846d6d50c25969b --- /dev/null +++ b/Lib/test/test_capi/test_pyatomic.py @@ -0,0 +1,15 @@ +import unittest +from test.support import import_helper + +# Skip this test if the _testcapi module isn't available. 
+_testcapi = import_helper.import_module('_testcapi') + +class PyAtomicTests(unittest.TestCase): + pass + +for name in sorted(dir(_testcapi)): + if name.startswith('test_atomic'): + setattr(PyAtomicTests, name, getattr(_testcapi, name)) + +if __name__ == "__main__": + unittest.main() diff --git a/Makefile.pre.in b/Makefile.pre.in index 9be5c3b50eb9ee2..adc013dc1319446 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1721,6 +1721,9 @@ PYTHON_HEADERS= \ $(srcdir)/Include/cpython/optimizer.h \ $(srcdir)/Include/cpython/picklebufobject.h \ $(srcdir)/Include/cpython/pthread_stubs.h \ + $(srcdir)/Include/cpython/pyatomic.h \ + $(srcdir)/Include/cpython/pyatomic_gcc.h \ + $(srcdir)/Include/cpython/pyatomic_std.h \ $(srcdir)/Include/cpython/pyctype.h \ $(srcdir)/Include/cpython/pydebug.h \ $(srcdir)/Include/cpython/pyerrors.h \ diff --git a/Misc/NEWS.d/next/C API/2023-08-22-13-00-54.gh-issue-108337.wceHZm.rst b/Misc/NEWS.d/next/C API/2023-08-22-13-00-54.gh-issue-108337.wceHZm.rst new file mode 100644 index 000000000000000..476123a051bb3f1 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-08-22-13-00-54.gh-issue-108337.wceHZm.rst @@ -0,0 +1 @@ +Add atomic operations on additional data types in pyatomic.h. 
diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 689f1d42ef0eeee..e913ee405573d35 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -159,7 +159,7 @@ @MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c @MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c @MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c -@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c +@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c @MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c # Some testing modules MUST be built as shared libraries. 
diff --git a/Modules/_testcapi/parts.h b/Modules/_testcapi/parts.h index 65ebf80bcd1e95a..8e51b064b7531fd 100644 --- a/Modules/_testcapi/parts.h +++ b/Modules/_testcapi/parts.h @@ -40,6 +40,7 @@ int _PyTestCapi_Init_Structmember(PyObject *module); int _PyTestCapi_Init_Exceptions(PyObject *module); int _PyTestCapi_Init_Code(PyObject *module); int _PyTestCapi_Init_Buffer(PyObject *module); +int _PyTestCapi_Init_PyAtomic(PyObject *module); int _PyTestCapi_Init_PyOS(PyObject *module); int _PyTestCapi_Init_Immortal(PyObject *module); int _PyTestCapi_Init_GC(PyObject *mod); diff --git a/Modules/_testcapi/pyatomic.c b/Modules/_testcapi/pyatomic.c new file mode 100644 index 000000000000000..da54a87915923c4 --- /dev/null +++ b/Modules/_testcapi/pyatomic.c @@ -0,0 +1,165 @@ +/* + * C Extension module to smoke test pyatomic.h API. + * + * This only tests basic functionality, not any synchronizing ordering. + */ + +/* Always enable assertions */ +#undef NDEBUG + +#include "Python.h" +#include "cpython/pyatomic.h" +#include "parts.h" + +// We define atomic bitwise operations on these types +#define FOR_BITWISE_TYPES(V) \ + V(uint8, uint8_t) \ + V(uint16, uint16_t) \ + V(uint32, uint32_t) \ + V(uint64, uint64_t) \ + V(uintptr, uintptr_t) + +// We define atomic addition on these types +#define FOR_ARITHMETIC_TYPES(V) \ + FOR_BITWISE_TYPES(V) \ + V(int, int) \ + V(uint, unsigned int) \ + V(int8, int8_t) \ + V(int16, int16_t) \ + V(int32, int32_t) \ + V(int64, int64_t) \ + V(intptr, intptr_t) \ + V(ssize, Py_ssize_t) + +// We define atomic load, store, exchange, and compare_exchange on these types +#define FOR_ALL_TYPES(V) \ + FOR_ARITHMETIC_TYPES(V) \ + V(ptr, void*) + +#define IMPL_TEST_ADD(suffix, dtype) \ +static PyObject * \ +test_atomic_add_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = 0; \ + assert(_Py_atomic_add_##suffix(&x, 1) == 0); \ + assert(x == 1); \ + assert(_Py_atomic_add_##suffix(&x, 2) == 1); \ + assert(x == 3); \ + assert(_Py_atomic_add_##suffix(&x, -2) 
== 3); \ + assert(x == 1); \ + assert(_Py_atomic_add_##suffix(&x, -1) == 1); \ + assert(x == 0); \ + assert(_Py_atomic_add_##suffix(&x, -1) == 0); \ + assert(x == (dtype)-1); \ + assert(_Py_atomic_add_##suffix(&x, -2) == (dtype)-1); \ + assert(x == (dtype)-3); \ + assert(_Py_atomic_add_##suffix(&x, 2) == (dtype)-3); \ + assert(x == (dtype)-1); \ + Py_RETURN_NONE; \ +} +FOR_ARITHMETIC_TYPES(IMPL_TEST_ADD) + +#define IMPL_TEST_COMPARE_EXCHANGE(suffix, dtype) \ +static PyObject * \ +test_atomic_compare_exchange_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_compare_exchange_##suffix(&x, y, z) == 0); \ + assert(x == 0); \ + assert(_Py_atomic_compare_exchange_##suffix(&x, 0, z) == 1); \ + assert(x == z); \ + assert(_Py_atomic_compare_exchange_##suffix(&x, y, z) == 0); \ + assert(x == z); \ + Py_RETURN_NONE; \ +} +FOR_ALL_TYPES(IMPL_TEST_COMPARE_EXCHANGE) + +#define IMPL_TEST_EXCHANGE(suffix, dtype) \ +static PyObject * \ +test_atomic_exchange_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)0); \ + assert(x == (dtype)1); \ + assert(_Py_atomic_exchange_##suffix(&x, z) == (dtype)1); \ + assert(x == (dtype)2); \ + assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)2); \ + assert(x == (dtype)1); \ + Py_RETURN_NONE; \ +} +FOR_ALL_TYPES(IMPL_TEST_EXCHANGE) + +#define IMPL_TEST_LOAD_STORE(suffix, dtype) \ +static PyObject * \ +test_atomic_load_store_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_load_##suffix(&x) == (dtype)0); \ + assert(x == (dtype)0); \ + _Py_atomic_store_##suffix(&x, y); \ + assert(_Py_atomic_load_##suffix(&x) == (dtype)1); \ + assert(x == (dtype)1); \ + _Py_atomic_store_##suffix##_relaxed(&x, z); \ + assert(_Py_atomic_load_##suffix##_relaxed(&x) == (dtype)2); 
\ + assert(x == (dtype)2); \ + Py_RETURN_NONE; \ +} +FOR_ALL_TYPES(IMPL_TEST_LOAD_STORE) + +#define IMPL_TEST_AND_OR(suffix, dtype) \ +static PyObject * \ +test_atomic_and_or_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)3; \ + assert(_Py_atomic_or_##suffix(&x, z) == (dtype)0); \ + assert(x == (dtype)3); \ + assert(_Py_atomic_and_##suffix(&x, y) == (dtype)3); \ + assert(x == (dtype)1); \ + Py_RETURN_NONE; \ +} +FOR_BITWISE_TYPES(IMPL_TEST_AND_OR) + +static PyObject * +test_atomic_fences(PyObject *self, PyObject *obj) { + // Just make sure that the fences compile. We are not + // testing any synchronizing ordering. + _Py_atomic_fence_seq_cst(); + _Py_atomic_fence_release(); + Py_RETURN_NONE; +} + +// NOTE: all tests should start with "test_atomic_" to be included +// in test_pyatomic.py + +#define BIND_TEST_ADD(suffix, dtype) \ + {"test_atomic_add_" #suffix, test_atomic_add_##suffix, METH_NOARGS}, +#define BIND_TEST_COMPARE_EXCHANGE(suffix, dtype) \ + {"test_atomic_compare_exchange_" #suffix, test_atomic_compare_exchange_##suffix, METH_NOARGS}, +#define BIND_TEST_EXCHANGE(suffix, dtype) \ + {"test_atomic_exchange_" #suffix, test_atomic_exchange_##suffix, METH_NOARGS}, +#define BIND_TEST_LOAD_STORE(suffix, dtype) \ + {"test_atomic_load_store_" #suffix, test_atomic_load_store_##suffix, METH_NOARGS}, +#define BIND_TEST_AND_OR(suffix, dtype) \ + {"test_atomic_and_or_" #suffix, test_atomic_and_or_##suffix, METH_NOARGS}, + +static PyMethodDef test_methods[] = { + FOR_ARITHMETIC_TYPES(BIND_TEST_ADD) + FOR_ALL_TYPES(BIND_TEST_COMPARE_EXCHANGE) + FOR_ALL_TYPES(BIND_TEST_EXCHANGE) + FOR_ALL_TYPES(BIND_TEST_LOAD_STORE) + FOR_BITWISE_TYPES(BIND_TEST_AND_OR) + {"test_atomic_fences", test_atomic_fences, METH_NOARGS}, + {NULL, NULL} /* sentinel */ +}; + +int +_PyTestCapi_Init_PyAtomic(PyObject *mod) +{ + if (PyModule_AddFunctions(mod, test_methods) < 0) { + return -1; + } + return 0; +} diff --git 
a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index a7a98d1eea5bd15..1094b4c544b2790 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -4325,6 +4325,9 @@ PyInit__testcapi(void) if (_PyTestCapi_Init_GC(m) < 0) { return NULL; } + if (_PyTestCapi_Init_PyAtomic(m) < 0) { + return NULL; + } #ifndef LIMITED_API_AVAILABLE PyModule_AddObjectRef(m, "LIMITED_API_AVAILABLE", Py_False); diff --git a/PCbuild/_testcapi.vcxproj b/PCbuild/_testcapi.vcxproj index 8c0fd0cf052b0ec..0a02929db438b80 100644 --- a/PCbuild/_testcapi.vcxproj +++ b/PCbuild/_testcapi.vcxproj @@ -112,6 +112,7 @@ + diff --git a/PCbuild/_testcapi.vcxproj.filters b/PCbuild/_testcapi.vcxproj.filters index 87d33ebe28e4750..4ba6011d8af5b97 100644 --- a/PCbuild/_testcapi.vcxproj.filters +++ b/PCbuild/_testcapi.vcxproj.filters @@ -66,6 +66,9 @@ Source Files + + Source Files + Source Files diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index b0e62864421e17c..7f9f24f9274df0f 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -166,6 +166,8 @@ + + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index d5f61e9c5d7c899..b9d09716df42f37 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -423,6 +423,12 @@ Include + + Include + + + Include + Include