mirror of https://github.com/Karaka-Management/cOMS.git
synced 2026-01-10 19:08:39 +00:00

prepare for changes

This commit is contained in:
parent 7d48cfaa37
commit 2883ca0841
69  compiler/TypeName.h  Normal file
@@ -0,0 +1,69 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_COMPILER_TYPE_NAME_H
#define COMS_COMPILER_TYPE_NAME_H

#include "../utils/TestUtils.h"
#include "../stdlib/Types.h"

#if _MSC_VER
#include "msvc/TypeName.h"
#else
#include "gcc/TypeName.h"
#endif

// Default case: fall back to the compiler-specific raw type name
template<typename T>
struct NormalizedTypeName {
    static constexpr const char* Get() {
        return GetRawTypeName<T>();
    }
};

// Specialization that pins a type to its canonical spelling
#define REGISTER_TYPENAME(Actual) \
    template<> \
    struct NormalizedTypeName<Actual> { \
        static constexpr const char* Get() { return #Actual; } \
    };

// Strips a single leading qualifier (e.g. "struct Foo" -> "Foo")
constexpr const char* RemoveQualifiers(const char* typeName) {
    const char* qualifiers[] = {"enum ", "struct ", "class ", "const ", "volatile ", "restrict "};

    for (const char* qual : qualifiers) {
        size_t len = str_length_constexpr(qual);
        if (str_compare(typeName, qual, len) == 0) {
            return typeName + len;
        }
    }

    return typeName;
}

// Not allowed since uint8 = unsigned char
//REGISTER_TYPENAME(byte)
REGISTER_TYPENAME(bool)

REGISTER_TYPENAME(uint8)
REGISTER_TYPENAME(uint16)
REGISTER_TYPENAME(uint32)
REGISTER_TYPENAME(uint64)

REGISTER_TYPENAME(int8)
REGISTER_TYPENAME(int16)
REGISTER_TYPENAME(int32)
REGISTER_TYPENAME(int64)

REGISTER_TYPENAME(f32)
REGISTER_TYPENAME(f64)

template<typename T>
constexpr const char* GetTypeName() {
    const char* raw_name = NormalizedTypeName<T>::Get();
    return RemoveQualifiers(raw_name);
}

#endif
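A quick usage sketch of the file above (hypothetical demo code, not part of the commit; assumes the int32/f32/f64 typedefs from stdlib/Types.h and a C++14 or newer compiler). Registered fundamental types come back under their canonical alias, while unregistered types fall through to the compiler-specific GetRawTypeName<T>(), with RemoveQualifiers trimming a single "struct "/"class "/"enum " prefix:

// Hypothetical usage sketch of GetTypeName<T>().
struct Vec3 { f32 x, y, z; };

void typename_demo() {
    const char* a = GetTypeName<int32>(); // "int32" via REGISTER_TYPENAME(int32)
    const char* b = GetTypeName<f64>();   // "f64"  via REGISTER_TYPENAME(f64)

    // Unregistered type: falls back to GetRawTypeName<Vec3>(), which may
    // yield "struct Vec3" on MSVC; RemoveQualifiers trims it to "Vec3".
    const char* c = GetTypeName<Vec3>();
    (void) a; (void) b; (void) c;
}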
@@ -18,34 +18,36 @@ typedef union { f64 f; int64 l; } _atomic_64;
 FORCE_INLINE void atomic_set_relaxed(void** target, void* value) noexcept { __atomic_store_n(target, value, __ATOMIC_RELAXED); }
 FORCE_INLINE void* atomic_get_relaxed(void** target) noexcept { return __atomic_load_n(target, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_set_relaxed(volatile int8* value, int8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile int16* value, int16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile int32* value, int32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile int64* value, int64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
 FORCE_INLINE int8 atomic_fetch_set_relaxed(volatile int8* value, int8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_fetch_set_relaxed(volatile int16* value, int16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_fetch_set_relaxed(volatile int32* value, int32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_fetch_set_relaxed(volatile int64* value, int64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_fetch_set_relaxed(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_fetch_set_relaxed(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_fetch_set_relaxed(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
 FORCE_INLINE int8 atomic_get_relaxed(volatile int8* value) noexcept { return __atomic_load_n((int8 *) value, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_get_relaxed(volatile int16* value) noexcept { return __atomic_load_n((int16 *) value, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_get_relaxed(volatile int32* value) noexcept { return __atomic_load_n((int32 *) value, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_get_relaxed(volatile int64* value) noexcept { return __atomic_load_n((int64 *) value, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_get_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n((int16 *) value, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_get_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n((int32 *) value, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_get_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n((int64 *) value, __ATOMIC_RELAXED); }
 FORCE_INLINE int8 atomic_increment_relaxed(volatile int8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
 FORCE_INLINE int8 atomic_decrement_relaxed(volatile int8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_increment_relaxed(volatile int16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_decrement_relaxed(volatile int16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_increment_relaxed(volatile int32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_decrement_relaxed(volatile int32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_increment_relaxed(volatile int64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_decrement_relaxed(volatile int64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_increment_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_decrement_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_increment_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_decrement_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_increment_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_decrement_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_add_relaxed(volatile int8* value, int8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_sub_relaxed(volatile int8* value, int8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile int16* value, int16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile int16* value, int16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile int32* value, int32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile int32* value, int32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile int64* value, int64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile int64* value, int64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_add_relaxed(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_add_relaxed(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_add_relaxed(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
 FORCE_INLINE f32 atomic_compare_exchange_strong_relaxed(volatile f32* value, f32* expected, f32 desired) noexcept {
+    ASSERT_STRICT(((uintptr_t) value % 4) == 0);
+
     volatile _atomic_32* value_as_union = (volatile _atomic_32*)value;
     _atomic_32* expected_as_union = (_atomic_32*)expected;
     _atomic_32 desired_as_union;
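The pattern this hunk introduces is uniform: every 2-, 4-, and 8-byte overload now asserts that the target address is naturally aligned before touching it. x86 only guarantees atomicity for naturally aligned accesses, and other ISAs may fault or silently tear on misaligned ones. A minimal illustration of the invariant being checked (hypothetical demo code, not from the repo; assumes the int32/uint8 typedefs and the atomic_* wrappers above):

// Hypothetical demo: why ((uintptr_t) p % sizeof(T)) == 0 matters.
void alignment_demo() {
    alignas(4) int32 hits = 0;
    atomic_increment_relaxed(&hits);           // ok: naturally aligned

    uint8 buffer[8] = {};
    int32* misaligned = (int32*) (buffer + 1); // (uintptr_t) misaligned % 4 == 1
    // atomic_increment_relaxed(misaligned);   // would trip ASSERT_STRICT
}

The int8/uint8 overloads stay untouched since a single byte is always naturally aligned.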
@@ -59,6 +61,7 @@ FORCE_INLINE f32 atomic_compare_exchange_strong_relaxed(volatile f32* value, f32
     return expected_as_union->f;
 }
 FORCE_INLINE f64 atomic_compare_exchange_strong_relaxed(volatile f64* value, f64* expected, f64 desired) noexcept {
+
     volatile _atomic_64* value_as_union = (volatile _atomic_64*)value;
     _atomic_64* expected_as_union = (_atomic_64*)expected;
     _atomic_64 desired_as_union;
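The diff elides the middle of these float CAS helpers. For orientation, this is the usual shape of such a routine, sketched under the assumption that the _atomic_32/_atomic_64 unions exist solely to reuse the integer CAS builtin on the float's bit pattern (the real body may differ; _atomic_32 as union { f32 f; int32 l; } is inferred by symmetry with the _atomic_64 typedef shown in the hunk header):

// Sketch only: CAS on a float by comparing/exchanging its bit pattern.
FORCE_INLINE f32 sketch_cas_f32(volatile f32* value, f32* expected, f32 desired) noexcept {
    volatile _atomic_32* value_as_union = (volatile _atomic_32*) value;
    _atomic_32* expected_as_union = (_atomic_32*) expected;
    _atomic_32 desired_as_union;
    desired_as_union.f = desired;

    // Compare-exchange the integer member; on failure *expected is updated
    // with the observed bits, so returning expected_as_union->f matches the
    // "return the witnessed value" convention of the integer overloads.
    __atomic_compare_exchange_n(&value_as_union->l, &expected_as_union->l,
                                desired_as_union.l, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return expected_as_union->f;
}

Comparing bit patterns rather than float values sidesteps NaN != NaN, at the cost of treating +0.0f and -0.0f as distinct.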
@@ -71,97 +74,99 @@ FORCE_INLINE f64 atomic_compare_exchange_strong_relaxed(volatile f64* value, f64

     return expected_as_union->f;
 }
-FORCE_INLINE int32 atomic_compare_exchange_strong_relaxed(volatile int32* value, int32* expected, int32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
-FORCE_INLINE int64 atomic_compare_exchange_strong_relaxed(volatile int64* value, int64* expected, int64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
+FORCE_INLINE int32 atomic_compare_exchange_strong_relaxed(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
+FORCE_INLINE int64 atomic_compare_exchange_strong_relaxed(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
 FORCE_INLINE int8 atomic_fetch_add_relaxed(volatile int8* value, int8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
 FORCE_INLINE int8 atomic_fetch_sub_relaxed(volatile int8* value, int8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_fetch_add_relaxed(volatile int16* value, int16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int16 atomic_fetch_sub_relaxed(volatile int16* value, int16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_fetch_add_relaxed(volatile int32* value, int32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int32 atomic_fetch_sub_relaxed(volatile int32* value, int32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_fetch_add_relaxed(volatile int64* value, int64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE int64 atomic_fetch_sub_relaxed(volatile int64* value, int64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_fetch_add_relaxed(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int16 atomic_fetch_sub_relaxed(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_fetch_add_relaxed(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int32 atomic_fetch_sub_relaxed(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_fetch_add_relaxed(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE int64 atomic_fetch_sub_relaxed(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_set_relaxed(volatile uint8* value, uint8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELAXED); }
 FORCE_INLINE uint8 atomic_fetch_set_relaxed(volatile uint8* value, uint8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_fetch_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_fetch_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_fetch_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_fetch_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_fetch_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_fetch_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELAXED); }
 FORCE_INLINE uint8 atomic_get_relaxed(volatile uint8* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_get_relaxed(volatile uint16* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_get_relaxed(volatile uint32* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_get_relaxed(volatile uint64* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_get_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n(value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_get_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n(value, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_get_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n(value, __ATOMIC_RELAXED); }
 FORCE_INLINE uint8 atomic_increment_relaxed(volatile uint8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
 FORCE_INLINE uint8 atomic_decrement_relaxed(volatile uint8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_increment_relaxed(volatile uint16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_decrement_relaxed(volatile uint16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_increment_relaxed(volatile uint32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_decrement_relaxed(volatile uint32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_increment_relaxed(volatile uint64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_decrement_relaxed(volatile uint64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_increment_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_decrement_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_increment_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_decrement_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_increment_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_decrement_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_add_relaxed(volatile uint8* value, uint8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_sub_relaxed(volatile uint8* value, uint8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile uint16* value, uint16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile uint16* value, uint16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile uint32* value, uint32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile uint32* value, uint32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_add_relaxed(volatile uint64* value, uint64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_sub_relaxed(volatile uint64* value, uint64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_compare_exchange_strong_relaxed(volatile uint32* value, uint32* expected, uint32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
-FORCE_INLINE uint64 atomic_compare_exchange_strong_relaxed(volatile uint64* value, uint64* expected, uint64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
+FORCE_INLINE void atomic_add_relaxed(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_add_relaxed(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_add_relaxed(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_sub_relaxed(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_compare_exchange_strong_relaxed(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
+FORCE_INLINE uint64 atomic_compare_exchange_strong_relaxed(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return *expected; }
 FORCE_INLINE uint8 atomic_fetch_add_relaxed(volatile uint8* value, uint8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
 FORCE_INLINE uint8 atomic_fetch_sub_relaxed(volatile uint8* value, uint8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_fetch_add_relaxed(volatile uint16* value, uint16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint16 atomic_fetch_sub_relaxed(volatile uint16* value, uint16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_fetch_add_relaxed(volatile uint32* value, uint32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint32 atomic_fetch_sub_relaxed(volatile uint32* value, uint32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_fetch_add_relaxed(volatile uint64* value, uint64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
-FORCE_INLINE uint64 atomic_fetch_sub_relaxed(volatile uint64* value, uint64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_fetch_add_relaxed(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint16 atomic_fetch_sub_relaxed(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_fetch_add_relaxed(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint32 atomic_fetch_sub_relaxed(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_fetch_add_relaxed(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELAXED); }
+FORCE_INLINE uint64 atomic_fetch_sub_relaxed(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_and_relaxed(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_and_relaxed(volatile int8* value, int8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile int16* value, int16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile int32* value, int32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_and_relaxed(volatile int64* value, int64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_and_relaxed(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_or_relaxed(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
 FORCE_INLINE void atomic_or_relaxed(volatile int8* value, int8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile int16* value, int16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile int32* value, int32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
-FORCE_INLINE void atomic_or_relaxed(volatile int64* value, int64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }
+FORCE_INLINE void atomic_or_relaxed(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELAXED); }

 FORCE_INLINE void* atomic_get_acquire(void** target) noexcept { return __atomic_load_n(target, __ATOMIC_ACQUIRE); }
 FORCE_INLINE int8 atomic_fetch_set_acquire(volatile int8* value, int8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int16 atomic_fetch_set_acquire(volatile int16* value, int16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int32 atomic_fetch_set_acquire(volatile int32* value, int32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int64 atomic_fetch_set_acquire(volatile int64* value, int64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int16 atomic_fetch_set_acquire(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int32 atomic_fetch_set_acquire(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int64 atomic_fetch_set_acquire(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
 FORCE_INLINE int8 atomic_get_acquire(volatile int8* value) noexcept { return __atomic_load_n((int8 *) value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int16 atomic_get_acquire(volatile int16* value) noexcept { return __atomic_load_n((int16 *) value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int32 atomic_get_acquire(volatile int32* value) noexcept { return __atomic_load_n((int32 *) value, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int64 atomic_get_acquire(volatile int64* value) noexcept { return __atomic_load_n((int64 *) value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int16 atomic_get_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n((int16 *) value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int32 atomic_get_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n((int32 *) value, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int64 atomic_get_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n((int64 *) value, __ATOMIC_ACQUIRE); }
 FORCE_INLINE int8 atomic_increment_acquire(volatile int8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
 FORCE_INLINE int8 atomic_decrement_acquire(volatile int8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int16 atomic_increment_acquire(volatile int16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int16 atomic_decrement_acquire(volatile int16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int32 atomic_increment_acquire(volatile int32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int32 atomic_decrement_acquire(volatile int32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int64 atomic_increment_acquire(volatile int64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
-FORCE_INLINE int64 atomic_decrement_acquire(volatile int64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int16 atomic_increment_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int16 atomic_decrement_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int32 atomic_increment_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int32 atomic_decrement_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int64 atomic_increment_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
+FORCE_INLINE int64 atomic_decrement_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
 FORCE_INLINE void atomic_add_acquire(volatile int8* value, int8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
 FORCE_INLINE void atomic_sub_acquire(volatile int8* value, int8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_add_acquire(volatile int16* value, int16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_sub_acquire(volatile int16* value, int16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_add_acquire(volatile int32* value, int32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_sub_acquire(volatile int32* value, int32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_add_acquire(volatile int64* value, int64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
-FORCE_INLINE void atomic_sub_acquire(volatile int64* value, int64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_add_acquire(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_sub_acquire(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_add_acquire(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_sub_acquire(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_add_acquire(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
+FORCE_INLINE void atomic_sub_acquire(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
 FORCE_INLINE f32 atomic_compare_exchange_strong_acquire(volatile f32* value, f32* expected, f32 desired) noexcept {
+    ASSERT_STRICT(((uintptr_t) value % 4) == 0);
+
     volatile _atomic_32* value_as_union = (volatile _atomic_32*)value;
     _atomic_32* expected_as_union = (_atomic_32*)expected;
     _atomic_32 desired_as_union;
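ASSERT_STRICT itself is defined elsewhere in the repo (utils/TestUtils.h is pulled in by the sibling header above), and its exact behavior is not visible in this diff. A plausible shape for such a strict-mode-only assert, offered purely as an assumption-labeled sketch:

// Assumption: ASSERT_STRICT compiles away unless a strict/debug build flag
// is set. This is NOT the repo's definition, just a plausible stand-in.
#if DEBUG_STRICT
    #define ASSERT_STRICT(expr)                                        \
        do {                                                           \
            if (!(expr)) {                                             \
                /* trap so misaligned atomics fail fast in testing */  \
                __builtin_trap();                                      \
            }                                                          \
        } while (0)
#else
    #define ASSERT_STRICT(expr) ((void) 0)
#endif

Whatever the real definition is, the intent of the commit is clear: catch misaligned pointers at the call site during testing while adding zero cost to release builds.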
@@ -175,6 +180,8 @@ FORCE_INLINE f32 atomic_compare_exchange_strong_acquire(volatile f32* value, f32
     return expected_as_union->f;
 }
 FORCE_INLINE f64 atomic_compare_exchange_strong_acquire(volatile f64* value, f64* expected, f64 desired) noexcept {
+    ASSERT_STRICT(((uintptr_t) value % 8) == 0);
+
     volatile _atomic_64* value_as_union = (volatile _atomic_64*)value;
     _atomic_64* expected_as_union = (_atomic_64*)expected;
     _atomic_64 desired_as_union;
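The atomic_and_*/atomic_or_* wrappers seen in the surrounding hunks return void, i.e. they deliberately discard the previous value that __atomic_fetch_and/__atomic_fetch_or produce. A hypothetical usage sketch (demo code, not from the repo) showing the intended flag-manipulation pattern and that design trade-off:

// Hypothetical demo: lock-free status flags via the void-returning wrappers.
#define WORKER_RUNNING  0x01
#define WORKER_DRAINING 0x02

void flags_demo(volatile uint8* flags) {
    atomic_or_relaxed(flags, (uint8) WORKER_RUNNING);    // set a bit
    atomic_and_relaxed(flags, (uint8) ~WORKER_RUNNING);  // clear a bit

    // These wrappers discard the previous mask; callers that need it
    // must use a fetch_* variant instead.
}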
@ -187,102 +194,104 @@ FORCE_INLINE f64 atomic_compare_exchange_strong_acquire(volatile f64* value, f64
|
|||
|
||||
return expected_as_union->f;
|
||||
}
|
||||
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire(volatile int32* value, int32* expected, int32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire(volatile int64* value, int64* expected, int64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE int8 atomic_fetch_add_acquire(volatile int8* value, int8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int8 atomic_fetch_sub_acquire(volatile int8* value, int8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int16 atomic_fetch_add_acquire(volatile int16* value, int16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int16 atomic_fetch_sub_acquire(volatile int16* value, int16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int32 atomic_fetch_add_acquire(volatile int32* value, int32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int32 atomic_fetch_sub_acquire(volatile int32* value, int32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int64 atomic_fetch_add_acquire(volatile int64* value, int64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int64 atomic_fetch_sub_acquire(volatile int64* value, int64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int16 atomic_fetch_add_acquire(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int16 atomic_fetch_sub_acquire(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int32 atomic_fetch_add_acquire(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int32 atomic_fetch_sub_acquire(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int64 atomic_fetch_add_acquire(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE int64 atomic_fetch_sub_acquire(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint8* value, uint8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint16* value, uint16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint32* value, uint32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint64* value, uint64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_set_acquire(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint8 atomic_fetch_set_acquire(volatile uint8* value, uint8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_fetch_set_acquire(volatile uint16* value, uint16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_fetch_set_acquire(volatile uint32* value, uint32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_fetch_set_acquire(volatile uint64* value, uint64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_fetch_set_acquire(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_fetch_set_acquire(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_fetch_set_acquire(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint8 atomic_get_acquire(volatile uint8* value) noexcept { return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_get_acquire(volatile uint16* value) noexcept { return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_get_acquire(volatile uint32* value) noexcept { return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_get_acquire(volatile uint64* value) noexcept { return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_get_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_get_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_get_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n(value, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint8 atomic_increment_acquire(volatile uint8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint8 atomic_decrement_acquire(volatile uint8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_increment_acquire(volatile uint16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_decrement_acquire(volatile uint16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_increment_acquire(volatile uint32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_decrement_acquire(volatile uint32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_increment_acquire(volatile uint64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_decrement_acquire(volatile uint64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_increment_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_decrement_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_increment_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_decrement_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_increment_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint64 atomic_decrement_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint8* value, uint8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint8* value, uint8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint16* value, uint16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint16* value, uint16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint32* value, uint32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint32* value, uint32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint64* value, uint64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint64* value, uint64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire(volatile uint32* value, uint32* expected, uint32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire(volatile uint64* value, uint64* expected, uint64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_add_acquire(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE void atomic_sub_acquire(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return *expected; }
|
||||
FORCE_INLINE uint8 atomic_fetch_add_acquire(volatile uint8* value, uint8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint8 atomic_fetch_sub_acquire(volatile uint8* value, uint8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
|
||||
FORCE_INLINE uint16 atomic_fetch_add_acquire(volatile uint16* value, uint16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint16 atomic_fetch_sub_acquire(volatile uint16* value, uint16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint32 atomic_fetch_add_acquire(volatile uint32* value, uint32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint32 atomic_fetch_sub_acquire(volatile uint32* value, uint32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint64 atomic_fetch_add_acquire(volatile uint64* value, uint64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire(volatile uint64* value, uint64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint16 atomic_fetch_add_acquire(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint16 atomic_fetch_sub_acquire(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint32 atomic_fetch_add_acquire(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint32 atomic_fetch_sub_acquire(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint64 atomic_fetch_add_acquire(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int8* value, int8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int16* value, int16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int32* value, int32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int64* value, int64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_and_acquire(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int8* value, int8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int16* value, int16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int32* value, int32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int64* value, int64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }
FORCE_INLINE void atomic_or_acquire(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE); }

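The acquire-ordered bit operations above are fire-and-forget: they discard the value returned by __atomic_fetch_and/__atomic_fetch_or. A minimal sketch of how they might drive a shared status word; STATUS_READY, `status` and `example_flags` are hypothetical names, and `status` is assumed to satisfy the 4-byte alignment assert:

const uint32 STATUS_READY = 1u << 0; // hypothetical flag bit
volatile uint32 status = 0;          // assumed 4-byte aligned

void example_flags() {
    atomic_or_acquire(&status, STATUS_READY);            // set the ready bit
    atomic_and_acquire(&status, (uint32) ~STATUS_READY); // clear it again
}
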
FORCE_INLINE void atomic_set_release(void** target, void* value) noexcept { __atomic_store_n(target, value, __ATOMIC_RELEASE); }
// @bug __ATOMIC_RELEASE is not a valid memory order for __atomic_load_n (loads only allow RELAXED/CONSUME/ACQUIRE/SEQ_CST); this applies to every atomic_get_release overload in this section
FORCE_INLINE void* atomic_get_release(void** target) noexcept { return __atomic_load_n(target, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int8* value, int8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int16* value, int16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int32* value, int32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int64* value, int64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int8 atomic_fetch_set_release(volatile int8* value, int8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_set_release(volatile int16* value, int16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_set_release(volatile int32* value, int32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_set_release(volatile int64* value, int64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_set_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_set_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_set_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE int8 atomic_get_release(volatile int8* value) noexcept { return __atomic_load_n((int8 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_get_release(volatile int16* value) noexcept { return __atomic_load_n((int16 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_get_release(volatile int32* value) noexcept { return __atomic_load_n((int32 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_get_release(volatile int64* value) noexcept { return __atomic_load_n((int64 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_get_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n((int16 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_get_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n((int32 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_get_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n((int64 *) value, __ATOMIC_RELEASE); }
FORCE_INLINE int8 atomic_increment_release(volatile int8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int8 atomic_decrement_release(volatile int8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_increment_release(volatile int16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_decrement_release(volatile int16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_increment_release(volatile int32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_decrement_release(volatile int32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_increment_release(volatile int64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_decrement_release(volatile int64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_increment_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_decrement_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_increment_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_decrement_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_increment_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_decrement_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int8* value, int8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int8* value, int8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int16* value, int16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int16* value, int16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int32* value, int32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int32* value, int32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int64* value, int64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int64* value, int64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
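The release stores above pair with an acquiring load on the reader side. A minimal publish/consume sketch built only from wrappers in this header; `payload`, `ready`, `producer` and `consumer` are hypothetical names, and the seq-cst atomic_get_acquire_release getter defined further below is used on the read side (stronger than the acquire load this pattern minimally needs):

static int32 payload;       // plain data, published via the flag
static volatile int8 ready; // 0 = not published, 1 = published

void producer() {
    payload = 42;                  // ordinary write...
    atomic_set_release(&ready, 1); // ...made visible by the release store
}

void consumer() {
    if (atomic_get_acquire_release(&ready)) {
        int32 v = payload; // reads 42: happens-after the release store
        (void) v;
    }
}
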
FORCE_INLINE f32 atomic_compare_exchange_strong_release(volatile f32* value, f32* expected, f32 desired) noexcept {
    ASSERT_STRICT(((uintptr_t) value % 4) == 0);

    volatile _atomic_32* value_as_union = (volatile _atomic_32*)value;
    _atomic_32* expected_as_union = (_atomic_32*)expected;
    _atomic_32 desired_as_union;

@@ -296,6 +305,8 @@ FORCE_INLINE f32 atomic_compare_exchange_strong_release(volatile f32* value, f32

    return expected_as_union->f;
}

FORCE_INLINE f64 atomic_compare_exchange_strong_release(volatile f64* value, f64* expected, f64 desired) noexcept {
    ASSERT_STRICT(((uintptr_t) value % 8) == 0);

    volatile _atomic_64* value_as_union = (volatile _atomic_64*)value;
    _atomic_64* expected_as_union = (_atomic_64*)expected;
    _atomic_64 desired_as_union;

@@ -308,191 +319,200 @@ FORCE_INLINE f64 atomic_compare_exchange_strong_release(volatile f64* value, f64

    return expected_as_union->f;
}
FORCE_INLINE int32 atomic_compare_exchange_strong_release(volatile int32* value, int32* expected, int32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELEASE); return *expected; }
FORCE_INLINE int64 atomic_compare_exchange_strong_release(volatile int64* value, int64* expected, int64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELEASE); return *expected; }
// NOTE: __atomic_compare_exchange_n does not allow __ATOMIC_RELEASE as the failure order; the relaxed failure order below matches the unsigned overloads
FORCE_INLINE int32 atomic_compare_exchange_strong_release(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
FORCE_INLINE int64 atomic_compare_exchange_strong_release(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
FORCE_INLINE int8 atomic_fetch_add_release(volatile int8* value, int8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int8 atomic_fetch_sub_release(volatile int8* value, int8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_add_release(volatile int16* value, int16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_sub_release(volatile int16* value, int16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_add_release(volatile int32* value, int32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_sub_release(volatile int32* value, int32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_add_release(volatile int64* value, int64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_sub_release(volatile int64* value, int64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_add_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int16 atomic_fetch_sub_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_add_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int32 atomic_fetch_sub_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_add_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_sub_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_and_release(volatile int64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_and_fetch(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE int64 atomic_fetch_or_release(volatile int64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_or_fetch(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_and_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_and_fetch(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_or_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_or_fetch(value, mask, __ATOMIC_RELEASE); }
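Note that these fetch_and/fetch_or wrappers return the *new* value (they map to __atomic_and_fetch/__atomic_or_fetch, not the fetch-old builtins). A sketch under that assumption; ALL_DONE, `mark_done` and `worker` are hypothetical:

const uint64 ALL_DONE = 0xFF; // hypothetical: 8 workers, one bit each

bool mark_done(volatile uint64* done_mask, uint32 worker) {
    // true only for the worker whose bit completes the mask
    return atomic_fetch_or_release(done_mask, 1ull << worker) == ALL_DONE;
}
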
FORCE_INLINE void atomic_set_release(volatile uint8* value, uint8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint16* value, uint16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint32* value, uint32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint64* value, uint64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_set_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint8 atomic_fetch_set_release(volatile uint8* value, uint8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_set_release(volatile uint16* value, uint16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_set_release(volatile uint32* value, uint32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_set_release(volatile uint64* value, uint64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_set_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_set_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_set_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_RELEASE); }
FORCE_INLINE uint8 atomic_get_release(volatile uint8* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_get_release(volatile uint16* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_get_release(volatile uint32* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_get_release(volatile uint64* value) noexcept { return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_get_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_get_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_get_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n(value, __ATOMIC_RELEASE); }
FORCE_INLINE uint8 atomic_increment_release(volatile uint8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint8 atomic_decrement_release(volatile uint8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_increment_release(volatile uint16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_decrement_release(volatile uint16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_increment_release(volatile uint32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_decrement_release(volatile uint32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_increment_release(volatile uint64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_decrement_release(volatile uint64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_increment_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_decrement_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_increment_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_decrement_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_increment_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_decrement_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint8* value, uint8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint8* value, uint8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint16* value, uint16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint16* value, uint16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint32* value, uint32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint32* value, uint32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint64* value, uint64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint64* value, uint64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_add_release(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_sub_release(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
FORCE_INLINE uint64 atomic_compare_exchange_strong_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
// @bug Wrong implementation, see strong_acquire_release
FORCE_INLINE uint32 atomic_compare_exchange_strong_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
FORCE_INLINE uint64 atomic_compare_exchange_strong_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return *expected; }
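Driving the pointer-based release CAS takes some care: on failure the builtin writes the observed value back through `expected`, and the wrapper then returns that same *expected, so success is detected by checking that `expected` was left untouched. A minimal sketch; `increment_with_cas` and `word` are hypothetical:

void increment_with_cas(volatile uint32* word) {
    uint32 expected = 0;
    for (;;) {
        uint32 prev = expected;
        atomic_compare_exchange_strong_release(word, &expected, prev + 1);
        if (expected == prev) {
            break; // `expected` unchanged => the exchange took effect
        }
        // on failure `expected` now holds the value actually observed; retry
    }
}
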
FORCE_INLINE uint8 atomic_fetch_add_release(volatile uint8* value, uint8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint8 atomic_fetch_sub_release(volatile uint8* value, uint8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_add_release(volatile uint16* value, uint16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_sub_release(volatile uint16* value, uint16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_add_release(volatile uint32* value, uint32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_sub_release(volatile uint32* value, uint32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_add_release(volatile uint64* value, uint64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_sub_release(volatile uint64* value, uint64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_add_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint16 atomic_fetch_sub_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_add_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint32 atomic_fetch_sub_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_add_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE uint64 atomic_fetch_sub_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int8* value, int8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int16* value, int16 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int32* value, int32 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int64* value, int64 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_and_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int8* value, int8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint16* value, uint16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int16* value, int16 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint32* value, uint32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int32* value, int32 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint64* value, uint64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int64* value, int64 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }
FORCE_INLINE void atomic_or_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_RELEASE); }

FORCE_INLINE void atomic_set_acquire_release(void** target, void* value) noexcept { __atomic_store_n(target, value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void* atomic_get_acquire_release(void** target) noexcept { return __atomic_load_n(target, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int8* value, int8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int16* value, int16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int32* value, int32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int64* value, int64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int8 atomic_fetch_set_acquire_release(volatile int8* value, int8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_set_acquire_release(volatile int16* value, int16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_set_acquire_release(volatile int32* value, int32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_set_acquire_release(volatile int64* value, int64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_set_acquire_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_set_acquire_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_set_acquire_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int8 atomic_get_acquire_release(volatile int8* value) noexcept { return __atomic_load_n((int8 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_get_acquire_release(volatile int16* value) noexcept { return __atomic_load_n((int16 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_get_acquire_release(volatile int32* value) noexcept { return __atomic_load_n((int32 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_get_acquire_release(volatile int64* value) noexcept { return __atomic_load_n((int64 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_get_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n((int16 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_get_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n((int32 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_get_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n((int64 *) value, __ATOMIC_SEQ_CST); }
FORCE_INLINE int8 atomic_increment_acquire_release(volatile int8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int8 atomic_decrement_acquire_release(volatile int8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_increment_acquire_release(volatile int16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_decrement_acquire_release(volatile int16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_increment_acquire_release(volatile int32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_decrement_acquire_release(volatile int32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_increment_acquire_release(volatile int64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_decrement_acquire_release(volatile int64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_increment_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_decrement_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_increment_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_decrement_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_increment_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_decrement_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int8* value, int8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int8* value, int8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int16* value, int16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int16* value, int16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int32* value, int32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int32* value, int32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int64* value, int64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int64* value, int64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire_release(volatile f32* value, f32* expected, f32 desired) noexcept {
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire_release(volatile f32* value, f32 expected, f32 desired) noexcept {
    ASSERT_STRICT(((uintptr_t) value % 4) == 0);

    volatile _atomic_32* value_as_union = (volatile _atomic_32*)value;
    _atomic_32* expected_as_union = (_atomic_32*)expected;
    _atomic_32 expected_as_union = { expected }; // brace-init of .f; a cast to union type is a C-only extension
    _atomic_32 desired_as_union;
    desired_as_union.f = desired;

    __atomic_compare_exchange_n(
        &value_as_union->l, &expected_as_union->l, desired_as_union.l, 0,
        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST
        &value_as_union->l, &expected_as_union.l, desired_as_union.l, 0,
        __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE
    );

    return expected_as_union->f;
    return expected_as_union.f;
}
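With the new by-value signature the caller keeps its own copy of `expected` and compares it against the returned value. A minimal lock-free accumulate sketch; `accumulate` and `total` are hypothetical, `total` must be 4-byte aligned, and the value comparison assumes no NaNs:

void accumulate(volatile f32* total, f32 x) {
    f32 seen = *total; // racy seed read; the CAS below validates it
    for (;;) {
        f32 prev = atomic_compare_exchange_strong_acquire_release(total, seen, seen + x);
        if (prev == seen) {
            break;   // returned value unchanged => the exchange succeeded
        }
        seen = prev; // retry from the value actually observed
    }
}
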
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire_release(volatile f64* value, f64* expected, f64 desired) noexcept {
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire_release(volatile f64* value, f64 expected, f64 desired) noexcept {
    ASSERT_STRICT(((uintptr_t) value % 8) == 0);

    volatile _atomic_64* value_as_union = (volatile _atomic_64*)value;
    _atomic_64* expected_as_union = (_atomic_64*)expected;
    _atomic_64 expected_as_union = { expected }; // brace-init of .f; a cast to union type is a C-only extension
    _atomic_64 desired_as_union;
    desired_as_union.f = desired;

    __atomic_compare_exchange_n(
        &value_as_union->l, &expected_as_union->l, desired_as_union.l, 0,
        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST
        &value_as_union->l, &expected_as_union.l, desired_as_union.l, 0,
        __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE
    );

    return expected_as_union->f;
    return expected_as_union.f;
}
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire_release(volatile int32* value, int32* expected, int32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); return *expected; }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire_release(volatile int64* value, int64* expected, int64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); return *expected; }
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire_release(volatile int32* value, int32 expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, &expected, desired, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); return expected; }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire_release(volatile int64* value, int64 expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, &expected, desired, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); return expected; }
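The integer by-value variants combine naturally into small lock-free helpers. A sketch of a saturating increment; `saturating_inc`, `counter` and `max_value` are hypothetical, and `counter` must be 8-byte aligned:

int64 saturating_inc(volatile int64* counter, int64 max_value) {
    int64 cur = atomic_get_acquire_release(counter);
    while (cur < max_value) {
        int64 prev = atomic_compare_exchange_strong_acquire_release(counter, cur, cur + 1);
        if (prev == cur) {
            return cur + 1; // our increment won
        }
        cur = prev;         // lost the race; retry from the observed value
    }
    return cur;             // already saturated
}
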
FORCE_INLINE int8 atomic_fetch_add_acquire_release(volatile int8* value, int8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int8 atomic_fetch_sub_acquire_release(volatile int8* value, int8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_add_acquire_release(volatile int16* value, int16 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_sub_acquire_release(volatile int16* value, int16 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_add_acquire_release(volatile int32* value, int32 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_sub_acquire_release(volatile int32* value, int32 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_add_acquire_release(volatile int64* value, int64 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_sub_acquire_release(volatile int64* value, int64 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_add_acquire_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int16 atomic_fetch_sub_acquire_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_add_acquire_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int32 atomic_fetch_sub_acquire_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_add_acquire_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE int64 atomic_fetch_sub_acquire_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint8* value, uint8 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_store_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint8 atomic_fetch_set_acquire_release(volatile uint8* value, uint8 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_fetch_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_fetch_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_fetch_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_fetch_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_fetch_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_fetch_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_exchange_n(value, new_value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint8 atomic_get_acquire_release(volatile uint8* value) noexcept { return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_get_acquire_release(volatile uint16* value) noexcept { return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_get_acquire_release(volatile uint32* value) noexcept { return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_get_acquire_release(volatile uint64* value) noexcept { return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_get_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_get_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_get_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_load_n(value, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint8 atomic_increment_acquire_release(volatile uint8* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint8 atomic_decrement_acquire_release(volatile uint8* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_increment_acquire_release(volatile uint16* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_decrement_acquire_release(volatile uint16* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_increment_acquire_release(volatile uint32* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_decrement_acquire_release(volatile uint32* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_increment_acquire_release(volatile uint64* value) noexcept { return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_decrement_acquire_release(volatile uint64* value) noexcept { return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_increment_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_decrement_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_increment_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_decrement_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_increment_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_decrement_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, 1, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint8* value, uint8 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint8* value, uint8 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint16* value, uint16 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint16* value, uint16 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint32* value, uint32 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint32* value, uint32 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint64* value, uint64 increment) noexcept { __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint64* value, uint64 decrement) noexcept { __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); return *expected; }
|
||||
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { __atomic_compare_exchange_n(value, expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); return *expected; }
|
||||
FORCE_INLINE void atomic_add_acquire_release(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE void atomic_sub_acquire_release(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE void atomic_add_acquire_release(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE void atomic_sub_acquire_release(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE void atomic_add_acquire_release(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_add_fetch(value, increment, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE void atomic_sub_acquire_release(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_sub_fetch(value, decrement, __ATOMIC_SEQ_CST); }
|
||||
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire_release(volatile uint32* value, uint32 expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_compare_exchange_n(value, &expected, desired, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); return expected; }
|
||||
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire_release(volatile uint64* value, uint64 expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_compare_exchange_n(value, &expected, desired, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); return expected; }
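// Illustrative sketch only (not part of this commit): the by-value CAS overloads
// above return the previously observed value, so a typical retry loop compares
// the return value against the expected snapshot. `counter_try_double` is a
// hypothetical helper, not an API of this codebase.
inline bool counter_try_double(volatile uint64* counter) noexcept {
    uint64 expected = *counter;
    for (int32 i = 0; i < 3; ++i) {
        uint64 observed = atomic_compare_exchange_strong_acquire_release(counter, expected, expected * 2);
        if (observed == expected) {
            return true; // CAS succeeded, the value was doubled
        }

        expected = observed; // retry against the freshly observed value
    }

    return false;
}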
FORCE_INLINE uint8 atomic_fetch_add_acquire_release(volatile uint8* value, uint8 operand) noexcept { return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint8 atomic_fetch_sub_acquire_release(volatile uint8* value, uint8 operand) noexcept { return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_fetch_add_acquire_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint16 atomic_fetch_sub_acquire_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_fetch_add_acquire_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint32 atomic_fetch_sub_acquire_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_fetch_add_acquire_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_add_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return __atomic_sub_fetch(value, operand, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile int8* value, int8 mask) noexcept { __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_and_acquire_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_and(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint8* value, uint8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile int8* value, int8 mask) noexcept { __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
FORCE_INLINE void atomic_or_acquire_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); __atomic_fetch_or(value, mask, __ATOMIC_SEQ_CST); }
// Check out the intrinsic functions fence_memory and fence_write
// These are much faster and could accomplish what you are doing
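// Illustrative sketch only (not part of this commit; assumes GCC/Clang __atomic
// builtins, with fence_memory / fence_write presumably being thin wrappers around
// them): a relaxed store behind an explicit release fence can replace a
// sequentially consistent store when only release ordering is required.
FORCE_INLINE void publish_flag(volatile int32* flag) noexcept {
    __atomic_thread_fence(__ATOMIC_RELEASE);      // order prior writes before the flag
    __atomic_store_n(flag, 1, __ATOMIC_RELAXED);  // cheap store, ordering comes from the fence
}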
@ -9,6 +9,7 @@
#ifndef COMS_COMPILER_GCC_COMPILER_UTILS_H
#define COMS_COMPILER_GCC_COMPILER_UTILS_H

#include "../../stdlib/Types.h"
#include "../../utils/TestUtils.h"

#define PACKED_STRUCT __attribute__((__packed__))
47
compiler/gcc/TypeName.h
Normal file
@ -0,0 +1,47 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_COMPILER_GCC_TYPE_NAME_H
#define COMS_COMPILER_GCC_TYPE_NAME_H

#include "CompilerUtils.h"
#include "../../utils/StringUtils.h"

template<typename T>
constexpr auto GetRawTypeName() {
    constexpr const char* fn = __PRETTY_FUNCTION__;
    constexpr const char* prefix = "T = ";
    constexpr const char* suffix = "]";

    constexpr const char* start = str_find_constexpr(fn, prefix);
    constexpr const char* adjusted_start = start ? start + str_length_constexpr(prefix) : fn;

    constexpr const char* end = str_find_constexpr(adjusted_start, suffix);
    constexpr const char* final_start = end ? adjusted_start : fn;
    constexpr size_t length = end ? (end - adjusted_start) : str_length_constexpr(adjusted_start);

    // Create a struct that holds the string in a constexpr-friendly way
    struct Result {
        char str[128] = {};

        constexpr Result() {
            for (size_t i = 0; i < length && i < 127; ++i) {
                str[i] = final_start[i];
            }
            str[length < 127 ? length : 127] = '\0';
        }

        constexpr const char* Get() const { return str; }
    };

    // This will create a static storage duration object when used at runtime
    static constexpr Result result;
    return result.Get();
}

#endif
@ -10,6 +10,7 @@
#define COMS_COMPILER_MSVC_COMPILER_UTILS_H

#include "../../utils/TestUtils.h"
#include "../../stdlib/Types.h"
#include <basetsd.h>
#include <intrin.h>
@ -83,5 +84,4 @@ void compiler_cpuid(uint32 cpu_info[4], int32 function_id, int32 level = 0) noex
    __cpuidex(cpu_info, function_id, level);
}

#endif
46
compiler/msvc/TypeName.h
Normal file
@ -0,0 +1,46 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_COMPILER_MSVC_TYPE_NAME_H
#define COMS_COMPILER_MSVC_TYPE_NAME_H

#include "CompilerUtils.h"
#include "../../utils/StringUtils.h"

template<typename T>
constexpr auto GetRawTypeName() {
    constexpr const char* fn = __FUNCSIG__;
    constexpr const char* prefix = "GetRawTypeName<";
    constexpr const char* suffix = ">(";
    constexpr const char* start = str_find_constexpr(fn, prefix);
    constexpr const char* adjusted_start = start ? start + str_length_constexpr(prefix) : fn;

    constexpr const char* end = str_find_constexpr(adjusted_start, suffix);
    constexpr const char* final_start = end ? adjusted_start : fn;
    constexpr size_t length = end ? (end - adjusted_start) : str_length_constexpr(adjusted_start);

    // Create a struct that holds the string in a constexpr-friendly way
    struct Result {
        char str[128] = {};

        constexpr Result() {
            for (size_t i = 0; i < length && i < 127; ++i) {
                str[i] = final_start[i];
            }
            str[length < 127 ? length : 127] = '\0';
        }

        constexpr const char* Get() const { return str; }
    };

    // This will create a static storage duration object when used at runtime
    static constexpr Result result;
    return result.Get();
}

#endif
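// Illustrative usage only (not part of this commit; assumes stdio is available):
// GetRawTypeName yields the compiler-dependent spelling of a type extracted from
// __PRETTY_FUNCTION__ / __FUNCSIG__, e.g. "int" for simple types.
inline void debug_print_type_names() {
    printf("%s\n", GetRawTypeName<int>());    // "int"
    printf("%s\n", GetRawTypeName<float>());  // "float"
}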
@ -21,6 +21,7 @@
#endif

#if DB_PSQL
    #include "psql/PsqlDatabase.h"
#else
    int32 db_open_psql(void*) { return 0; }
    void db_close_psql(void*) {}
@ -34,43 +35,46 @@
#endif

inline
int32 db_open(DatabaseConnection* db)
{
    switch (db->type) {
        case DB_TYPE_SQLITE:
            return db_open_sqlite(db);
        case DB_TYPE_MARIA:
            return 0;
        case DB_TYPE_PSQL:
            return db_open_psql(db);
        case DB_TYPE_MSSQL:
            return 0;
        case DB_TYPE_UNKNOWN:
            return 0;
        default:
            UNREACHABLE();
    }

    return 0;
}
inline
void db_close(DatabaseConnection* db)
{
    switch (db->type) {
        case DB_TYPE_SQLITE: {
            db_close_sqlite(db);
            return;
        }
        case DB_TYPE_MARIA: {
            return;
        }
        case DB_TYPE_PSQL: {
            db_close_psql(db);
            return;
        }
        case DB_TYPE_MSSQL: {
            return;
        }
        case DB_TYPE_UNKNOWN:
            return;
        default:
            UNREACHABLE();
    }
}
@ -13,12 +13,15 @@
#include "DatabaseType.h"

struct DatabaseConnection {
    byte con[32];

    uint32 id; // Internal id to identify the connection
    DatabaseType type;
    uint16 port;
    const char* host;
    const char* name;
    const char* user;
    const char* pass;
};

#endif
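// Illustrative note (not part of this commit): the opaque `con` buffer stores the
// driver-specific handle in its first bytes, e.g. for libpq:
//     *((PGconn **) db->con) = PQconnectdb(conninfo); // store the handle
//     PGconn* handle = *((PGconn **) db->con);        // load the handle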
82
database/DatabasePool.h
Normal file
@ -0,0 +1,82 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_DATABASE_POOL_H
#define COMS_DATABASE_POOL_H

#include "../stdlib/Types.h"
#include "../log/Log.h"
#include "../log/Stats.h"
#include "../log/PerformanceProfiler.h"
#include "../compiler/CompilerUtils.h"
#include "../thread/Atomic.h"
#include "../system/Allocator.h"
#include "DatabaseConnection.h"
#include "Database.h"
#include "../memory/ThreadedChunkMemory.h"

struct DatabasePool {
    // How many connections does this pool support?
    uint8 count;
    int16 pos = -1;

    DatabaseConnection* connections;

    // Bitfield showing which connections are free and which are in use
    alignas(8) atomic_64 uint64* free;
};

void db_pool_alloc(DatabasePool* pool, uint8 count) {
    ASSERT_SIMPLE(count);
    PROFILE(PROFILE_DB_POOL_ALLOC, NULL, false, true);
    LOG_1("Allocating DatabasePool");

    uint64 size = count * sizeof(DatabaseConnection)
        + sizeof(uint64) * CEIL_DIV(count, 64) // free
        + 64 * 2; // overhead for alignment

    pool->connections = (DatabaseConnection *) platform_alloc_aligned(size, 64);
    // NOTE: pointer arithmetic already scales by sizeof(DatabaseConnection)
    pool->free = (uint64 *) ROUND_TO_NEAREST((uintptr_t) (pool->connections + count), 64);
    pool->count = count;

    LOG_1("Allocated DatabasePool: %n B", {{LOG_DATA_UINT64, &size}});
}

void db_pool_add(DatabasePool* __restrict pool, DatabaseConnection* __restrict db) noexcept {
    db->id = ++pool->pos;
    memcpy(&pool->connections[pool->pos], db, sizeof(DatabaseConnection));
}

void db_pool_free(DatabasePool* pool) {
    for (int32 i = 0; i < pool->count; ++i) {
        db_close(&pool->connections[i]);
    }

    platform_aligned_free((void **) &pool->connections);
    pool->free = NULL;
    pool->count = 0;

    LOG_1("Freed DatabasePool");
}

// Returns free database connection or null if none could be found
// @todo implement db_pool_get_wait(pool, waittime)
inline
const DatabaseConnection* db_pool_get(DatabasePool* pool) noexcept {
    // state_count is in elements (bits), not in uint64 words
    int32 id = thrd_chunk_get_unset(pool->free, pool->count, 0);

    return id >= 0 ? &pool->connections[id] : NULL;
}

// releases the database connection for use
FORCE_INLINE
void db_pool_release(DatabasePool* pool, int32 id) noexcept {
    thrd_chunk_set_unset(id, pool->free);
}

#endif
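// Illustrative usage only (not part of this commit): acquire a connection from
// the pool, use it, and release it again by its pool id.
inline void db_pool_usage_example(DatabasePool* pool) {
    const DatabaseConnection* db = db_pool_get(pool);
    if (!db) {
        return; // all connections are currently in use
    }

    // ... run queries against db ...

    db_pool_release(pool, (int32) db->id);
}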
@ -17,7 +17,7 @@ enum DatabaseType : byte {
    DB_TYPE_MSSQL
};

DatabaseType db_type_from_string(const char* str)
{
    if (str_compare(str, "sqlite", sizeof("sqlite") - 1) == 0) {
        return DB_TYPE_SQLITE;
48
database/DbParam.h
Normal file
@ -0,0 +1,48 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_DATABASE_PARAM_H
#define COMS_DATABASE_PARAM_H

#include "../stdlib/Types.h"

enum DbParamType : byte {
    DB_PARAM_INT8,
    DB_PARAM_INT16,
    DB_PARAM_INT32,
    DB_PARAM_INT64,
    DB_PARAM_F32,
    DB_PARAM_F64,
    DB_PARAM_TEXT,
    DB_PARAM_NULL
};

struct DbParam {
    DbParamType type;
    union {
        int8 int8_val;
        int16 int16_val;
        int32 int32_val;
        int64 int64_val;
        f32 f32_val;
        f64 f64_val;
        const char* text_val;
    };
};

// Helper macros for cleaner calling syntax
#define DB_INT8(x) {DB_PARAM_INT8, .int8_val=(x)}
#define DB_INT16(x) {DB_PARAM_INT16, .int16_val=(x)}
#define DB_INT32(x) {DB_PARAM_INT32, .int32_val=(x)}
#define DB_INT64(x) {DB_PARAM_INT64, .int64_val=(x)}
#define DB_F32(x) {DB_PARAM_F32, .f32_val=(x)}
#define DB_F64(x) {DB_PARAM_F64, .f64_val=(x)}
#define DB_TEXT(x) {DB_PARAM_TEXT, .text_val=(x)}
#define DB_END {DB_PARAM_NULL}

#endif
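// Illustrative usage only (not part of this commit): building a parameter list
// with the helper macros; the sentinel DB_END marks the end of the array.
inline void db_param_usage_example() {
    DbParam params[] = { DB_INT32(42), DB_TEXT("hello"), DB_END };
    (void) params; // would be passed to db_execute_prepared(...)
}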
229
database/psql/PsqlDatabase.h
Normal file
@ -0,0 +1,229 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_DATABASE_PSQL_H
#define COMS_DATABASE_PSQL_H

#include "../../stdlib/Types.h"
#include "../../memory/RingMemory.h"
#include "../DatabaseConnection.h"
#include "../DbParam.h"

#if _WIN32
    #include "../../dependencies/psql/libpq-fe.h"
#else
    #include <libpq-fe.h>
#endif

inline
int32 db_open_psql(DatabaseConnection* db)
{
    ASSERT_SIMPLE(sizeof(db->con) >= sizeof(PGconn*));

    char conninfo[256];
    sprintf_fast(
        conninfo, sizeof(conninfo),
        "host=%s port=%d dbname=%s user=%s password=%s",
        db->host, db->port, db->name, db->user, db->pass
    );

    PGconn* db_con = PQconnectdb(conninfo);
    if (PQstatus(db_con) != CONNECTION_OK) {
        PQfinish(db_con);

        return -1;
    }

    // The handle is stored in the first bytes of the opaque con buffer
    *((PGconn **) db->con) = db_con;

    return 1;
}

inline
void db_close_psql(DatabaseConnection* db) {
    PQfinish(*((PGconn **) db->con));
    memset(db->con, 0, sizeof(db->con));
}
inline
bool db_prepare_psql(void* con, const char* name, const char* query) {
    PGresult* res = PQprepare((PGconn *) con, name, query, 0, NULL);
    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
        // @todo Handle error

        if (res) {
            PQclear(res);
        }

        return false;
    }

    PQclear(res);

    return true;
}

inline
void db_unprepare_psql(void* con, const char* name) {
    char dealloc[64];
    memcpy(dealloc, "DEALLOCATE ", sizeof("DEALLOCATE ") - 1);
    str_concat_append(dealloc + sizeof("DEALLOCATE ") - 1, name);

    PGresult *del_res = PQexec((PGconn *) con, dealloc);
    PQclear(del_res);
}

// WARNING: uint8 is chosen to ensure no overflow in dealloc, considering 64 bytes per deallocation
void db_unprepare_psql(void* con, const char** name, uint8 length) {
    char dealloc[16 * KILOBYTE];
    memcpy(dealloc, "DEALLOCATE", sizeof("DEALLOCATE") - 1);

    int32 offset = sizeof("DEALLOCATE") - 1;
    for (int32 i = 0; i < length; ++i) {
        dealloc[offset] = ' ';
        ++offset;
        str_concat_append(dealloc + offset, *name);
        offset += str_length(*name);
        ++name;
    }

    dealloc[offset] = '\0';

    PGresult *del_res = PQexec((PGconn *) con, dealloc);
    PQclear(del_res);
}

inline
void db_unprepare_psql(void* con) {
    PGresult *del_res = PQexec((PGconn *) con, "DEALLOCATE ALL");
    PQclear(del_res);
}
void* db_execute_prepared(
    void* con,
    const char* name,
    const DbParam* params,
    int32 param_count,
    RingMemory* ring
) {
    char** values = (char **) ring_get_memory(ring, sizeof(char *) * param_count);
    int32* formats = (int32 *) ring_get_memory(ring, sizeof(int32) * param_count);
    int32* lengths = (int32 *) ring_get_memory(ring, sizeof(int32) * param_count);

    char* local_arena = (char *) ring_get_memory(ring, 4 * KILOBYTE, 64, true);
    char* local_arena_end = local_arena + 4 * KILOBYTE;

    for (int32 i = 0; i < param_count; ++i) {
        switch (params[i].type) {
            case DB_PARAM_INT8: {
                const int32 data_length = sizeof(int8);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((int8 *) values[i]) = params[i].int8_val;
                lengths[i] = sizeof(int8);
                formats[i] = 1;
            } break;
            case DB_PARAM_INT16: {
                const int32 data_length = sizeof(int16);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((int16 *) values[i]) = htons(params[i].int16_val);
                lengths[i] = sizeof(int16);
                formats[i] = 1;
            } break;
            case DB_PARAM_INT32: {
                const int32 data_length = sizeof(int32);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((int32 *) values[i]) = htonl(params[i].int32_val);
                lengths[i] = sizeof(int32);
                formats[i] = 1;
            } break;
            case DB_PARAM_INT64: {
                const int32 data_length = sizeof(int64);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((int64 *) values[i]) = htonll(params[i].int64_val);
                lengths[i] = sizeof(int64);
                formats[i] = 1;
            } break;
            case DB_PARAM_F32: {
                const int32 data_length = sizeof(f32);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((f32 *) values[i]) = params[i].f32_val;
                lengths[i] = sizeof(f32);
                formats[i] = 1;
            } break;
            case DB_PARAM_F64: {
                const int32 data_length = sizeof(f64);
                if (local_arena + data_length < local_arena_end) {
                    values[i] = local_arena;
                    local_arena += data_length;
                } else {
                    values[i] = (char *) ring_get_memory(ring, data_length, 4, true);
                }

                *((f64 *) values[i]) = params[i].f64_val;
                lengths[i] = sizeof(f64);
                formats[i] = 1;
            } break;
            case DB_PARAM_TEXT: {
                values[i] = (char *) params[i].text_val;
                lengths[i] = (int32) str_length(params[i].text_val);
                formats[i] = 0;
            } break;
            default:
                UNREACHABLE();
        }
    }

    PGresult* res = PQexecPrepared(
        (PGconn *) con, name, param_count,
        values, lengths,
        formats, 0
    );

    return res;
}
inline
void* db_execute(void* con, const char* query) {
    PGresult* res = PQexec((PGconn *) con, query);

    return res;
}

inline
void db_result_free(void* result) {
    PQclear((PGresult *) result);
}

#endif
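// Illustrative usage only (not part of this commit): preparing and executing a
// statement with one binary int32 parameter. The statement name and query text
// are examples.
inline void psql_usage_example(void* con, RingMemory* ring) {
    if (db_prepare_psql(con, "find_user", "SELECT name FROM users WHERE id = $1")) {
        DbParam params[] = { DB_INT32(42) };
        void* res = db_execute_prepared(con, "find_user", params, 1, ring);
        db_result_free(res);
        db_unprepare_psql(con, "find_user");
    }
}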
78
database/query/DbQueryBuilder.h
Normal file
@ -0,0 +1,78 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_DATABASE_QUERY_BUILDER_H
#define COMS_DATABASE_QUERY_BUILDER_H

#include "../../stdlib/Types.h"
#include "grammar/DbQueryGrammar.h"

struct DbQueryBuilder;
struct DbGroup; // presumably defined alongside the grammar

struct DbQueryValue {
    char column_name[64];
    DbQueryBuilder* query;
};

struct DbJoinOn {
    uint8 boolean;
    DbQueryValue column;
    DbOperator op; // "operator" is a C++ keyword and cannot be a member name
    DbQueryValue value;
};

struct DbJoin {
    DbQueryValue join;

    uint8 on_length;
    DbJoinOn* ons;
};

struct DbWhere {
    uint8 boolean;
    DbQueryValue column;
    DbOperator op;
    DbQueryValue value;
};

struct DbQueryBuilder {
    // 1 - log
    // 2 - read only
    // 3 - use prepared stmt
    uint8 flags;

    DbQueryType type;

    DbQueryGrammar grammar;

    // Used for select, update, delete, insert, ...
    uint8 column_length;
    DbQueryValue* columns;

    // Used for into and from
    char table[24];

    // Used for values in insert/update etc.
    uint8 value_length;
    DbQueryValue* values;

    uint8 join_length;
    DbJoin* joins;

    uint8 where_length;
    DbWhere* wheres;

    uint8 group_length;
    DbGroup* groups;

    uint8 order_length;
    DbGroup* orders;

    uint64 limit;
    uint64 offset;
};

#endif
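// Illustrative usage only (not part of this commit): filling a minimal builder
// by hand; the where clause is assumed to be allocated by the caller.
inline void db_query_builder_example(DbQueryBuilder* qb, DbWhere* where) {
    str_copy_short(qb->table, "users", sizeof(qb->table));
    qb->where_length = 1;
    qb->wheres = where;
    qb->limit = 10;
}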
17
database/query/grammar/DbQueryGrammar.h
Normal file
@ -0,0 +1,17 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_DATABASE_QUERY_GRAMMAR_H
#define COMS_DATABASE_QUERY_GRAMMAR_H

#include "../../../stdlib/Types.h"

struct DbQueryGrammar {
};

#endif
@ -12,28 +12,32 @@
#include "../../stdlib/Types.h"

#if _WIN32
    #include "../../dependencies/sqlite/src/sqlite3.h"
#else
    #include <sqlite3.h>
#endif

inline
int32 db_open_sqlite(DatabaseConnection* db)
{
    ASSERT_SIMPLE(sizeof(db->con) >= sizeof(sqlite3*));

    int32 rc;
    rc = sqlite3_open(db->host, (sqlite3 **) &db->con);

    // sqlite3_open returns SQLITE_OK (= 0) on success
    if (rc != SQLITE_OK) {
        return -1;
    }

    return 1;
}

inline
void db_close_sqlite(DatabaseConnection* db)
{
    sqlite3_close(*((sqlite3 **) db->con));
}

#endif
@ -1,30 +0,0 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_HTML_TEMPLATE_H
#define COMS_HTML_TEMPLATE_H

#include "../../stdlib/Types.h"

struct HtmlTemplate {

};

bool html_template_load_txt() {

}

void html_template_save_bin() {

}

void html_template_load_bin() {

}

#endif
@ -12,78 +12,10 @@
#include "../../stdlib/Types.h"
#include "../../memory/RingMemory.h"
#include "../../stdlib/PerfectHashMap.h"
#include "../../system/FileUtils.cpp"
#include "../../html/template/HtmlTemplateInterpreter.h"

bool html_template_in_control_structure(const char* str, const char** controls, int32 control_length) {
    for (int32 i = 0; i < control_length; ++i) {
        if (str_compare(controls[i], str) == 0) {

@ -94,8 +26,12 @@ bool html_template_in_control_structure(const char* str, const char** controls,
    return false;
}

// @performance This combines load and build, that should be two separate functions
// Data layout:
//      1. minified text file
//      2. AST
void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const char* str, int32 alignment = 64) {
    char* minified = (char *) ROUND_TO_NEAREST((uintptr_t) cache->data + (uintptr_t) cache->data_pos, alignment);
    char* minified_start = minified;

    static const char* CONTROL_STRUCTURE_START[] = {

@ -142,8 +78,9 @@ void html_template_cache_load(HtmlTemplateCache* cache, const char* key, const c
        *minified++ = *str++;
    }

    cache->data_pos += ((uintptr_t) minified - (uintptr_t) minified_start);

    // Now add AST to cache
    HtmlTemplateToken current_token = html_template_token_next((const char**) &minified_start, HTML_TEMPLATE_CONTEXT_FLAG_HTML);

    HtmlTemplateContextStack context_stack = {};

@ -152,7 +89,7 @@ void html_template_cache_load(HtmlTemplateCache* cache, const char* key, const c

    // @todo Instead of doing this, we want to use the cache.memory
    // For this to work we need to pass the current memory position however into this function
    byte* memory_start = cache->data + cache->data_pos;
    byte* memory = memory_start;
    HtmlTemplateASTNode* ast = html_template_statement_parse(
        (const char**) &minified_start,

@ -164,16 +101,16 @@ void html_template_cache_load(HtmlTemplateCache* cache, const char* key, const c
        &memory
    );

    cache->data_pos += ((uintptr_t) memory - (uintptr_t) memory_start);

    ASSERT_SIMPLE(ast);
    ASSERT_SIMPLE(((uintptr_t) ast) % alignment == 0);
    // We only store the AST index in the hash map
    perfect_hashmap_insert(&cache->hm, key, (int32) ((uintptr_t) ast - (uintptr_t) cache->data));
}

static
void html_template_cache_iter(const char* path, va_list args) {
    PerfectHashMapRef* cache = va_arg(args, PerfectHashMapRef*);
    RingMemory* ring = va_arg(args, RingMemory*);

    char full_path[MAX_PATH];

@ -185,19 +122,22 @@ void html_template_cache_iter(const char* path, va_list args) {
    html_template_cache_load(cache, path, (const char *) file.content);
}

void raw_file_cache_iter(const char* path, va_list args) {
    PerfectHashMapRef* cache = va_arg(args, PerfectHashMapRef*);
    RingMemory* ring = va_arg(args, RingMemory*);

    char full_path[MAX_PATH];
    relative_to_absolute(path, full_path);

    FileBody file = {};
    file_read(full_path, &file, ring);

    perfect_hashmap_insert(cache, path, file.content, file.size);
}

HtmlTemplateASTNode* html_template_cache_get(const PerfectHashMapRef* cache, const char* key)
{
    return (HtmlTemplateASTNode *) perfect_hashmap_get_value(cache, key);
}

#endif
@ -269,7 +269,12 @@ bool html_template_condition_eval(HtmlTemplateASTNode *node, HtmlTemplateContext
}

// @todo should take in a buffer for template output
int32 html_template_interpret(
    HtmlTemplateASTNode *node,
    char* buffer,
    int32 buffer_size,
    HtmlTemplateContextStack *context_stack
) {
    int32 out_length = 0;

    switch (node->type) {
2
html/template/HtmlTemplateTranspiler.h
Normal file
@ -0,0 +1,2 @@
// Transpile to js template -> create template file for the frontend from the backend template file
// Transpile to proprietary application/data format -> creates frontend response for data only request
77
html/template/TemplateCache.h
Normal file
@ -0,0 +1,77 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_HTML_TEMPLATE_H
#define COMS_HTML_TEMPLATE_H

#include "../../stdlib/Types.h"
#include "../../stdlib/PerfectHashMap.h"
#include "../../memory/RingMemory.h"
#include "../../log/Log.h"
#include "../../system/FileUtils.cpp"

static
void template_find(const char* path, va_list args) {
    char** paths = va_arg(args, char**);
    uint32* path_count = va_arg(args, uint32*);
    uint32* max_path_count = va_arg(args, uint32*);
    uint32* total_file_size = va_arg(args, uint32*);
    RingMemory* ring = va_arg(args, RingMemory*);

    if (*path_count == *max_path_count) {
        uint32 old_max_path_count = *max_path_count;

        *max_path_count += 1000;
        char* new_paths = (char *) ring_get_memory(ring, (*max_path_count) * 256 * sizeof(char), 8, true);
        memcpy(new_paths, *paths, old_max_path_count * 256 * sizeof(char));
        *paths = new_paths;
    }

    *total_file_size += file_size(path);
    str_copy_short(*paths + (*path_count) * 256, path, 256);
    ++(*path_count);
}

void template_cache_alloc(
    PerfectHashMapRef* cache,
    const char* basedir,
    const char* file_ending,
    RingMemory* ring
) {
    // @todo limit the maximum cache size in the dynamic resize

    // the layout of the cache is as follows:
    //      * Perfect hash map memory which contains the offsets into this cache where the root AST node per template can be found (hash_entries)
    //      * Per template memory:
    //          * minified template string (64 byte aligned)
    //          * AST, with its own values or alternatively a pointer into the template string depending on the data (32 byte aligned for EVERY AST node)

    uint32 max_path_count = 1000;
    uint32 path_count = 0;
    char* paths = (char *) ring_get_memory(ring, max_path_count * 256 * sizeof(char), 8, true);
    uint32 total_file_size = 0;

    iterate_directory(basedir, file_ending, template_find, &paths, &path_count, &max_path_count, &total_file_size, ring);

    perfect_hashmap_alloc(
        cache,
        path_count,
        OMS_MAX((uint64) (total_file_size * 1.2f), (uint64) (total_file_size + 1 * KILOBYTE))
    );

    perfect_hashmap_prepare(&cache->hm, (const char*) paths, path_count, 256, 10000, ring);

    LOG_1(
        "Created template cache for %n templates with %n B in uncompressed file size",
        {
            {LOG_DATA_INT32, &path_count},
            {LOG_DATA_INT32, &total_file_size}
        }
    );
}

#endif
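// Illustrative usage only (not part of this commit): the directory and file
// ending below are example values.
inline void template_cache_usage_example(RingMemory* ring) {
    PerfectHashMapRef cache = {};
    template_cache_alloc(&cache, "./templates", ".tpl.html", ring);
}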
@ -27,7 +27,11 @@
// 3 = debug logging
// 4 = most verbose (probably has significant performance impacts)
#if DEBUG
    #if DEBUG_STRICT
        #define LOG_LEVEL 4
    #else
        #define LOG_LEVEL 3
    #endif
#elif INTERNAL
    #define LOG_LEVEL 2
#elif RELEASE
@ -28,6 +28,7 @@
    PROFILE_BUFFER_ALLOC,
    PROFILE_CHUNK_ALLOC,
    PROFILE_RING_ALLOC,
    PROFILE_DB_POOL_ALLOC,
    PROFILE_THREAD_POOL_ALLOC,
    PROFILE_CMD_ITERATE,
    PROFILE_CMD_FONT_LOAD_SYNC,
@ -166,7 +166,8 @@ byte* chunk_get_element(ChunkMemory* buf, uint32 element, bool zeroed = false) n
    return offset;
}

// This is a special case of the chunk_reserve code where we try to find n unset elements
int32 chunk_get_unset(uint64* state, uint32 state_count, int32 start_index = 0) noexcept {
    if ((uint32) start_index >= state_count) {
        start_index = 0;
    }

@ -181,12 +182,23 @@ int32 chunk_get_unset(uint64* state, uint32 state_count, int32 start_index = 0)
        return free_index * 64 + bit_index;
    }

    for (uint32 i = 0; i < state_count; i += 64) {
        if (state[free_index] != 0xFFFFFFFFFFFFFFFF) {
            bit_index = compiler_find_first_bit_r2l(~state[free_index]);

            uint32 id = free_index * 64 + bit_index;
            if (id >= state_count) {
                ++free_index;
                if (free_index * 64 >= state_count) {
                    free_index = 0;
                }

                continue;
            }

            state[free_index] |= (1ULL << bit_index);

            return id;
        }

        ++free_index;
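// Worked example (illustrative only, not part of this commit): mapping an
// element index to its word and bit within the free-list bitfield.
inline void bitfield_index_example() {
    uint32 element = 70;
    uint32 free_index = element / 64; // word 1
    uint32 bit_index = element & 63;  // bit 6
    ASSERT_SIMPLE(free_index == 1 && bit_index == 6);
}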
@ -153,60 +153,57 @@ byte* thrd_chunk_get_element(ThreadedChunkMemory* buf, uint32 element, bool zero
    return chunk_get_element((ChunkMemory *) buf, element, zeroed);
}

inline
void thrd_chunk_set_unset(uint32 element, atomic_64 uint64* state) noexcept {
    int32 free_index = element / 64;
    int32 bit_index = element & 63;

    uint64 mask = ~(1ULL << bit_index);
    atomic_fetch_and_release(&state[free_index], mask);
}

int32 thrd_chunk_get_unset(atomic_64 uint64* state, uint32 state_count, int32 start_index = 0) noexcept {
    if ((uint32) start_index >= state_count) {
        start_index = 0;
    }

    uint32 free_index = start_index / 64;
    uint32 bit_index = start_index & 63;

    // Check standard simple solution
    uint64 current = atomic_get_acquire(&state[free_index]);
    if (!(current & (1ULL << bit_index))) {
        uint64 desired = current | (1ULL << bit_index);
        if (atomic_compare_exchange_strong_acquire_release(&state[free_index], current, desired) == current) {
            return free_index * 64 + bit_index;
        }
    }

    for (uint32 i = 0; i < state_count; i += 64) {
        if (state[free_index] != 0xFFFFFFFFFFFFFFFF) {
            uint64 current_free = atomic_get_acquire(&state[free_index]);
            uint64 inverted = ~current_free;

            int32 free_bit;
            int32 j = 0; // We will only try 3 times to avoid infinite or long loops
            while (j < 3 && (free_bit = compiler_find_first_bit_r2l(inverted)) >= 0) {
                uint32 id = free_index * 64 + free_bit;
                if (id >= state_count) {
                    break;
                }

                uint64 desired = current_free | (1ULL << free_bit);
                uint64 observed = atomic_compare_exchange_strong_acquire_release(&state[free_index], current_free, desired);
                if (observed == current_free) {
                    return id;
                }

                // CAS failed: retry against the freshly observed word
                current_free = observed;
                inverted = ~current_free;
                ++j;
            }
        }

        ++free_index;
        if (free_index * 64 >= state_count) {
            free_index = 0;
        }
    }
@ -237,6 +234,7 @@ void thrd_chunk_free_element(ThreadedChunkMemory* buf, uint64 free_index, int32
        if (old_value == new_value) {
            return;
        }
    // @bug Wrong use
    } while (!atomic_compare_exchange_strong_release(target, &old_value, new_value));

    DEBUG_MEMORY_DELETE((uintptr_t) (buf->memory + (free_index * 64 + bit_index) * buf->chunk_size), buf->chunk_size);

@ -270,6 +268,7 @@ void thrd_chunk_free_elements(ThreadedChunkMemory* buf, uint64 element, uint32 e
        if (old_value == new_value) {
            break;
        }
    // @bug Wrong use
    } while (!atomic_compare_exchange_strong_release(target, &old_value, new_value));

    // Update the counters and indices
@ -1,67 +0,0 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_MODELS_ACCOUNT_H
#define COMS_MODELS_ACCOUNT_H

#include "../../stdlib/Types.h"

#ifndef MAX_CHAR_NAME_LENGTH
    #define MAX_CHAR_NAME_LENGTH 32
#endif

struct Account {
    uint64 id;
    char name[MAX_CHAR_NAME_LENGTH];

    // relative memory position
    int64 index;

    // @question Maybe add pointers to Player, PacketCache?
};

/**
 * Whenever a user connects to the game server we immediately need to allocate a fixed amount of data.
 * Instead of putting this data willy-nilly into memory we can put all the user/account data always at the
 * same offset relative to the memory area of that data type.
 *
 * e.g. this means that if the account with the id X is found at position 12 in memory (not in the hashmap),
 * the player and packet cache are also located at position 12 in their respective buffers.
 * This means we only have to find the memory position ONCE and we know where all the other data of that account is.
 * This also means we don't have to chase too many pointers.
 *
 * @performance It might be faster to make Player and PacketCache part of the Account?
 *      It really depends on how we perform the game loop.
 *      Are we handling account by account OR (probably should be inside account for L1 cache)
 *      Are we handling one data type by data type (then this is correct)
 */
void account_init_game_connect(Account* accounts, Player* players, PacketCache* packet_cache)
{
    int64 index = -1;

    if (index < 0) {
        return;
    }

    // init account
    Account* temp_acc = &accounts[index];
    temp_acc->index = index;

    // init player
    Player* temp_player = &players[index];

    // init packet cache
    PacketCache* temp_packets = &packet_cache[index];
}

void account_free_game_disconnect()
{

}

#endif
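// Illustrative only (not part of this commit): because every per-type buffer
// shares the same slot index, finding the slot once yields all data of an account.
inline Player* account_player_lookup(Account* accounts, Player* players, uint64 account_id, int64 count) {
    for (int64 i = 0; i < count; ++i) {
        if (accounts[i].id == account_id) {
            return &players[i]; // same slot index, no pointer chasing
        }
    }

    return NULL;
}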
64
models/base/Address.h
Normal file
@ -0,0 +1,64 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_MODELS_BASE_ADDRESS_H
#define COMS_MODELS_BASE_ADDRESS_H

#include "../../stdlib/Types.h"
#include "../../utils/StringUtils.h"
#include "../../serialize/WebBinary.h"
#include "GeoLocation.h"

#ifndef MAX_LOCATION_POSTAL_LENGTH
    #define MAX_LOCATION_POSTAL_LENGTH 32
#endif

#ifndef MAX_LOCATION_CITY_LENGTH
    #define MAX_LOCATION_CITY_LENGTH 32
#endif

#ifndef MAX_LOCATION_ADDRESS_LENGTH
    #define MAX_LOCATION_ADDRESS_LENGTH 32
#endif

#ifndef MAX_LOCATION_STATE_LENGTH
    #define MAX_LOCATION_STATE_LENGTH 32
#endif

struct Location {
    uint64 id;

    char postal[MAX_LOCATION_POSTAL_LENGTH];
    char city[MAX_LOCATION_CITY_LENGTH];
    char address[MAX_LOCATION_ADDRESS_LENGTH];
    char state[MAX_LOCATION_STATE_LENGTH];

    GeoLocation geo;

    //ISO3166 country;
    //AddressType type;
};

// @question do we really want this to be part of the binary or should this be stored in a file/template?
constexpr WebBinaryValue LocationSchemaStruct[] = {
    WEB_BINARY_FIELD(Location, id),
    WEB_BINARY_FIELD(Location, postal),
    WEB_BINARY_FIELD(Location, city),
    WEB_BINARY_FIELD(Location, address),
    WEB_BINARY_FIELD(Location, state),
    WEB_BINARY_FIELD_WITH_SCHEMA(Location, geo, GeoLocationSchema)
    //WEB_BINARY_FIELD(Location, country),
    //WEB_BINARY_FIELD(Location, type)
};

constexpr auto LocationSchema = web_binary_schema<
    LocationSchemaStruct,
    ARRAY_COUNT(LocationSchemaStruct)
>();

#endif
30
models/base/GeoLocation.h
Normal file
@ -0,0 +1,30 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_MODELS_BASE_GEO_LOCATION_H
#define COMS_MODELS_BASE_GEO_LOCATION_H

#include "../../stdlib/Types.h"
#include "../../serialize/WebBinary.h"

struct GeoLocation {
    f64 lat;
    f64 lon;
};

constexpr WebBinaryValue GeoLocationSchemaStruct[] = {
    WEB_BINARY_FIELD(GeoLocation, lat),
    WEB_BINARY_FIELD(GeoLocation, lon)
};

constexpr auto GeoLocationSchema = web_binary_schema<
    GeoLocationSchemaStruct,
    ARRAY_COUNT(GeoLocationSchemaStruct)
>();

#endif
@ -14,19 +14,18 @@
#if _WIN32
    #include <winsock2.h>
    #include <ws2ipdef.h>

    typedef SOCKET socketid;
#else
    #include <netdb.h>
    #include <unistd.h>
    #include <arpa/inet.h>

    typedef int32 socketid;
#endif

struct SocketConnection {
    socketid sd;
    sockaddr_in6 addr;
    uint16 port;
};
|
|||
|
|
@ -28,399 +28,403 @@ typedef union { f64 f; LONG64 l; } _atomic_64;
|
|||
FORCE_INLINE void atomic_set_relaxed(void** target, void* new_pointer) noexcept { InterlockedExchangePointerNoFence(target, new_pointer); }
|
||||
FORCE_INLINE void* atomic_get_relaxed(void** target) noexcept { return InterlockedCompareExchangePointerNoFence(target, NULL, NULL); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int8* value, int8 new_value) noexcept { InterlockedExchangeNoFence8((volatile char *) value, new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int16* value, int16 new_value) noexcept { InterlockedExchangeNoFence16((volatile short *) value, new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int32* value, int32 new_value) noexcept { InterlockedExchangeNoFence((volatile long *) value, new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int64* value, int64 new_value) noexcept { InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile f32* value, f32 new_value) noexcept { _atomic_32 temp = {.f = new_value}; InterlockedExchangeNoFence((volatile long *) value, (long) temp.l); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile f64* value, f64 new_value) noexcept { _atomic_64 temp = {.f = new_value}; InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) temp.l); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeNoFence16((volatile short *) value, new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchangeNoFence((volatile long *) value, new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile f32* value, f32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 temp = {.f = new_value}; InterlockedExchangeNoFence((volatile long *) value, (long) temp.l); }
|
||||
FORCE_INLINE void atomic_set_relaxed(volatile f64* value, f64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 temp = {.f = new_value}; InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) temp.l); }
FORCE_INLINE int8 atomic_fetch_set_relaxed(volatile int8* value, int8 new_value) noexcept { return (int8) InterlockedExchangeNoFence8((volatile char *) value, (char) new_value); }
FORCE_INLINE int16 atomic_fetch_set_relaxed(volatile int16* value, int16 new_value) noexcept { return (int16) InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_relaxed(volatile int32* value, int32 new_value) noexcept { return (int32) InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_relaxed(volatile int64* value, int64 new_value) noexcept { return (int64) InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int16 atomic_fetch_set_relaxed(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_relaxed(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_relaxed(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int8 atomic_get_relaxed(volatile int8* value) noexcept { return (int8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_relaxed(volatile int16* value) noexcept { return (int16) InterlockedCompareExchangeNoFence16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_relaxed(volatile int32* value) noexcept { return (int32) InterlockedCompareExchangeNoFence((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_relaxed(volatile int64* value) noexcept { return (int64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedCompareExchangeNoFence16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeNoFence((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE f32 atomic_get_relaxed(volatile f32* value) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchangeNoFence((volatile long *) value, 0, 0)}; return temp.f; }
FORCE_INLINE f64 atomic_get_relaxed(volatile f64* value) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, 0, 0)}; return temp.f; }
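
// The loads above use the standard Interlocked idiom for an atomic read:
// InterlockedCompareExchange*(value, 0, 0) only writes when the current value is
// already 0 (a no-op), so memory is never modified and the call simply returns the
// current value with the requested ordering.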
FORCE_INLINE int8 atomic_increment_relaxed(volatile int8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, 1); }
FORCE_INLINE int8 atomic_decrement_relaxed(volatile int8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, -1); }
FORCE_INLINE int16 atomic_increment_relaxed(volatile int16* value) noexcept { return InterlockedIncrementNoFence16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_relaxed(volatile int16* value) noexcept { return InterlockedDecrementNoFence16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_relaxed(volatile int32* value) noexcept { return InterlockedIncrementNoFence((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_relaxed(volatile int32* value) noexcept { return InterlockedDecrementNoFence((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_relaxed(volatile int64* value) noexcept { return InterlockedIncrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_relaxed(volatile int64* value) noexcept { return InterlockedDecrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE int16 atomic_increment_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementNoFence16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_relaxed(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementNoFence16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementNoFence((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_relaxed(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementNoFence((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_relaxed(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementNoFence64((volatile LONG64 *) value); }
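
// The int8/uint8 overloads carry no alignment assert because single bytes are always
// naturally aligned. Also note a subtlety: InterlockedExchangeAdd8 returns the value
// *before* the add, while the Increment/Decrement intrinsics used for the wider types
// return the value *after* it, so the 8-bit results are offset by one from the rest.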
FORCE_INLINE void atomic_add_relaxed(volatile int8* value, int8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int8* value, int8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile int16* value, int16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int16* value, int16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile int32* value, int32 increment) noexcept { InterlockedAddNoFence((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int32* value, int32 decrement) noexcept { InterlockedAddNoFence((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_relaxed(volatile int64* value, int64 increment) noexcept { InterlockedAddNoFence64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int64* value, int64 decrement) noexcept { InterlockedAddNoFence64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE f32 atomic_compare_exchange_strong_relaxed(volatile f32* value, f32* expected, f32 desired) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchangeNoFence((volatile long *) value, (long) desired, (long) *expected) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_relaxed(volatile f64* value, f64* expected, f64 desired) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_relaxed(volatile int32* value, int32* expected, int32 desired) noexcept { return (int32) InterlockedCompareExchangeNoFence((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_relaxed(volatile int64* value, int64* expected, int64 desired) noexcept { return (int64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_relaxed(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddNoFence((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddNoFence((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_relaxed(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddNoFence64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddNoFence64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE f32 atomic_compare_exchange_strong_relaxed(volatile f32* value, f32* expected, f32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 temp = {.l = InterlockedCompareExchangeNoFence((volatile long *) value, (long) desired, (long) *expected) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_relaxed(volatile f64* value, f64* expected, f64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 temp = {.l = InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_relaxed(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeNoFence((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_relaxed(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
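
// The compare-exchange contract here differs from std::atomic: the previous value is
// returned instead of a bool, and *expected is not written back. A caller detects
// success by comparing against *expected, e.g. (sketch):
//     int32 expected = 0;
//     if (atomic_compare_exchange_strong_relaxed(&state, &expected, 1) == expected) {
//         // this thread won the 0 -> 1 transition
//     }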
FORCE_INLINE int8 atomic_fetch_add_relaxed(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE int8 atomic_fetch_sub_relaxed(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE int16 atomic_fetch_add_relaxed(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_relaxed(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_relaxed(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddNoFence((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_relaxed(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddNoFence((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_relaxed(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAddNoFence64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_relaxed(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE int16 atomic_fetch_add_relaxed(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_relaxed(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_relaxed(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddNoFence((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_relaxed(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddNoFence((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_relaxed(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAddNoFence64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_relaxed(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
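
// The fetch_* variants return the pre-operation value, which is what reference
// counting needs to detect the final release (sketch; a real refcount drop would
// normally want release/acquire ordering rather than relaxed):
//     if (atomic_fetch_sub_relaxed(&refcount, 1) == 1) {
//         // last reference gone, safe to destroy
//     }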
FORCE_INLINE void atomic_set_relaxed(volatile uint8* value, uint8 new_value) noexcept { InterlockedExchangeNoFence8((volatile char *) value, (char) new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_fetch_set_relaxed(volatile uint8* value, uint8 new_value) noexcept { return (uint8) InterlockedExchangeNoFence8((volatile char *) value, (char) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { return (uint16) InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { return (uint32) InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { return (uint64) InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_relaxed(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeNoFence16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_relaxed(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeNoFence((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_relaxed(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeNoFence64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_get_relaxed(volatile uint8* value) noexcept { return (uint8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_relaxed(volatile uint16* value) noexcept { return (uint16) InterlockedCompareExchangeNoFence16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_relaxed(volatile uint32* value) noexcept { return (uint32) InterlockedCompareExchangeNoFence((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_relaxed(volatile uint64* value) noexcept { return (uint64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedCompareExchangeNoFence16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeNoFence((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, 0, 0); }
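
// The unsigned overloads reuse the signed intrinsics; casting through char/short/
// long/LONG64 is value-preserving on the two's-complement targets MSVC supports, so
// no separate unsigned intrinsics are needed.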
FORCE_INLINE uint8 atomic_increment_relaxed(volatile uint8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, 1); }
FORCE_INLINE uint8 atomic_decrement_relaxed(volatile uint8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, -1); }
FORCE_INLINE uint16 atomic_increment_relaxed(volatile uint16* value) noexcept { return InterlockedIncrementNoFence16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_relaxed(volatile uint16* value) noexcept { return InterlockedDecrementNoFence16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_relaxed(volatile uint32* value) noexcept { return InterlockedIncrementNoFence((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_relaxed(volatile uint32* value) noexcept { return InterlockedDecrementNoFence((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_relaxed(volatile uint64* value) noexcept { return InterlockedIncrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_relaxed(volatile uint64* value) noexcept { return InterlockedDecrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE uint16 atomic_increment_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementNoFence16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_relaxed(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementNoFence16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementNoFence((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_relaxed(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementNoFence((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_relaxed(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementNoFence64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_relaxed(volatile uint8* value, uint8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint8* value, uint8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile uint16* value, uint16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint16* value, uint16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile uint32* value, uint32 increment) noexcept { InterlockedAddNoFence((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint32* value, uint32 decrement) noexcept { InterlockedAddNoFence((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile uint64* value, uint64 increment) noexcept { InterlockedAddNoFence64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint64* value, uint64 decrement) noexcept { InterlockedAddNoFence64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_relaxed(volatile uint32* value, uint32* expected, uint32 desired) noexcept { return (uint32) InterlockedCompareExchangeNoFence((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_relaxed(volatile uint64* value, uint64* expected, uint64 desired) noexcept { return (uint64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_relaxed(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddNoFence((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddNoFence((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_relaxed(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddNoFence64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_relaxed(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddNoFence64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_relaxed(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeNoFence((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_relaxed(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeNoFence64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE uint8 atomic_fetch_add_relaxed(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE uint8 atomic_fetch_sub_relaxed(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_relaxed(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_relaxed(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_relaxed(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddNoFence((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_relaxed(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddNoFence((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_relaxed(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAddNoFence64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_relaxed(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_relaxed(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_relaxed(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_relaxed(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddNoFence((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_relaxed(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddNoFence((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_relaxed(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAddNoFence64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_relaxed(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_and_relaxed(volatile uint8* value, uint8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int8* value, int8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint16* value, uint16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int16* value, int16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint32* value, uint32 mask) noexcept { InterlockedAndNoFence((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int32* value, int32 mask) noexcept { InterlockedAndNoFence((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint64* value, uint64 mask) noexcept { InterlockedAnd64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int64* value, int64 mask) noexcept { InterlockedAnd64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndNoFence((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndNoFence((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_relaxed(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_relaxed(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint8* value, uint8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int8* value, int8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint16* value, uint16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int16* value, int16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint32* value, uint32 mask) noexcept { InterlockedOrNoFence((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int32* value, int32 mask) noexcept { InterlockedOrNoFence((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint64* value, uint64 mask) noexcept { InterlockedOr64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int64* value, int64 mask) noexcept { InterlockedOr64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrNoFence((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrNoFence((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_relaxed(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64NoFence((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_relaxed(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64NoFence((volatile LONG64 *) value, mask); }
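
// Together, or/and give cheap lock-free flag words: or sets bits, and with the
// complement clears them (sketch; FLAG_DIRTY is a hypothetical constant):
//     atomic_or_relaxed(&flags, FLAG_DIRTY);            // set
//     atomic_and_relaxed(&flags, (uint32) ~FLAG_DIRTY); // clear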

FORCE_INLINE void* atomic_get_acquire(void** target) noexcept { return InterlockedCompareExchangePointerAcquire(target, NULL, NULL); }
FORCE_INLINE int8 atomic_fetch_set_acquire(volatile int8* value, int8 new_value) noexcept { return (int8) InterlockedExchangeAcquire8((volatile char *) value, (char) new_value); }
FORCE_INLINE int16 atomic_fetch_set_acquire(volatile int16* value, int16 new_value) noexcept { return (int16) InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_acquire(volatile int32* value, int32 new_value) noexcept { return (int32) InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_acquire(volatile int64* value, int64 new_value) noexcept { return (int64) InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int16 atomic_fetch_set_acquire(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_acquire(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_acquire(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int8 atomic_get_acquire(volatile int8* value) noexcept { return (int8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_acquire(volatile int16* value) noexcept { return (int16) InterlockedCompareExchangeAcquire16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_acquire(volatile int32* value) noexcept { return (int32) InterlockedCompareExchangeAcquire((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_acquire(volatile int64* value) noexcept { return (int64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedCompareExchangeAcquire16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeAcquire((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE f32 atomic_get_acquire(volatile f32* value) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchangeAcquire((volatile long *) value, 0, 0)}; return temp.f; }
FORCE_INLINE f64 atomic_get_acquire(volatile f64* value) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, 0, 0)}; return temp.f; }
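
// The *_acquire variants mirror the relaxed set above: an acquire load/RMW that
// observes a value published by a matching release store also sees every write the
// publishing thread made before that store, which is the usual pattern for handing
// data between threads (the release counterparts are presumably defined elsewhere in
// this header; they are not part of this hunk).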
FORCE_INLINE int8 atomic_increment_acquire(volatile int8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, 1); }
FORCE_INLINE int8 atomic_decrement_acquire(volatile int8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, -1); }
FORCE_INLINE int16 atomic_increment_acquire(volatile int16* value) noexcept { return InterlockedIncrementAcquire16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_acquire(volatile int16* value) noexcept { return InterlockedDecrementAcquire16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_acquire(volatile int32* value) noexcept { return InterlockedIncrementAcquire((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_acquire(volatile int32* value) noexcept { return InterlockedDecrementAcquire((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_acquire(volatile int64* value) noexcept { return InterlockedIncrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_acquire(volatile int64* value) noexcept { return InterlockedDecrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE int16 atomic_increment_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementAcquire16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_acquire(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementAcquire16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementAcquire((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_acquire(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementAcquire((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_acquire(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_acquire(volatile int8* value, int8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int8* value, int8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile int16* value, int16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int16* value, int16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile int32* value, int32 increment) noexcept { InterlockedAddAcquire((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int32* value, int32 decrement) noexcept { InterlockedAddAcquire((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_acquire(volatile int64* value, int64 increment) noexcept { InterlockedAddAcquire64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int64* value, int64 decrement) noexcept { InterlockedAddAcquire64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire(volatile f32* value, f32* expected, f32 desired) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchangeAcquire((volatile long *) value, (long) desired, (long) *expected) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire(volatile f64* value, f64* expected, f64 desired) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire(volatile int32* value, int32* expected, int32 desired) noexcept { return (int32) InterlockedCompareExchangeAcquire((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire(volatile int64* value, int64* expected, int64 desired) noexcept { return (int64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_acquire(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddAcquire((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddAcquire((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_acquire(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddAcquire64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddAcquire64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire(volatile f32* value, f32* expected, f32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 temp = {.l = InterlockedCompareExchangeAcquire((volatile long *) value, (long) desired, (long) *expected) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire(volatile f64* value, f64* expected, f64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 temp = {.l = InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeAcquire((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
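
// Acquire exchange is the classic test-and-set lock primitive (sketch; assumes a
// matching atomic_set_release exists for the unlock side, which is not shown in this
// hunk):
//     while (atomic_fetch_set_acquire(&lock_word, 1) != 0) { /* spin */ }
//     /* ... critical section ... */
//     atomic_set_release(&lock_word, 0);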
FORCE_INLINE int8 atomic_fetch_add_acquire(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE int8 atomic_fetch_sub_acquire(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE int16 atomic_fetch_add_acquire(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_acquire(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_acquire(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddAcquire((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_acquire(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddAcquire((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_acquire(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAddAcquire64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_acquire(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE int16 atomic_fetch_add_acquire(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_acquire(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_acquire(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddAcquire((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_acquire(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddAcquire((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_acquire(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAddAcquire64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_acquire(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_set_acquire(volatile uint8* value, uint8 new_value) noexcept { InterlockedExchangeAcquire8((volatile char *) value, (char) new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint16* value, uint16 new_value) noexcept { InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint32* value, uint32 new_value) noexcept { InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint64* value, uint64 new_value) noexcept { InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_fetch_set_acquire(volatile uint8* value, uint8 new_value) noexcept { return (uint8) InterlockedExchangeAcquire8((volatile char *) value, (char) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_acquire(volatile uint16* value, uint16 new_value) noexcept { return (uint16) InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_acquire(volatile uint32* value, uint32 new_value) noexcept { return (uint32) InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_acquire(volatile uint64* value, uint64 new_value) noexcept { return (uint64) InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_acquire(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAcquire16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_acquire(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAcquire((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_acquire(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAcquire64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_get_acquire(volatile uint8* value) noexcept { return (uint8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_acquire(volatile uint16* value) noexcept { return (uint16) InterlockedCompareExchangeAcquire16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_acquire(volatile uint32* value) noexcept { return (uint32) InterlockedCompareExchangeAcquire((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_acquire(volatile uint64* value) noexcept { return (uint64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedCompareExchangeAcquire16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeAcquire((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE uint8 atomic_increment_acquire(volatile uint8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, 1); }
FORCE_INLINE uint8 atomic_decrement_acquire(volatile uint8* value) noexcept { return InterlockedExchangeAdd8((volatile char *) value, -1); }
FORCE_INLINE uint16 atomic_increment_acquire(volatile uint16* value) noexcept { return InterlockedIncrementAcquire16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_acquire(volatile uint16* value) noexcept { return InterlockedDecrementAcquire16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_acquire(volatile uint32* value) noexcept { return InterlockedIncrementAcquire((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_acquire(volatile uint32* value) noexcept { return InterlockedDecrementAcquire((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_acquire(volatile uint64* value) noexcept { return InterlockedIncrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_acquire(volatile uint64* value) noexcept { return InterlockedDecrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE uint16 atomic_increment_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementAcquire16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_acquire(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementAcquire16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementAcquire((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_acquire(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementAcquire((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_acquire(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementAcquire64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_acquire(volatile uint8* value, uint8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint8* value, uint8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile uint16* value, uint16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint16* value, uint16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile uint32* value, uint32 increment) noexcept { InterlockedAddAcquire((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint32* value, uint32 decrement) noexcept { InterlockedAddAcquire((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile uint64* value, uint64 increment) noexcept { InterlockedAddAcquire64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint64* value, uint64 decrement) noexcept { InterlockedAddAcquire64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire(volatile uint32* value, uint32* expected, uint32 desired) noexcept { return (uint32) InterlockedCompareExchangeAcquire((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire(volatile uint64* value, uint64* expected, uint64 desired) noexcept { return (uint64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_acquire(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddAcquire((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddAcquire((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_acquire(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddAcquire64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddAcquire64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeAcquire((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeAcquire64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
|
||||
FORCE_INLINE uint8 atomic_fetch_add_acquire(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
|
||||
FORCE_INLINE uint8 atomic_fetch_sub_acquire(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
|
||||
FORCE_INLINE uint16 atomic_fetch_add_acquire(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
|
||||
FORCE_INLINE uint16 atomic_fetch_sub_acquire(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
|
||||
FORCE_INLINE uint32 atomic_fetch_add_acquire(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddAcquire((volatile long *) value, operand); }
|
||||
FORCE_INLINE uint32 atomic_fetch_sub_acquire(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddAcquire((volatile unsigned long *) value, -((long) operand)); }
|
||||
FORCE_INLINE uint64 atomic_fetch_add_acquire(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAddAcquire64((volatile LONG64 *) value, (LONG64) operand); }
|
||||
FORCE_INLINE uint64 atomic_fetch_sub_acquire(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
|
||||
FORCE_INLINE uint16 atomic_fetch_add_acquire(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
|
||||
FORCE_INLINE uint16 atomic_fetch_sub_acquire(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
|
||||
FORCE_INLINE uint32 atomic_fetch_add_acquire(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddAcquire((volatile long *) value, operand); }
|
||||
FORCE_INLINE uint32 atomic_fetch_sub_acquire(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddAcquire((volatile unsigned long *) value, -((long) operand)); }
|
||||
FORCE_INLINE uint64 atomic_fetch_add_acquire(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAddAcquire64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAddAcquire64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_and_acquire(volatile uint8* value, uint8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int8* value, int8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint16* value, uint16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int16* value, int16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint32* value, uint32 mask) noexcept { InterlockedAndAcquire((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int32* value, int32 mask) noexcept { InterlockedAndAcquire((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint64* value, uint64 mask) noexcept { InterlockedAnd64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int64* value, int64 mask) noexcept { InterlockedAnd64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndAcquire((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndAcquire((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_acquire(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint8* value, uint8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int8* value, int8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint16* value, uint16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int16* value, int16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint32* value, uint32 mask) noexcept { InterlockedOrAcquire((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int32* value, int32 mask) noexcept { InterlockedOrAcquire((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint64* value, uint64 mask) noexcept { InterlockedOr64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int64* value, int64 mask) noexcept { InterlockedOr64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrAcquire((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrAcquire((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_acquire(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64Acquire((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64Acquire((volatile LONG64 *) value, mask); }
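
// Release-ordered operations. atomic_get_release is emulated as a compare-exchange
// of 0 with 0: the CAS never modifies the stored value (it only writes 0 when the
// value already is 0) but returns the current contents with the requested ordering.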
FORCE_INLINE void atomic_set_release(void** target, void* new_pointer) noexcept { InterlockedExchangePointer(target, new_pointer); }
FORCE_INLINE void* atomic_get_release(void** target) noexcept { return InterlockedCompareExchangePointerRelease(target, NULL, NULL); }
FORCE_INLINE void atomic_set_release(volatile int8* value, int8 new_value) noexcept { InterlockedExchange8((volatile char *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile int16* value, int16 new_value) noexcept { InterlockedExchange16((volatile short *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile int32* value, int32 new_value) noexcept { InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile int64* value, int64 new_value) noexcept { InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_release(volatile f32* value, f32 new_value) noexcept { _atomic_32 temp = {.f = new_value}; InterlockedExchange((volatile long *) value, (long) temp.l); }
FORCE_INLINE void atomic_set_release(volatile f64* value, f64 new_value) noexcept { _atomic_64 temp = {.f = new_value}; InterlockedExchange64((volatile LONG64 *) value, (LONG64) temp.l); }
FORCE_INLINE void atomic_set_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchange16((volatile short *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_release(volatile f32* value, f32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 temp = {.f = new_value}; InterlockedExchange((volatile long *) value, (long) temp.l); }
FORCE_INLINE void atomic_set_release(volatile f64* value, f64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 temp = {.f = new_value}; InterlockedExchange64((volatile LONG64 *) value, (LONG64) temp.l); }
FORCE_INLINE int8 atomic_fetch_set_release(volatile int8* value, int8 new_value) noexcept { return (int8) InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE int16 atomic_fetch_set_release(volatile int16* value, int16 new_value) noexcept { return (int16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_release(volatile int32* value, int32 new_value) noexcept { return (int32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_release(volatile int64* value, int64 new_value) noexcept { return (int64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int16 atomic_fetch_set_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int8 atomic_get_release(volatile int8* value) noexcept { return (int8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_release(volatile int16* value) noexcept { return (int16) InterlockedCompareExchangeRelease16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_release(volatile int32* value) noexcept { return (int32) InterlockedCompareExchangeRelease((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_release(volatile int64* value) noexcept { return (int64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedCompareExchangeRelease16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeRelease((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE f32 atomic_get_release(volatile f32* value) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchangeRelease((volatile long *) value, 0, 0)}; return temp.f; }
FORCE_INLINE f64 atomic_get_release(volatile f64* value) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchangeRelease64((volatile LONG64 *) value, 0, 0)}; return temp.f; }
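// No 8-bit InterlockedIncrement/InterlockedDecrement exists, so the int8/uint8
// variants are emulated with InterlockedExchangeAdd8, which returns the previous
// value; the +1/-1 below makes them return the new value like the wider variants.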
FORCE_INLINE int8 atomic_increment_release(volatile int8* value) noexcept { return (int8) (InterlockedExchangeAdd8((volatile char *) value, 1) + 1); }
FORCE_INLINE int8 atomic_decrement_release(volatile int8* value) noexcept { return (int8) (InterlockedExchangeAdd8((volatile char *) value, -1) - 1); }
FORCE_INLINE int16 atomic_increment_release(volatile int16* value) noexcept { return InterlockedIncrementRelease16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_release(volatile int16* value) noexcept { return InterlockedDecrementRelease16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_release(volatile int32* value) noexcept { return InterlockedIncrementRelease((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_release(volatile int32* value) noexcept { return InterlockedDecrementRelease((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_release(volatile int64* value) noexcept { return InterlockedIncrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_release(volatile int64* value) noexcept { return InterlockedDecrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE int16 atomic_increment_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementRelease16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementRelease16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementRelease((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementRelease((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_release(volatile int8* value, int8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_release(volatile int8* value, int8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_release(volatile int16* value, int16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_release(volatile int16* value, int16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_release(volatile int32* value, int32 increment) noexcept { InterlockedAddRelease((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_release(volatile int32* value, int32 decrement) noexcept { InterlockedAddRelease((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_release(volatile int64* value, int64 increment) noexcept { InterlockedAddRelease64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_release(volatile int64* value, int64 decrement) noexcept { InterlockedAddRelease64((volatile LONG64 *) value, -((LONG64) decrement)); }
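// Float CAS: compare on the raw bit patterns via the _atomic_32/_atomic_64 unions.
// A numeric (long) cast of the float operands would compare converted integer
// values instead of the stored representation.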
FORCE_INLINE f32 atomic_compare_exchange_strong_release(volatile f32* value, f32* expected, f32 desired) noexcept { _atomic_32 des = {.f = desired}; _atomic_32 exp = {.f = *expected}; _atomic_32 temp = {.l = InterlockedCompareExchangeRelease((volatile long *) value, (long) des.l, (long) exp.l) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_release(volatile f64* value, f64* expected, f64 desired) noexcept { _atomic_64 des = {.f = desired}; _atomic_64 exp = {.f = *expected}; _atomic_64 temp = {.l = InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) des.l, (LONG64) exp.l) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_release(volatile int32* value, int32* expected, int32 desired) noexcept { return (int32) InterlockedCompareExchangeRelease((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_release(volatile int64* value, int64* expected, int64 desired) noexcept { return (int64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_release(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_release(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_release(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddRelease((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_release(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddRelease((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_release(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddRelease64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_release(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddRelease64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE f32 atomic_compare_exchange_strong_release(volatile f32* value, f32* expected, f32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 des = {.f = desired}; _atomic_32 exp = {.f = *expected}; _atomic_32 temp = {.l = InterlockedCompareExchangeRelease((volatile long *) value, (long) des.l, (long) exp.l) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_release(volatile f64* value, f64* expected, f64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 des = {.f = desired}; _atomic_64 exp = {.f = *expected}; _atomic_64 temp = {.l = InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) des.l, (LONG64) exp.l) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_release(volatile int32* value, int32* expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchangeRelease((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_release(volatile int64* value, int64* expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE int8 atomic_fetch_add_release(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE int8 atomic_fetch_sub_release(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE int16 atomic_fetch_add_release(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_release(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_release(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddRelease((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_release(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAddRelease((volatile long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_release(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_release(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE int16 atomic_fetch_add_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddRelease((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAddRelease((volatile long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE int64 atomic_fetch_and_release(volatile int64* value, uint64 mask) noexcept { return (int64) InterlockedAnd64Release((volatile LONG64 *) value, (LONG64) mask); }
FORCE_INLINE int64 atomic_fetch_or_release(volatile int64* value, uint64 mask) noexcept { return (int64) InterlockedOr64Release((volatile LONG64 *) value, (LONG64) mask); }
FORCE_INLINE uint64 atomic_fetch_and_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedAnd64Release((volatile LONG64 *) value, (LONG64) mask); }
FORCE_INLINE uint64 atomic_fetch_or_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedOr64Release((volatile LONG64 *) value, (LONG64) mask); }
FORCE_INLINE void atomic_set_release(volatile uint8* value, uint8 new_value) noexcept { InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE void atomic_set_release(volatile uint16* value, uint16 new_value) noexcept { InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_release(volatile uint32* value, uint32 new_value) noexcept { InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile uint64* value, uint64 new_value) noexcept { InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_fetch_set_release(volatile uint8* value, uint8 new_value) noexcept { return (uint8) InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_release(volatile uint16* value, uint16 new_value) noexcept { return (uint16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_release(volatile uint32* value, uint32 new_value) noexcept { return (uint32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_release(volatile uint64* value, uint64 new_value) noexcept { return (uint64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_get_release(volatile uint8* value) noexcept { return (uint8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_release(volatile uint16* value) noexcept { return (uint16) InterlockedCompareExchangeRelease16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_release(volatile uint32* value) noexcept { return (uint32) InterlockedCompareExchangeRelease((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_release(volatile uint64* value) noexcept { return (uint64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE uint16 atomic_get_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedCompareExchangeRelease16((volatile short *) value, 0, 0); }
FORCE_INLINE uint32 atomic_get_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeRelease((volatile long *) value, 0, 0); }
FORCE_INLINE uint64 atomic_get_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE uint8 atomic_increment_release(volatile uint8* value) noexcept { return (uint8) (InterlockedExchangeAdd8((volatile char *) value, 1) + 1); }
FORCE_INLINE uint8 atomic_decrement_release(volatile uint8* value) noexcept { return (uint8) (InterlockedExchangeAdd8((volatile char *) value, -1) - 1); }
FORCE_INLINE uint16 atomic_increment_release(volatile uint16* value) noexcept { return InterlockedIncrementRelease16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_release(volatile uint16* value) noexcept { return InterlockedDecrementRelease16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_release(volatile uint32* value) noexcept { return InterlockedIncrementRelease((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_release(volatile uint32* value) noexcept { return InterlockedDecrementRelease((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_release(volatile uint64* value) noexcept { return InterlockedIncrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_release(volatile uint64* value) noexcept { return InterlockedDecrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE uint16 atomic_increment_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrementRelease16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrementRelease16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrementRelease((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrementRelease((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrementRelease64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_release(volatile uint8* value, uint8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_release(volatile uint8* value, uint8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_release(volatile uint16* value, uint16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_release(volatile uint16* value, uint16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_release(volatile uint32* value, uint32 increment) noexcept { InterlockedAddRelease((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_release(volatile uint32* value, uint32 decrement) noexcept { InterlockedAddRelease((volatile long *) value, -((long) decrement)); }
FORCE_INLINE void atomic_add_release(volatile uint64* value, uint64 increment) noexcept { InterlockedAddRelease64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_release(volatile uint64* value, uint64 decrement) noexcept { InterlockedAddRelease64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { return (uint32) InterlockedCompareExchangeRelease((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { return (uint64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_release(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_release(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_release(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddRelease((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_release(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAddRelease((volatile long *) value, -((long) decrement)); }
FORCE_INLINE void atomic_add_release(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddRelease64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_release(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAddRelease64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchangeRelease((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchangeRelease64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE uint8 atomic_fetch_add_release(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE uint8 atomic_fetch_sub_release(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_release(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_release(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_release(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddRelease((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_release(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAddRelease((volatile long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_release(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_release(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddRelease((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAddRelease((volatile long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAddRelease64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_and_release(volatile uint8* value, uint8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int8* value, int8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile uint16* value, uint16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int16* value, int16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile uint32* value, uint32 mask) noexcept { InterlockedAndRelease((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int32* value, int32 mask) noexcept { InterlockedAndRelease((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_release(volatile uint64* value, uint64 mask) noexcept { InterlockedAnd64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int64* value, int64 mask) noexcept { InterlockedAnd64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndRelease((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAndRelease((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile uint8* value, uint8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int8* value, int8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile uint16* value, uint16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int16* value, int16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile uint32* value, uint32 mask) noexcept { InterlockedOrRelease((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int32* value, int32 mask) noexcept { InterlockedOrRelease((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_release(volatile uint64* value, uint64 mask) noexcept { InterlockedOr64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int64* value, int64 mask) noexcept { InterlockedOr64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrRelease((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOrRelease((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64Release((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64Release((volatile LONG64 *) value, mask); }
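
// Acquire-release operations. The plain Interlocked* functions act as full memory
// barriers on Windows, which satisfies (and exceeds) acquire-release ordering.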
FORCE_INLINE void atomic_set_acquire_release(void** target, void* new_pointer) noexcept { InterlockedExchangePointer(target, new_pointer); }
FORCE_INLINE void* atomic_get_acquire_release(void** target) noexcept { return InterlockedCompareExchangePointer(target, NULL, NULL); }
FORCE_INLINE void atomic_set_acquire_release(volatile int8* value, int8 new_value) noexcept { InterlockedExchange8((volatile char *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile int16* value, int16 new_value) noexcept { InterlockedExchange16((volatile short *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile int32* value, int32 new_value) noexcept { InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile int64* value, int64 new_value) noexcept { InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile f32* value, f32 new_value) noexcept { _atomic_32 temp = {.f = new_value}; InterlockedExchange((volatile long *) value, (long) temp.l); }
FORCE_INLINE void atomic_set_acquire_release(volatile f64* value, f64 new_value) noexcept { _atomic_64 temp = {.f = new_value}; InterlockedExchange64((volatile LONG64 *) value, (LONG64) temp.l); }
FORCE_INLINE void atomic_set_acquire_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchange16((volatile short *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile f32* value, f32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 temp = {.f = new_value}; InterlockedExchange((volatile long *) value, (long) temp.l); }
FORCE_INLINE void atomic_set_acquire_release(volatile f64* value, f64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 temp = {.f = new_value}; InterlockedExchange64((volatile LONG64 *) value, (LONG64) temp.l); }
FORCE_INLINE int8 atomic_fetch_set_acquire_release(volatile int8* value, int8 new_value) noexcept { return (int8) InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE int16 atomic_fetch_set_acquire_release(volatile int16* value, int16 new_value) noexcept { return (int16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_acquire_release(volatile int32* value, int32 new_value) noexcept { return (int32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_acquire_release(volatile int64* value, int64 new_value) noexcept { return (int64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int16 atomic_fetch_set_acquire_release(volatile int16* value, int16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE int32 atomic_fetch_set_acquire_release(volatile int32* value, int32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE int64 atomic_fetch_set_acquire_release(volatile int64* value, int64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE int8 atomic_get_acquire_release(volatile int8* value) noexcept { return (int8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_acquire_release(volatile int16* value) noexcept { return (int16) InterlockedCompareExchange16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_acquire_release(volatile int32* value) noexcept { return (int32) InterlockedCompareExchange((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_acquire_release(volatile int64* value) noexcept { return (int64) InterlockedCompareExchange64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE int16 atomic_get_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedCompareExchange16((volatile short *) value, 0, 0); }
FORCE_INLINE int32 atomic_get_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchange((volatile long *) value, 0, 0); }
FORCE_INLINE int64 atomic_get_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchange64((volatile LONG64 *) value, 0, 0); }
FORCE_INLINE f32 atomic_get_acquire_release(volatile f32* value) noexcept { _atomic_32 temp = {.l = InterlockedCompareExchange((volatile long *) value, 0, 0)}; return temp.f; }
FORCE_INLINE f64 atomic_get_acquire_release(volatile f64* value) noexcept { _atomic_64 temp = {.l = InterlockedCompareExchange64((volatile LONG64 *) value, 0, 0)}; return temp.f; }
FORCE_INLINE int8 atomic_increment_acquire_release(volatile int8* value) noexcept { return (int8) (InterlockedExchangeAdd8((volatile char *) value, 1) + 1); }
FORCE_INLINE int8 atomic_decrement_acquire_release(volatile int8* value) noexcept { return (int8) (InterlockedExchangeAdd8((volatile char *) value, -1) - 1); }
FORCE_INLINE int16 atomic_increment_acquire_release(volatile int16* value) noexcept { return InterlockedIncrement16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_acquire_release(volatile int16* value) noexcept { return InterlockedDecrement16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_acquire_release(volatile int32* value) noexcept { return InterlockedIncrement((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_acquire_release(volatile int32* value) noexcept { return InterlockedDecrement((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_acquire_release(volatile int64* value) noexcept { return InterlockedIncrement64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_acquire_release(volatile int64* value) noexcept { return InterlockedDecrement64((volatile LONG64 *) value); }
FORCE_INLINE int16 atomic_increment_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrement16((volatile short *) value); }
FORCE_INLINE int16 atomic_decrement_acquire_release(volatile int16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrement16((volatile short *) value); }
FORCE_INLINE int32 atomic_increment_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrement((volatile long *) value); }
FORCE_INLINE int32 atomic_decrement_acquire_release(volatile int32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrement((volatile long *) value); }
FORCE_INLINE int64 atomic_increment_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrement64((volatile LONG64 *) value); }
FORCE_INLINE int64 atomic_decrement_acquire_release(volatile int64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrement64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_acquire_release(volatile int8* value, int8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int8* value, int8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile int16* value, int16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int16* value, int16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile int32* value, int32 increment) noexcept { InterlockedAdd((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int32* value, int32 decrement) noexcept { InterlockedAdd((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_acquire_release(volatile int64* value, int64 increment) noexcept { InterlockedAdd64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int64* value, int64 decrement) noexcept { InterlockedAdd64((volatile LONG64 *) value, -((LONG64) decrement)); }
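// As in the release section, float CAS goes through the unions so the exchange
// compares bit patterns rather than numerically converted values.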
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire_release(volatile f32* value, f32* expected, f32 desired) noexcept { _atomic_32 des = {.f = desired}; _atomic_32 exp = {.f = *expected}; _atomic_32 temp = {.l = InterlockedCompareExchange((volatile long *) value, (long) des.l, (long) exp.l) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire_release(volatile f64* value, f64* expected, f64 desired) noexcept { _atomic_64 des = {.f = desired}; _atomic_64 exp = {.f = *expected}; _atomic_64 temp = {.l = InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) des.l, (LONG64) exp.l) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire_release(volatile int32* value, int32* expected, int32 desired) noexcept { return (int32) InterlockedCompareExchange((volatile long *) value, desired, *expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire_release(volatile int64* value, int64* expected, int64 desired) noexcept { return (int64) InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_acquire_release(volatile int16* value, int16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int16* value, int16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile int32* value, int32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAdd((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int32* value, int32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAdd((volatile long *) value, -decrement); }
FORCE_INLINE void atomic_add_acquire_release(volatile int64* value, int64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAdd64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile int64* value, int64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAdd64((volatile LONG64 *) value, -((LONG64) decrement)); }
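// Note: unlike the pointer-based overloads above, these checked variants take
// `expected` by value, so the observed value is not reported back to the caller.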
FORCE_INLINE f32 atomic_compare_exchange_strong_acquire_release(volatile f32* value, f32 expected, f32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); _atomic_32 des = {.f = desired}; _atomic_32 exp = {.f = expected}; _atomic_32 temp = {.l = InterlockedCompareExchange((volatile long *) value, (long) des.l, (long) exp.l) }; return temp.f; }
FORCE_INLINE f64 atomic_compare_exchange_strong_acquire_release(volatile f64* value, f64 expected, f64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); _atomic_64 des = {.f = desired}; _atomic_64 exp = {.f = expected}; _atomic_64 temp = {.l = InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) des.l, (LONG64) exp.l) }; return temp.f; }
FORCE_INLINE int32 atomic_compare_exchange_strong_acquire_release(volatile int32* value, int32 expected, int32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedCompareExchange((volatile long *) value, desired, expected); }
FORCE_INLINE int64 atomic_compare_exchange_strong_acquire_release(volatile int64* value, int64 expected, int64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) desired, (LONG64) expected); }
FORCE_INLINE int8 atomic_fetch_add_acquire_release(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE int8 atomic_fetch_sub_acquire_release(volatile int8* value, int8 operand) noexcept { return (int8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE int16 atomic_fetch_add_acquire_release(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_acquire_release(volatile int16* value, int16 operand) noexcept { return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_acquire_release(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAdd((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_acquire_release(volatile int32* value, int32 operand) noexcept { return (int32) InterlockedExchangeAdd((volatile long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_acquire_release(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_acquire_release(volatile int64* value, int64 operand) noexcept { return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE int16 atomic_fetch_add_acquire_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE int16 atomic_fetch_sub_acquire_release(volatile int16* value, int16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (int16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE int32 atomic_fetch_add_acquire_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAdd((volatile long *) value, operand); }
FORCE_INLINE int32 atomic_fetch_sub_acquire_release(volatile int32* value, int32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (int32) InterlockedExchangeAdd((volatile long *) value, -((long) operand)); }
FORCE_INLINE int64 atomic_fetch_add_acquire_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE int64 atomic_fetch_sub_acquire_release(volatile int64* value, int64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (int64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint8* value, uint8 new_value) noexcept { InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE void atomic_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint8 atomic_fetch_set_acquire_release(volatile uint8* value, uint8 new_value) noexcept { return (uint8) InterlockedExchange8((volatile char *) value, (char) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { return (uint16) InterlockedExchange16((volatile short *) value, (short) new_value); }
FORCE_INLINE uint32 atomic_fetch_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { return (uint32) InterlockedExchange((volatile long *) value, new_value); }
FORCE_INLINE uint64 atomic_fetch_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { return (uint64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
FORCE_INLINE uint16 atomic_fetch_set_acquire_release(volatile uint16* value, uint16 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchange16((volatile short *) value, (short) new_value); }
|
||||
FORCE_INLINE uint32 atomic_fetch_set_acquire_release(volatile uint32* value, uint32 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchange((volatile long *) value, new_value); }
|
||||
FORCE_INLINE uint64 atomic_fetch_set_acquire_release(volatile uint64* value, uint64 new_value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchange64((volatile LONG64 *) value, (LONG64) new_value); }
|
||||
FORCE_INLINE uint8 atomic_get_acquire_release(volatile uint8* value) noexcept { return (uint8) _InterlockedCompareExchange8((volatile char *) value, 0, 0); }
|
||||
FORCE_INLINE uint16 atomic_get_acquire_release(volatile uint16* value) noexcept { return (uint16) InterlockedCompareExchange16((volatile short *) value, 0, 0); }
|
||||
FORCE_INLINE uint32 atomic_get_acquire_release(volatile uint32* value) noexcept { return (uint32) InterlockedCompareExchange((volatile long *) value, 0, 0); }
|
||||
FORCE_INLINE uint64 atomic_get_acquire_release(volatile uint64* value) noexcept { return (uint64) InterlockedCompareExchange64((volatile LONG64 *) value, 0, 0); }
|
||||
FORCE_INLINE uint16 atomic_get_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedCompareExchange16((volatile short *) value, 0, 0); }
|
||||
FORCE_INLINE uint32 atomic_get_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchange((volatile long *) value, 0, 0); }
|
||||
FORCE_INLINE uint64 atomic_get_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchange64((volatile LONG64 *) value, 0, 0); }
|
||||
// InterlockedExchangeAdd8 returns the previous value, so adjust the result to match the
// InterlockedIncrement/InterlockedDecrement semantics of the wider overloads below
FORCE_INLINE uint8 atomic_increment_acquire_release(volatile uint8* value) noexcept { return (uint8) (InterlockedExchangeAdd8((volatile char *) value, 1) + 1); }
FORCE_INLINE uint8 atomic_decrement_acquire_release(volatile uint8* value) noexcept { return (uint8) (InterlockedExchangeAdd8((volatile char *) value, -1) - 1); }
FORCE_INLINE uint16 atomic_increment_acquire_release(volatile uint16* value) noexcept { return InterlockedIncrement16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_acquire_release(volatile uint16* value) noexcept { return InterlockedDecrement16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_acquire_release(volatile uint32* value) noexcept { return InterlockedIncrement((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_acquire_release(volatile uint32* value) noexcept { return InterlockedDecrement((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_acquire_release(volatile uint64* value) noexcept { return InterlockedIncrement64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_acquire_release(volatile uint64* value) noexcept { return InterlockedDecrement64((volatile LONG64 *) value); }
FORCE_INLINE uint16 atomic_increment_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedIncrement16((volatile short *) value); }
FORCE_INLINE uint16 atomic_decrement_acquire_release(volatile uint16* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return InterlockedDecrement16((volatile short *) value); }
FORCE_INLINE uint32 atomic_increment_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedIncrement((volatile long *) value); }
FORCE_INLINE uint32 atomic_decrement_acquire_release(volatile uint32* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return InterlockedDecrement((volatile long *) value); }
FORCE_INLINE uint64 atomic_increment_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedIncrement64((volatile LONG64 *) value); }
FORCE_INLINE uint64 atomic_decrement_acquire_release(volatile uint64* value) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return InterlockedDecrement64((volatile LONG64 *) value); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint8* value, uint8 increment) noexcept { InterlockedExchangeAdd8((volatile char *) value, (char) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint8* value, uint8 decrement) noexcept { InterlockedExchangeAdd8((volatile char *) value, -((char) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint16* value, uint16 increment) noexcept { InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint16* value, uint16 decrement) noexcept { InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint32* value, uint32 increment) noexcept { InterlockedAdd((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint32* value, uint32 decrement) noexcept { InterlockedAdd((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint64* value, uint64 increment) noexcept { InterlockedAdd64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint64* value, uint64 decrement) noexcept { InterlockedAdd64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire_release(volatile uint32* value, uint32* expected, uint32 desired) noexcept { return (uint32) InterlockedCompareExchange((volatile long *) value, desired, *expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire_release(volatile uint64* value, uint64* expected, uint64 desired) noexcept { return (uint64) InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) desired, (LONG64) *expected); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint16* value, uint16 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, (short) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint16* value, uint16 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedExchangeAdd16((volatile short *) value, -((short) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint32* value, uint32 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAdd((volatile long *) value, increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint32* value, uint32 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAdd((volatile long *) value, -1 * ((int32) decrement)); }
FORCE_INLINE void atomic_add_acquire_release(volatile uint64* value, uint64 increment) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAdd64((volatile LONG64 *) value, (LONG64) increment); }
FORCE_INLINE void atomic_sub_acquire_release(volatile uint64* value, uint64 decrement) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAdd64((volatile LONG64 *) value, -((LONG64) decrement)); }
FORCE_INLINE uint32 atomic_compare_exchange_strong_acquire_release(volatile uint32* value, uint32 expected, uint32 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedCompareExchange((volatile long *) value, desired, expected); }
FORCE_INLINE uint64 atomic_compare_exchange_strong_acquire_release(volatile uint64* value, uint64 expected, uint64 desired) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedCompareExchange64((volatile LONG64 *) value, (LONG64) desired, (LONG64) expected); }
FORCE_INLINE uint8 atomic_fetch_add_acquire_release(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, (char) operand); }
FORCE_INLINE uint8 atomic_fetch_sub_acquire_release(volatile uint8* value, uint8 operand) noexcept { return (uint8) InterlockedExchangeAdd8((volatile char *) value, -((char) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_acquire_release(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_acquire_release(volatile uint16* value, uint16 operand) noexcept { return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_acquire_release(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAdd((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_acquire_release(volatile uint32* value, uint32 operand) noexcept { return (uint32) InterlockedExchangeAdd((volatile unsigned long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_acquire_release(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire_release(volatile uint64* value, uint64 operand) noexcept { return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE uint16 atomic_fetch_add_acquire_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, (short) operand); }
FORCE_INLINE uint16 atomic_fetch_sub_acquire_release(volatile uint16* value, uint16 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); return (uint16) InterlockedExchangeAdd16((volatile short *) value, -((short) operand)); }
FORCE_INLINE uint32 atomic_fetch_add_acquire_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAdd((volatile long *) value, operand); }
FORCE_INLINE uint32 atomic_fetch_sub_acquire_release(volatile uint32* value, uint32 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); return (uint32) InterlockedExchangeAdd((volatile long *) value, -((long) operand)); }
FORCE_INLINE uint64 atomic_fetch_add_acquire_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, (LONG64) operand); }
FORCE_INLINE uint64 atomic_fetch_sub_acquire_release(volatile uint64* value, uint64 operand) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); return (uint64) InterlockedExchangeAdd64((volatile LONG64 *) value, -((LONG64) operand)); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint8* value, uint8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int8* value, int8 mask) noexcept { InterlockedAnd8((volatile char *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint16* value, uint16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int16* value, int16 mask) noexcept { InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint32* value, uint32 mask) noexcept { InterlockedAnd((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int32* value, int32 mask) noexcept { InterlockedAnd((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint64* value, uint64 mask) noexcept { InterlockedAnd64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int64* value, int64 mask) noexcept { InterlockedAnd64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedAnd16((volatile short *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAnd((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedAnd((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_and_acquire_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedAnd64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint8* value, uint8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int8* value, int8 mask) noexcept { InterlockedOr8((volatile char *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint16* value, uint16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int16* value, int16 mask) noexcept { InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint32* value, uint32 mask) noexcept { InterlockedOr((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int32* value, int32 mask) noexcept { InterlockedOr((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint64* value, uint64 mask) noexcept { InterlockedOr64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int64* value, int64 mask) noexcept { InterlockedOr64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint16* value, uint16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int16* value, int16 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 2) == 0); InterlockedOr16((volatile short *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint32* value, uint32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOr((volatile LONG *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int32* value, int32 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 4) == 0); InterlockedOr((volatile LONG *) value, (LONG)mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile uint64* value, uint64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64((volatile LONG64 *) value, mask); }
FORCE_INLINE void atomic_or_acquire_release(volatile int64* value, int64 mask) noexcept { ASSERT_STRICT(((uintptr_t) value % 8) == 0); InterlockedOr64((volatile LONG64 *) value, mask); }

// Check out the intrinsic functions fence_memory and fence_write
// These are much faster and may accomplish the same thing
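A minimal usage sketch of the wrappers above (all names hypothetical): a naturally aligned shared counter, which is exactly the alignment the ASSERT_STRICT checks verify.

struct JobStats {
    alignas(4) volatile uint32 jobs_done;
};

void job_finished(JobStats* stats) {
    // Acquire-release RMW; returns the previous value
    uint32 before = atomic_fetch_add_acquire_release(&stats->jobs_done, 1);
    (void) before;
}

uint32 jobs_done_count(JobStats* stats) {
    // Implemented above as a compare-exchange with 0/0, i.e. a full RMW read
    return atomic_get_acquire_release(&stats->jobs_done);
}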
99
serialize/WebBinary.h
Normal file
@ -0,0 +1,99 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_SERIALIZE_WEB_BINARY
#define COMS_SERIALIZE_WEB_BINARY

#include <string.h>
#include "../stdlib/Types.h"
#include "../utils/StringUtils.h"
#include "../compiler/TypeName.h"

struct WebBinaryValue {
    const char* name;
    const char* type;
    const char* nested_schema = NULL;

    // Referenced by web_binary_schema() below; without these members the schema writer cannot compile
    int32 offset = 0;
    int32 length = 0;
};

constexpr
void web_binary_copy(char* dest, int32 src) {
    for (size_t i = 0; i < sizeof(int32); ++i) {
        dest[i] = static_cast<char>(src >> (8 * i));
    }
}

template<const WebBinaryValue* binary_struct, int32 count>
constexpr int32 web_binary_schema_size() {
    int32 size = 0;
    for (int32 i = 0; i < count; ++i) {
        size += str_length_constexpr(binary_struct[i].name) + 1;
        size += str_length_constexpr(binary_struct[i].type) + 1;

        // Add size for nested schema if present
        if (binary_struct[i].nested_schema) {
            size += str_length_constexpr(binary_struct[i].nested_schema) + 1;
        } else {
            size += 1; // Empty string for no nested schema
        }

        // Offset and length are written as raw int32 values per field (see web_binary_schema below)
        size += 2 * sizeof(int32);
    }

    return size;
}

template<size_t N>
struct WebBinarySchema {
    char data[N];
    constexpr WebBinarySchema() : data{} {}
    constexpr const char* c_str() const { return data; }
};

template<const WebBinaryValue* binary_struct, int32 count>
constexpr auto web_binary_schema() {
    constexpr int32 size = web_binary_schema_size<binary_struct, count>();
    WebBinarySchema<size> schema;

    char* buffer = schema.data;
    for (int32 i = 0; i < count; ++i) {
        str_copy_short(buffer, binary_struct[i].name);
        buffer += str_length_constexpr(binary_struct[i].name) + 1;

        str_copy_short(buffer, binary_struct[i].type);
        buffer += str_length_constexpr(binary_struct[i].type) + 1;

        // Write nested schema if present
        if (binary_struct[i].nested_schema) {
            str_copy_short(buffer, binary_struct[i].nested_schema);
            buffer += str_length_constexpr(binary_struct[i].nested_schema) + 1;
        } else {
            *buffer++ = '\0'; // Empty string
        }

        web_binary_copy(buffer, binary_struct[i].offset);
        buffer += sizeof(int32);

        web_binary_copy(buffer, binary_struct[i].length);
        buffer += sizeof(int32);
    }

    return schema;
}

#define WEB_BINARY_FIELD(StructType, Field) \
    { \
        #Field, \
        GetTypeName<decltype(StructType::Field)>() \
    }

#define WEB_BINARY_FIELD_WITH_SCHEMA(StructType, Field, Schema) \
    { \
        #Field, \
        GetTypeName<decltype(StructType::Field)>(), \
        Schema.c_str() \
    }

#endif
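A schema for a struct might be assembled at compile time as follows; Position and its fields are hypothetical, only the macros and functions from this header are assumed.

// Hypothetical usage sketch for the compile-time schema builder above
struct Position {
    f32 x;
    f32 y;
};

// The field table needs static storage duration to be usable as a non-type template argument
static constexpr WebBinaryValue POSITION_FIELDS[] = {
    WEB_BINARY_FIELD(Position, x),
    WEB_BINARY_FIELD(Position, y)
};

// The schema string is baked into the binary at compile time
static constexpr auto POSITION_SCHEMA = web_binary_schema<POSITION_FIELDS, 2>();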
17
session/Session.h
Normal file
@ -0,0 +1,17 @@
/**
 * Jingga
 *
 * @copyright Jingga
 * @license OMS License 2.0
 * @version 1.0.0
 * @link https://jingga.app
 */
#ifndef COMS_SESSION_H
#define COMS_SESSION_H

#include "../stdlib/Types.h"

struct Session {
};

#endif
115
stdlib/HashMap.h
@ -32,6 +32,16 @@ struct HashEntryInt32 {
    int32 value;
};

// This struct is often used for hash maps whose content has a dynamic length per element
// value = stores the offset into the buffer array
// value2 = stores the length of the data
struct HashEntryInt32Int32 {
    char key[HASH_MAP_MAX_KEY_LENGTH];
    uint16 next;
    int32 value;
    int32 value2;
};

struct HashEntryInt64 {
    char key[HASH_MAP_MAX_KEY_LENGTH];
    uint16 next;
@ -115,13 +125,23 @@ struct HashEntryKeyInt32 {

// HashMaps are limited to 4GB in total size
struct HashMap {
    // Contains the chunk memory index for a provided key/hash
    // Values are 1-indexed/offset since 0 means not used/found
    uint16* table;

    // Contains the actual data of the hash map
    // Careful, some hash map implementations don't store the value in here but an offset for use in another array
    // @question We might want to align the ChunkMemory memory to 8 bytes, currently it's either 4 or 8 bytes depending on the length
    ChunkMemory buf;
};

// The ref hash map is used if the value size is dynamic per element (e.g. files, cache data etc.)
struct HashMapRef {
    HashMap hm;

    ChunkMemory data;
};

// @todo Change so the hashmap can grow or maybe even better create a static and dynamic version
inline
void hashmap_alloc(HashMap* hm, int32 count, int32 element_size, int32 alignment = 64)
@ -136,6 +156,22 @@ void hashmap_alloc(HashMap* hm, int32 count, int32 element_size, int32 alignment
    chunk_init(&hm->buf, data + sizeof(uint16) * count, count, element_size, 8);
}

inline
void hashmap_alloc(HashMapRef* hmr, int32 count, int32 data_element_size, int32 alignment = 64)
{
    int32 element_size = sizeof(HashEntryInt32Int32);
    LOG_1("Allocate HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
    byte* data = (byte *) platform_alloc(
        count * (sizeof(uint16) + element_size)
        + CEIL_DIV(count, alignment) * sizeof(hmr->hm.buf.free)
        + count * data_element_size
    );

    hmr->hm.table = (uint16 *) data;
    chunk_init(&hmr->hm.buf, data + sizeof(uint16) * count, count, element_size, 8);

    // The data area starts after the table AND the entry chunk memory
    chunk_init(&hmr->data, data + sizeof(uint16) * count + hmr->hm.buf.size, count, data_element_size);
}

inline
void hashmap_free(HashMap* hm)
{
@ -258,6 +294,67 @@ void hashmap_insert(HashMap* hm, const char* key, int64 value) noexcept {
    *target = (uint16) (element + 1);
}

void hashmap_insert(HashMap* hm, const char* key, int32 value1, int32 value2) noexcept {
    uint64 index = hash_djb2(key) % hm->buf.count;

    int32 element = chunk_reserve(&hm->buf, 1);
    HashEntryInt32Int32* entry = (HashEntryInt32Int32 *) chunk_get_element(&hm->buf, element, true);

    // Ensure key length
    str_move_to_pos(&key, -HASH_MAP_MAX_KEY_LENGTH);
    str_copy_short(entry->key, key, HASH_MAP_MAX_KEY_LENGTH);
    entry->key[HASH_MAP_MAX_KEY_LENGTH - 1] = '\0';

    entry->value = value1;
    entry->value2 = value2;
    entry->next = 0;

    uint16* target = &hm->table[index];
    while (*target) {
        HashEntryInt32Int32* tmp = (HashEntryInt32Int32*) chunk_get_element(&hm->buf, *target - 1, false);
        target = &tmp->next;
    }
    *target = (uint16) (element + 1);
}

void hashmap_insert(HashMapRef* hmr, const char* key, byte* data, int32 data_size) noexcept {
    // Data chunk
    int32 chunk_count = (int32) ((data_size + hmr->data.chunk_size - 1) / hmr->data.chunk_size);
    int32 chunk_offset = chunk_reserve(&hmr->data, chunk_count);

    if (chunk_offset < 0) {
        ASSERT_SIMPLE(chunk_offset >= 0);
        return;
    }

    // Insert Data
    // NOTE: The data and the hash map entry are in two separate memory areas
    byte* data_mem = chunk_get_element(&hmr->data, chunk_offset);
    memcpy(data_mem, data, data_size);

    // Handle hash map entry
    uint64 index = hash_djb2(key) % hmr->hm.buf.count;

    int32 element = chunk_reserve(&hmr->hm.buf, 1);
    HashEntryInt32Int32* entry = (HashEntryInt32Int32 *) chunk_get_element(&hmr->hm.buf, element, true);

    // Ensure key length
    str_move_to_pos(&key, -HASH_MAP_MAX_KEY_LENGTH);
    str_copy_short(entry->key, key, HASH_MAP_MAX_KEY_LENGTH);
    entry->key[HASH_MAP_MAX_KEY_LENGTH - 1] = '\0';

    entry->value = chunk_offset;
    entry->value2 = chunk_count;
    entry->next = 0;

    uint16* target = &hmr->hm.table[index];
    while (*target) {
        HashEntryInt32Int32* tmp = (HashEntryInt32Int32*) chunk_get_element(&hmr->hm.buf, *target - 1, false);
        target = &tmp->next;
    }
    *target = (uint16) (element + 1);
}

void hashmap_insert(HashMap* hm, const char* key, uintptr_t value) noexcept {
    uint64 index = hash_djb2(key) % hm->buf.count;

@ -464,6 +561,24 @@ HashEntry* hashmap_get_entry(HashMap* hm, const char* key) noexcept {
    return NULL;
}

byte* hashmap_get_value(HashMapRef* hmr, const char* key) noexcept {
    uint64 index = hash_djb2(key) % hmr->hm.buf.count;
    HashEntryInt32Int32* entry = (HashEntryInt32Int32 *) chunk_get_element(&hmr->hm.buf, hmr->hm.table[index] - 1, false);

    str_move_to_pos(&key, -HASH_MAP_MAX_KEY_LENGTH);

    while (entry != NULL) {
        if (str_compare(entry->key, key) == 0) {
            DEBUG_MEMORY_READ((uintptr_t) entry, sizeof(HashEntryInt32Int32));
            return chunk_get_element(&hmr->data, entry->value);
        }

        entry = (HashEntryInt32Int32 *) chunk_get_element(&hmr->hm.buf, entry->next - 1, false);
    }

    return NULL;
}
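The ref variant keeps the payload in its own chunk area and only stores (offset, chunk count) in the entry; a hedged usage sketch with hypothetical names:

// Hypothetical usage sketch for HashMapRef with variable-length values
void cache_store(HashMapRef* cache, const char* path, byte* content, int32 content_size) {
    // Copies the payload into the data chunks and records offset + chunk count in the entry
    hashmap_insert(cache, path, content, content_size);
}

byte* cache_load(HashMapRef* cache, const char* path) {
    // Returns a pointer into the data chunk area, or NULL if the key is missing
    return hashmap_get_value(cache, path);
}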

uint32 hashmap_get_element(const HashMap* hm, const char* key) noexcept {
    uint64 index = hash_djb2(key) % hm->buf.count;
    const HashEntry* entry = (const HashEntry *) chunk_get_element((ChunkMemory *) &hm->buf, hm->table[index] - 1, false);
@ -38,6 +38,12 @@ struct PerfectHashEntryInt64 {
    int64 value;
};

struct PerfectHashEntryInt32Int32 {
    char key[PERFECT_HASH_MAP_MAX_KEY_LENGTH];
    int32 value;
    int32 value2;
};

struct PerfectHashEntryUIntPtr {
    char key[PERFECT_HASH_MAP_MAX_KEY_LENGTH];
    uintptr_t value;
@ -63,6 +69,7 @@ struct PerfectHashEntry {
    byte* value;
};

// Currently we assume that a perfect hash map doesn't change after initialization (incl. inserting all elements)
struct PerfectHashMap {
    int32 hash_seed;
    PerfectHashFunction hash_function;
@ -72,6 +79,15 @@ struct PerfectHashMap {
    byte* hash_entries;
};

// The ref hash map is used if the value size is dynamic per element (e.g. files, cache data etc.)
struct PerfectHashMapRef {
    PerfectHashMap hm;

    int32 data_pos;
    int32 data_size;
    byte* data;
};

PerfectHashMap* perfect_hashmap_prepare(PerfectHashMap* hm, const char** keys, int32 key_count, int32 seed_tries, RingMemory* ring)
{
    int32* indices = (int32 *) ring_get_memory(ring, hm->map_count * sizeof(int32), 4);
@ -155,6 +171,29 @@ PerfectHashMap* perfect_hashmap_prepare(PerfectHashMap* hm, const char* keys, in
    return NULL;
}

void perfect_hashmap_alloc(PerfectHashMap* hm, int32 count, int32 element_size)
{
    LOG_1("Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
    hm->map_count = count;
    hm->entry_size = element_size;
    hm->hash_entries = (byte *) platform_alloc(count * element_size);
}

void perfect_hashmap_alloc(PerfectHashMapRef* hmr, int32 count, int32 total_data_size)
{
    hmr->hm.entry_size = sizeof(PerfectHashEntryInt32Int32);
    LOG_1("Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &hmr->hm.entry_size}});
    hmr->hm.map_count = count;
    hmr->hm.hash_entries = (byte *) platform_alloc(
        count * hmr->hm.entry_size
        + total_data_size
    );

    hmr->data_pos = 0;
    hmr->data_size = total_data_size;
    hmr->data = hmr->hm.hash_entries + count * hmr->hm.entry_size;
}

// WARNING: element_size = element size + remaining HashEntry data size
void perfect_hashmap_create(PerfectHashMap* hm, int32 count, int32 element_size, BufferMemory* buf)
{
|

// @bug the insert functions don't handle too long keys like the HashMap does
inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int32 value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int32 value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryInt32* entry = (PerfectHashEntryInt32 *) (hm->hash_entries + hm->entry_size * index);
@ -205,7 +244,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int32 value) {
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int64 value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int64 value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryInt64* entry = (PerfectHashEntryInt64 *) (hm->hash_entries + hm->entry_size * index);

@ -218,7 +257,46 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int64 value) {
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, uintptr_t value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, int32 value1, int32 value2) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryInt32Int32* entry = (PerfectHashEntryInt32Int32 *) (hm->hash_entries + hm->entry_size * index);

    // Ensure key length
    str_move_to_pos(&key, -PERFECT_HASH_MAP_MAX_KEY_LENGTH);
    str_copy_short(entry->key, key, PERFECT_HASH_MAP_MAX_KEY_LENGTH);
    entry->key[PERFECT_HASH_MAP_MAX_KEY_LENGTH - 1] = '\0';

    entry->value = value1;
    entry->value2 = value2;
}

inline
void perfect_hashmap_insert(PerfectHashMapRef* hmr, const char* key, byte* data, int32 data_size) noexcept {
    if (hmr->data_pos + data_size > hmr->data_size) {
        ASSERT_SIMPLE(hmr->data_pos + data_size <= hmr->data_size);
        return;
    }

    // Insert data
    // NOTE: The data and the hash map entry are in two separate memory areas
    memcpy(hmr->data + hmr->data_pos, data, data_size);

    // Handle hash map entry
    int32 index = hmr->hm.hash_function(key, hmr->hm.hash_seed) % hmr->hm.map_count;
    PerfectHashEntryInt32Int32* entry = (PerfectHashEntryInt32Int32 *) (hmr->hm.hash_entries + hmr->hm.entry_size * index);

    // Ensure key length
    str_move_to_pos(&key, -PERFECT_HASH_MAP_MAX_KEY_LENGTH);
    str_copy_short(entry->key, key, PERFECT_HASH_MAP_MAX_KEY_LENGTH);
    entry->key[PERFECT_HASH_MAP_MAX_KEY_LENGTH - 1] = '\0';

    entry->value = hmr->data_pos;
    entry->value2 = data_size;
    hmr->data_pos += data_size;
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, uintptr_t value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryUIntPtr* entry = (PerfectHashEntryUIntPtr *) (hm->hash_entries + hm->entry_size * index);

@ -231,7 +309,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, uintptr_t value
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, void* value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, void* value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryVoidP* entry = (PerfectHashEntryVoidP *) (hm->hash_entries + hm->entry_size * index);
@ -244,7 +322,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, void* value) {
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, f32 value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, f32 value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryFloat* entry = (PerfectHashEntryFloat *) (hm->hash_entries + hm->entry_size * index);
@ -257,7 +335,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, f32 value) {
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const char* value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const char* value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryStr* entry = (PerfectHashEntryStr *) (hm->hash_entries + hm->entry_size * index);
@ -270,7 +348,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const char* val
}

inline
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const byte* value) {
void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const byte* value) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntryStr* entry = (PerfectHashEntryStr *) (hm->hash_entries + hm->entry_size * index);
@ -283,7 +361,7 @@ void perfect_hashmap_insert(PerfectHashMap* hm, const char* key, const byte* val
}

inline
PerfectHashEntry* perfect_hashmap_get_entry(const PerfectHashMap* hm, const char* key) {
PerfectHashEntry* perfect_hashmap_get_entry(const PerfectHashMap* hm, const char* key) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntry* entry = (PerfectHashEntry *) (hm->hash_entries + hm->entry_size * index);
@ -293,7 +371,17 @@ PerfectHashEntry* perfect_hashmap_get_entry(const PerfectHashMap* hm, const char
}

inline
void perfect_hashmap_delete_entry(PerfectHashMap* hm, const char* key) {
byte* perfect_hashmap_get_value(const PerfectHashMapRef* hmr, const char* key) noexcept {
    int32 index = hmr->hm.hash_function(key, hmr->hm.hash_seed) % hmr->hm.map_count;
    PerfectHashEntryInt32Int32* entry = (PerfectHashEntryInt32Int32 *) (hmr->hm.hash_entries + hmr->hm.entry_size * index);

    // Must use the same length as the insert functions, otherwise long keys can never match
    str_move_to_pos(&key, -PERFECT_HASH_MAP_MAX_KEY_LENGTH);

    return str_compare(entry->key, key) == 0 ? hmr->data + entry->value : NULL;
}
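The same pattern for the perfect hash map: the data area is a single linear buffer, so insertion is a plain bump allocation. A sketch with hypothetical names; keys must have been part of the key set used by perfect_hashmap_prepare.

// Hypothetical usage sketch for PerfectHashMapRef
void asset_store(PerfectHashMapRef* assets, const char* key, byte* blob, int32 blob_size) {
    // Appends the blob at data_pos and stores (data_pos, blob_size) in the entry
    perfect_hashmap_insert(assets, key, blob, blob_size);
}

byte* asset_load(const PerfectHashMapRef* assets, const char* key) {
    // Single probe, no chaining: perfect hashing guarantees at most one candidate slot
    return perfect_hashmap_get_value(assets, key);
}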

inline
void perfect_hashmap_delete_entry(PerfectHashMap* hm, const char* key) noexcept {
    int32 index = hm->hash_function(key, hm->hash_seed) % hm->map_count;
    PerfectHashEntry* entry = (PerfectHashEntry *) (hm->hash_entries + hm->entry_size * index);
@ -124,10 +124,6 @@ DEFINE_BITCAST_FUNCTION(uint64, f64)
#define MHZ 1000000
#define GHZ 1000000000

#define internal static // only allows local "file" access
#define local_persist static
#define global_persist static

struct v3_byte {
    union {
        struct {
@ -21,9 +21,6 @@ typedef void (*ThreadPoolJobFunc)(void*);
struct PoolWorker {
    alignas(4) atomic_32 int32 id;
    alignas(4) atomic_32 int32 state;
    void* arg;
    void* result;
    RingMemory ring;
    ThreadPoolJobFunc func;
    ThreadPoolJobFunc callback;
};
@ -29,12 +29,14 @@ struct ThreadPool {
    alignas(4) atomic_32 int32 thread_cnt;

    int32 size;
    int32 element_size;
    alignas(4) atomic_32 int32 state;

    alignas(4) atomic_32 int32 id_counter;
};

static THREAD_RETURN thread_pool_worker(void* arg)
// @performance Can we optimize this? This is a critical function
static
THREAD_RETURN thread_pool_worker(void* arg)
{
    ThreadPool* pool = (ThreadPool *) arg;
    PoolWorker* work;
@ -51,6 +53,9 @@ static THREAD_RETURN thread_pool_worker(void* arg)
            break;
        }

        // We define a queue element as free based on its id
        // So even if we "keep" it in the queue the pool will not overwrite it as long as the id > 0 (see pool_add)
        // This is only a ThreadPool specific queue behavior to avoid additional copies
        work = (PoolWorker *) queue_dequeue_keep(&pool->work_queue);
        mutex_unlock(&pool->work_mutex);

@ -63,11 +68,12 @@ static THREAD_RETURN thread_pool_worker(void* arg)
        LOG_2("ThreadPool worker started");
        work->func(work);
        LOG_2("ThreadPool worker ended");
        // At the end of a thread the ring memory is automatically considered freed
        DEBUG_MEMORY_FREE((uintptr_t) work->ring.memory);
        LOG_2("Freed thread RingMemory: %n B", {{LOG_DATA_UINT64, &work->ring.size}});
        atomic_set_release(&work->state, 1);

        if (work->callback) {
            work->callback(work);
        }

        // Job gets marked after completion -> can be overwritten now
        if (atomic_get_relaxed(&work->id) == -1) {
            atomic_set_release(&work->id, 0);
@ -86,8 +92,13 @@ static THREAD_RETURN thread_pool_worker(void* arg)
    return (THREAD_RETURN) NULL;
}

void thread_pool_alloc(ThreadPool* pool, int32 thread_count, int32 worker_count, int32 alignment = 64)
{
void thread_pool_alloc(
    ThreadPool* pool,
    int32 element_size,
    int32 thread_count,
    int32 worker_count,
    int32 alignment = 64
) {
    PROFILE(PROFILE_THREAD_POOL_ALLOC);
    LOG_1(
        "Allocating thread pool with %d threads and %d queue length",
@ -97,8 +108,9 @@ void thread_pool_alloc(ThreadPool* pool, int32 thread_count, int32 worker_count,
        }
    );

    queue_alloc(&pool->work_queue, worker_count, sizeof(PoolWorker), alignment);
    queue_alloc(&pool->work_queue, worker_count, element_size, alignment);

    pool->element_size = element_size;
    pool->thread_cnt = thread_count;

    // @todo switch from pool mutex and pool cond to threadjob mutex/cond

@ -114,8 +126,14 @@ void thread_pool_alloc(ThreadPool* pool, int32 thread_count, int32 worker_count,
    }
}
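The new element_size parameter lets a pool queue job structs that embed PoolWorker and carry extra payload without a separate allocation. A sketch under that assumption (RenderJob is hypothetical):

// Hypothetical job type: PoolWorker must be the first member so the pool's
// PoolWorker* view of each queue element stays valid
struct RenderJob {
    PoolWorker worker;
    int32 scene_id;
    f32 time_delta;
};

void setup_pool(ThreadPool* pool) {
    // Queue elements are now sizeof(RenderJob) instead of sizeof(PoolWorker)
    thread_pool_alloc(pool, (int32) sizeof(RenderJob), 4, 128);
}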

void thread_pool_create(ThreadPool* pool, BufferMemory* buf, int32 thread_count, int32 worker_count, int32 alignment = 64)
{
void thread_pool_create(
    ThreadPool* pool,
    BufferMemory* buf,
    int32 element_size,
    int32 thread_count,
    int32 worker_count,
    int32 alignment = 64
) {
    PROFILE(PROFILE_THREAD_POOL_ALLOC);
    LOG_1(
        "Creating thread pool with %d threads and %d queue length",
@ -125,8 +143,9 @@ void thread_pool_create(ThreadPool* pool, BufferMemory* buf, int32 thread_count,
        }
    );

    queue_init(&pool->work_queue, buf, worker_count, sizeof(PoolWorker), alignment);
    queue_init(&pool->work_queue, buf, worker_count, element_size, alignment);

    pool->element_size = element_size;
    pool->thread_cnt = thread_count;

    // @todo switch from pool mutex and pool cond to threadjob mutex/cond
@ -170,7 +189,7 @@ void thread_pool_destroy(ThreadPool* pool)
PoolWorker* thread_pool_add_work(ThreadPool* pool, const PoolWorker* job)
{
    mutex_lock(&pool->work_mutex);
    PoolWorker* temp_job = (PoolWorker *) ring_get_memory_nomove((RingMemory *) &pool->work_queue, sizeof(PoolWorker), 8);
    PoolWorker* temp_job = (PoolWorker *) ring_get_memory_nomove((RingMemory *) &pool->work_queue, pool->element_size, 8);
    if (atomic_get_relaxed(&temp_job->id) > 0) {
        mutex_unlock(&pool->work_mutex);
        ASSERT_SIMPLE(temp_job->id == 0);
@ -178,8 +197,8 @@ PoolWorker* thread_pool_add_work(ThreadPool* pool, const PoolWorker* job)
        return NULL;
    }

    memcpy(temp_job, job, sizeof(PoolWorker));
    ring_move_pointer((RingMemory *) &pool->work_queue, &pool->work_queue.head, sizeof(PoolWorker), 8);
    memcpy(temp_job, job, pool->element_size);
    ring_move_pointer((RingMemory *) &pool->work_queue, &pool->work_queue.head, pool->element_size, 8);

    if (temp_job->id == 0) {
        temp_job->id = atomic_fetch_add_acquire(&pool->id_counter, 1);
@ -17,6 +17,16 @@
#define HAS_ZERO(x) (((x) - ((size_t)-1 / 0xFF)) & ~(x) & (((size_t)-1 / 0xFF) * (0xFF / 2 + 1)))
#define HAS_CHAR(x, c) (HAS_ZERO((x) ^ (((size_t)-1 / 0xFF) * (c))))
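HAS_ZERO is the classic SWAR zero-byte test: subtracting 0x01 from every byte lane and masking with ~x isolates lanes whose high bit flipped, i.e. lanes that were zero; HAS_CHAR reduces a character search to the same test by XOR-ing the search byte into every lane. A small sanity sketch (hypothetical values, assuming 64-bit size_t):

// Sanity sketch for the SWAR macros above
void has_char_example() {
    size_t word = 0x6f6c6c6548; // "Hello" packed little-endian, upper bytes zero
    ASSERT_SIMPLE(HAS_ZERO(word));       // the padding bytes are zero
    ASSERT_SIMPLE(HAS_CHAR(word, 'e'));  // 'e' occurs in one lane
    ASSERT_SIMPLE(!HAS_CHAR(word, 'z')); // 'z' does not occur
}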

// WARNING: We need this function because the other function relies on non-constexpr performance features
constexpr
size_t str_length_constexpr(const char* str) noexcept {
    size_t len = 0;
    while (str[len] != '\0') {
        ++len;
    }
    return len;
}

inline
size_t str_length(const char* str) noexcept {
    const char* ptr = str;
@ -43,6 +53,23 @@ size_t str_length(const char* str) noexcept {
    }
}

// WARNING: We need this function because the other function relies on non-constexpr performance features
inline constexpr
const char* str_find_constexpr(const char* str, const char* needle) noexcept {
    size_t needle_len = str_length_constexpr(needle);
    size_t str_len = str_length_constexpr(str);

    // Avoid size_t underflow for needles longer than the string
    if (needle_len > str_len) {
        return NULL;
    }

    size_t limit = str_len - needle_len + 1;

    for (size_t i = 0; i < limit; ++i) {
        // memcmp is not usable in constant evaluation, compare manually
        size_t j = 0;
        while (j < needle_len && str[i + j] == needle[j]) {
            ++j;
        }

        if (j == needle_len) {
            return &str[i];
        }
    }

    return NULL;
}
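Since both helpers are constexpr, they can be verified at compile time; a small sketch of hypothetical checks:

// Compile-time sanity checks for the constexpr string helpers
static_assert(str_length_constexpr("f32") == 3, "length is evaluated at compile time");
static_assert(str_find_constexpr("struct Session", "Session") != NULL, "search is evaluated at compile time");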

inline
const char* str_find(const char* str, const char* needle) noexcept {
    size_t needle_len = str_length(needle);
    size_t str_len = str_length(str);
@ -848,7 +875,7 @@ void str_copy_until(char* __restrict dest, const char* __restrict src, const cha
    *dest = '\0';
}

inline
inline constexpr
void str_copy_short(char* __restrict dest, const char* __restrict src, int32 length) noexcept
{
    int32 i = -1;
@ -859,7 +886,7 @@ void str_copy_short(char* __restrict dest, const char* __restrict src, int32 len
    *dest = '\0';
}

inline
inline constexpr
void str_copy_short(char* __restrict dest, const char* __restrict src) noexcept
{
    while (*src != '\0') {
@ -869,6 +896,20 @@ void str_copy_short(char* __restrict dest, const char* __restrict src) noexcept
    *dest = '\0';
}

inline constexpr
int32 str_copy(char* __restrict dest, const char* __restrict src) noexcept
{
    int32 length = 0;
    while (*src != '\0') {
        ++length;
        *dest++ = *src++;
    }

    *dest = '\0';

    return length;
}

inline
void str_copy_long(char* __restrict dest, const char* __restrict src) noexcept
{
@ -1145,7 +1186,7 @@ bool str_contains(const char* haystack, const char* needle, size_t length) noexc
    return false;
}

inline
inline constexpr
int32 str_compare(const char* str1, const char* str2) noexcept
{
    byte c1, c2;
@ -1158,6 +1199,7 @@ int32 str_compare(const char* str1, const char* str2) noexcept
    return c1 - c2;
}

constexpr
int32 str_compare(const char* str1, const char* str2, size_t n) noexcept
{
    byte c1 = '\0';
@ -18,9 +18,20 @@
    /* cppcheck-suppress nullPointer */ \
    *(volatile int *)0 = 0; \
}

#if DEBUG_STRICT
    // This macro is only used during strict debugging
    // Strict debugging is a mode that performs a lot of assertions
    // This slows down the application considerably and is therefore not suitable for normal debugging
    #define ASSERT_STRICT ASSERT_SIMPLE
#else
    #define ASSERT_STRICT(a) ((void)0)
#endif
#else
    #define ASSERT_SIMPLE(a) ((void)0)
    #define ASSERT_SIMPLE_CONST(a) ((void)0)

    #define ASSERT_STRICT(a) ((void)0)
#endif

#endif