From 2a706fd2ba355808cada31ac1eed8ce28caa6b37 Mon Sep 17 00:00:00 2001
From: kobalicek
Date: Tue, 14 Dec 2021 19:03:31 +0100
Subject: Reworked unaligned memory access to work better with GCC 11

---
 src/asmjit/core/codeholder.cpp |   4 +-
 src/asmjit/core/codewriter.cpp |   4 +-
 src/asmjit/core/codewriter_p.h |   4 +-
 src/asmjit/core/support.cpp    |   2 +
 src/asmjit/core/support.h      | 428 ++++++++++++++++++++---------------------
 5 files changed, 220 insertions(+), 222 deletions(-)

diff --git a/src/asmjit/core/codeholder.cpp b/src/asmjit/core/codeholder.cpp
index 2beebff..25d8f90 100644
--- a/src/asmjit/core/codeholder.cpp
+++ b/src/asmjit/core/codeholder.cpp
@@ -1032,11 +1032,11 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
 
       switch (re->format().valueSize()) {
         case 1:
-          Support::writeU8(buffer + valueOffset, uint32_t(value & 0xFFu));
+          Support::writeU8(buffer + valueOffset, uint8_t(value & 0xFFu));
           break;
 
         case 2:
-          Support::writeU16uLE(buffer + valueOffset, uint32_t(value & 0xFFFFu));
+          Support::writeU16uLE(buffer + valueOffset, uint16_t(value & 0xFFFFu));
           break;
 
         case 4:
diff --git a/src/asmjit/core/codewriter.cpp b/src/asmjit/core/codewriter.cpp
index 772146e..b416dec 100644
--- a/src/asmjit/core/codewriter.cpp
+++ b/src/asmjit/core/codewriter.cpp
@@ -94,7 +94,7 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
       if (!encodeOffset32(&mask, offset64, format))
         return false;
 
-      Support::writeU8(dst, Support::readU8(dst) | mask);
+      Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask));
       return true;
     }
 
@@ -103,7 +103,7 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
       if (!encodeOffset32(&mask, offset64, format))
         return false;
 
-      Support::writeU16uLE(dst, Support::readU16uLE(dst) | mask);
+      Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask));
       return true;
     }
 
diff --git a/src/asmjit/core/codewriter_p.h b/src/asmjit/core/codewriter_p.h
index 8b120bd..c799241 100644
--- a/src/asmjit/core/codewriter_p.h
+++ b/src/asmjit/core/codewriter_p.h
@@ -66,14 +66,14 @@ public:
   template<typename T>
   ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept {
     typedef typename std::make_unsigned<T>::type U;
-    Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
+    Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu));
     _cursor += 2;
   }
 
   template<typename T>
   ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept {
     typedef typename std::make_unsigned<T>::type U;
-    Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
+    Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu));
     _cursor += 2;
   }
 
diff --git a/src/asmjit/core/support.cpp b/src/asmjit/core/support.cpp
index 34253fd..245398f 100644
--- a/src/asmjit/core/support.cpp
+++ b/src/asmjit/core/support.cpp
@@ -119,8 +119,10 @@ static void testBitUtils() noexcept {
 
 static void testIntUtils() noexcept {
   INFO("Support::byteswap()");
+  EXPECT(Support::byteswap16(int32_t(0x0102)) == int32_t(0x0201));
   EXPECT(Support::byteswap32(int32_t(0x01020304)) == int32_t(0x04030201));
   EXPECT(Support::byteswap32(uint32_t(0x01020304)) == uint32_t(0x04030201));
+  EXPECT(Support::byteswap64(uint64_t(0x0102030405060708)) == uint64_t(0x0807060504030201));
 
   INFO("Support::bytepack()");
   union BytePackData {
diff --git a/src/asmjit/core/support.h b/src/asmjit/core/support.h
index 5b142d4..a9091c3 100644
--- a/src/asmjit/core/support.h
+++ b/src/asmjit/core/support.h
@@ -21,22 +21,6 @@ ASMJIT_BEGIN_NAMESPACE
 //! here is considered internal and should not be used outside of AsmJit and related projects like AsmTK.
 namespace Support {
 
-// Support - Architecture Features & Constraints
-// =============================================
-
-//! \cond INTERNAL
-#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 11
-// There is a bug in GCC11+ that makes it unusable to use annotated unaligned loads/stores.
-static constexpr bool kUnalignedAccess16 = false;
-static constexpr bool kUnalignedAccess32 = false;
-static constexpr bool kUnalignedAccess64 = false;
-#else
-static constexpr bool kUnalignedAccess16 = ASMJIT_ARCH_X86 != 0;
-static constexpr bool kUnalignedAccess32 = ASMJIT_ARCH_X86 != 0;
-static constexpr bool kUnalignedAccess64 = ASMJIT_ARCH_X86 != 0;
-#endif
-//! \endcond
-
 // Support - Basic Traits
 // ======================
 
@@ -49,17 +33,18 @@ typedef uint32_t FastUInt8;
 //! \cond INTERNAL
 namespace Internal {
   template<typename T, size_t Alignment>
-  struct AlignedInt {};
-
-  template<> struct AlignedInt<uint16_t, 1> { typedef uint16_t ASMJIT_ALIGN_TYPE(T, 1); };
-  template<> struct AlignedInt<uint16_t, 2> { typedef uint16_t T; };
-  template<> struct AlignedInt<uint32_t, 1> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 1); };
-  template<> struct AlignedInt<uint32_t, 2> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 2); };
-  template<> struct AlignedInt<uint32_t, 4> { typedef uint32_t T; };
-  template<> struct AlignedInt<uint64_t, 1> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 1); };
-  template<> struct AlignedInt<uint64_t, 2> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 2); };
-  template<> struct AlignedInt<uint64_t, 4> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 4); };
-  template<> struct AlignedInt<uint64_t, 8> { typedef uint64_t T; };
+  struct AliasedUInt {};
+
+  template<> struct AliasedUInt<uint16_t, 2> { typedef uint16_t ASMJIT_MAY_ALIAS T; };
+  template<> struct AliasedUInt<uint32_t, 4> { typedef uint32_t ASMJIT_MAY_ALIAS T; };
+  template<> struct AliasedUInt<uint64_t, 8> { typedef uint64_t ASMJIT_MAY_ALIAS T; };
+
+  template<> struct AliasedUInt<uint16_t, 1> { typedef uint16_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 1); };
+  template<> struct AliasedUInt<uint32_t, 1> { typedef uint32_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 1); };
+  template<> struct AliasedUInt<uint32_t, 2> { typedef uint32_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 2); };
+  template<> struct AliasedUInt<uint64_t, 1> { typedef uint64_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 1); };
+  template<> struct AliasedUInt<uint64_t, 2> { typedef uint64_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 2); };
+  template<> struct AliasedUInt<uint64_t, 4> { typedef uint64_t ASMJIT_MAY_ALIAS ASMJIT_ALIGN_TYPE(T, 4); };
 
   // StdInt - Make an int-type by size (signed or unsigned) that is the
   // same as types defined by <stdint.h>.
@@ -826,10 +811,25 @@ static bool inline isEncodableOffset64(int64_t offset, uint32_t nBits) noexcept
 
 // Support - ByteSwap
 // ==================
 
-static constexpr uint32_t byteswap32(uint32_t x) noexcept {
+static inline uint16_t byteswap16(uint16_t x) noexcept {
+  return uint16_t(((x >> 8) & 0xFFu) | ((x & 0xFFu) << 8));
+}
+
+static inline uint32_t byteswap32(uint32_t x) noexcept {
   return (x << 24) | (x >> 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00);
 }
 
+static inline uint64_t byteswap64(uint64_t x) noexcept {
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(ASMJIT_NO_INTRINSICS)
+  return uint64_t(__builtin_bswap64(uint64_t(x)));
+#elif defined(_MSC_VER) && !defined(ASMJIT_NO_INTRINSICS)
+  return uint64_t(_byteswap_uint64(uint64_t(x)));
+#else
+  return (uint64_t(byteswap32(uint32_t(uint64_t(x) >> 32        )))      ) |
+         (uint64_t(byteswap32(uint32_t(uint64_t(x) & 0xFFFFFFFFu))) << 32) ;
+#endif
+}
+
 // Support - BytePack & Unpack
 // ===========================
 
@@ -903,154 +903,170 @@ static ASMJIT_FORCE_INLINE int cmpInstName(const char* a, const char* b, size_t
   return int(uint8_t(a[size]));
 }
 
-// Support - Memory Read & Write
-// =============================
+// Support - Memory Read Access - 8 Bits
+// =====================================
+
+static inline uint8_t readU8(const void* p) noexcept { return static_cast<const uint8_t*>(p)[0]; }
+static inline int8_t readI8(const void* p) noexcept { return static_cast<const int8_t*>(p)[0]; }
 
-static inline uint32_t readU8(const void* p) noexcept { return uint32_t(static_cast<const uint8_t*>(p)[0]); }
-static inline int32_t readI8(const void* p) noexcept { return int32_t(static_cast<const int8_t*>(p)[0]); }
+// Support - Memory Read Access - 16 Bits
+// ======================================
 
 template<ByteOrder BO, size_t Alignment>
-static inline uint32_t readU16x(const void* p) noexcept {
-  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
-    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
-    return uint32_t(static_cast<const U16AlignedToN*>(p)[0]);
-  }
-  else {
-    uint32_t hi = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
-    uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
-    return shl(hi, 8) | lo;
-  }
+static inline uint16_t readU16x(const void* p) noexcept {
+  typedef typename Internal::AliasedUInt<uint16_t, Alignment>::T U16AlignedToN;
+  uint16_t x = static_cast<const U16AlignedToN*>(p)[0];
+  return BO == ByteOrder::kNative ? x : byteswap16(x);
 }
 
+template<size_t Alignment = 1>
+static inline uint16_t readU16u(const void* p) noexcept { return readU16x<ByteOrder::kNative, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint16_t readU16uLE(const void* p) noexcept { return readU16x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint16_t readU16uBE(const void* p) noexcept { return readU16x<ByteOrder::kBE, Alignment>(p); }
+
+static inline uint16_t readU16a(const void* p) noexcept { return readU16x<ByteOrder::kNative, 2>(p); }
+static inline uint16_t readU16aLE(const void* p) noexcept { return readU16x<ByteOrder::kLE, 2>(p); }
+static inline uint16_t readU16aBE(const void* p) noexcept { return readU16x<ByteOrder::kBE, 2>(p); }
+
 template<ByteOrder BO, size_t Alignment>
-static inline int32_t readI16x(const void* p) noexcept {
-  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
-    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
-    return int32_t(int16_t(static_cast<const U16AlignedToN*>(p)[0]));
-  }
-  else {
-    int32_t hi = readI8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
-    uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
-    return shl(hi, 8) | int32_t(lo);
-  }
-}
+static inline int16_t readI16x(const void* p) noexcept { return int16_t(readU16x<BO, Alignment>(p)); }
+
+template<size_t Alignment = 1>
+static inline int16_t readI16u(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kNative, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int16_t readI16uLE(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kLE, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int16_t readI16uBE(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kBE, Alignment>(p)); }
+
+static inline int16_t readI16a(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kNative, 2>(p)); }
+static inline int16_t readI16aLE(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kLE, 2>(p)); }
+static inline int16_t readI16aBE(const void* p) noexcept { return int16_t(readU16x<ByteOrder::kBE, 2>(p)); }
+
+// Support - Memory Read Access - 24 Bits
+// ======================================
 
 template<ByteOrder BO = ByteOrder::kNative>
 static inline uint32_t readU24u(const void* p) noexcept {
   uint32_t b0 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
   uint32_t b1 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 1));
   uint32_t b2 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
-  return shl(b0, 16) | shl(b1, 8) | b2;
+  return (b0 << 16) | (b1 << 8) | b2;
 }
 
+static inline uint32_t readU24uLE(const void* p) noexcept { return readU24u<ByteOrder::kLE>(p); }
+static inline uint32_t readU24uBE(const void* p) noexcept { return readU24u<ByteOrder::kBE>(p); }
+
+// Support - Memory Read Access - 32 Bits
+// ======================================
+
 template<ByteOrder BO, size_t Alignment>
 static inline uint32_t readU32x(const void* p) noexcept {
-  if (kUnalignedAccess32 || Alignment >= 4) {
-    typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
-    uint32_t x = static_cast<const U32AlignedToN*>(p)[0];
-    return BO == ByteOrder::kNative ? x : byteswap32(x);
-  }
-  else {
-    uint32_t hi = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
-    uint32_t lo = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
-    return shl(hi, 16) | lo;
-  }
+  typedef typename Internal::AliasedUInt<uint32_t, Alignment>::T U32AlignedToN;
+  uint32_t x = static_cast<const U32AlignedToN*>(p)[0];
+  return BO == ByteOrder::kNative ? x : byteswap32(x);
 }
 
+template<size_t Alignment = 1>
+static inline uint32_t readU32u(const void* p) noexcept { return readU32x<ByteOrder::kNative, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint32_t readU32uLE(const void* p) noexcept { return readU32x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint32_t readU32uBE(const void* p) noexcept { return readU32x<ByteOrder::kBE, Alignment>(p); }
+
+static inline uint32_t readU32a(const void* p) noexcept { return readU32x<ByteOrder::kNative, 4>(p); }
+static inline uint32_t readU32aLE(const void* p) noexcept { return readU32x<ByteOrder::kLE, 4>(p); }
+static inline uint32_t readU32aBE(const void* p) noexcept { return readU32x<ByteOrder::kBE, 4>(p); }
+
+template<ByteOrder BO, size_t Alignment>
+static inline uint32_t readI32x(const void* p) noexcept { return int32_t(readU32x<BO, Alignment>(p)); }
+
+template<size_t Alignment = 1>
+static inline int32_t readI32u(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kNative, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int32_t readI32uLE(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kLE, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int32_t readI32uBE(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kBE, Alignment>(p)); }
+
+static inline int32_t readI32a(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kNative, 4>(p)); }
+static inline int32_t readI32aLE(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kLE, 4>(p)); }
+static inline int32_t readI32aBE(const void* p) noexcept { return int32_t(readU32x<ByteOrder::kBE, 4>(p)); }
+
+// Support - Memory Read Access - 64 Bits
+// ======================================
+
 template<ByteOrder BO, size_t Alignment>
 static inline uint64_t readU64x(const void* p) noexcept {
-  if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
-    typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
-    return static_cast<const U64AlignedToN*>(p)[0];
-  }
-  else {
-    uint32_t hi = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 4 : 0));
-    uint32_t lo = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 4));
-    return shl(uint64_t(hi), 32) | lo;
-  }
+  typedef typename Internal::AliasedUInt<uint64_t, Alignment>::T U64AlignedToN;
+  uint64_t x = static_cast<const U64AlignedToN*>(p)[0];
+  return BO == ByteOrder::kNative ? x : byteswap64(x);
 }
 
-template<ByteOrder BO, size_t Alignment>
-static inline int32_t readI32x(const void* p) noexcept { return int32_t(readU32x<BO, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline uint64_t readU64u(const void* p) noexcept { return readU64x<ByteOrder::kNative, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint64_t readU64uLE(const void* p) noexcept { return readU64x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment = 1>
+static inline uint64_t readU64uBE(const void* p) noexcept { return readU64x<ByteOrder::kBE, Alignment>(p); }
+
+static inline uint64_t readU64a(const void* p) noexcept { return readU64x<ByteOrder::kNative, 8>(p); }
+static inline uint64_t readU64aLE(const void* p) noexcept { return readU64x<ByteOrder::kLE, 8>(p); }
+static inline uint64_t readU64aBE(const void* p) noexcept { return readU64x<ByteOrder::kBE, 8>(p); }
 
 template<ByteOrder BO, size_t Alignment>
 static inline int64_t readI64x(const void* p) noexcept { return int64_t(readU64x<BO, Alignment>(p)); }
 
-template<size_t Alignment> static inline int32_t readI16xLE(const void* p) noexcept { return readI16x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline int32_t readI16xBE(const void* p) noexcept { return readI16x<ByteOrder::kBE, Alignment>(p); }
-template<size_t Alignment> static inline uint32_t readU16xLE(const void* p) noexcept { return readU16x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline uint32_t readU16xBE(const void* p) noexcept { return readU16x<ByteOrder::kBE, Alignment>(p); }
-template<size_t Alignment> static inline int32_t readI32xLE(const void* p) noexcept { return readI32x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline int32_t readI32xBE(const void* p) noexcept { return readI32x<ByteOrder::kBE, Alignment>(p); }
-template<size_t Alignment> static inline uint32_t readU32xLE(const void* p) noexcept { return readU32x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline uint32_t readU32xBE(const void* p) noexcept { return readU32x<ByteOrder::kBE, Alignment>(p); }
-template<size_t Alignment> static inline int64_t readI64xLE(const void* p) noexcept { return readI64x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline int64_t readI64xBE(const void* p) noexcept { return readI64x<ByteOrder::kBE, Alignment>(p); }
-template<size_t Alignment> static inline uint64_t readU64xLE(const void* p) noexcept { return readU64x<ByteOrder::kLE, Alignment>(p); }
-template<size_t Alignment> static inline uint64_t readU64xBE(const void* p) noexcept { return readU64x<ByteOrder::kBE, Alignment>(p); }
-
-static inline int32_t readI16a(const void* p) noexcept { return readI16x<ByteOrder::kNative, 2>(p); }
-static inline int32_t readI16u(const void* p) noexcept { return readI16x<ByteOrder::kNative, 1>(p); }
-static inline uint32_t readU16a(const void* p) noexcept { return readU16x<ByteOrder::kNative, 2>(p); }
-static inline uint32_t readU16u(const void* p) noexcept { return readU16x<ByteOrder::kNative, 1>(p); }
-
-static inline int32_t readI16aLE(const void* p) noexcept { return readI16xLE<2>(p); }
-static inline int32_t readI16uLE(const void* p) noexcept { return readI16xLE<1>(p); }
-static inline uint32_t readU16aLE(const void* p) noexcept { return readU16xLE<2>(p); }
-static inline uint32_t readU16uLE(const void* p) noexcept { return readU16xLE<1>(p); }
-
-static inline int32_t readI16aBE(const void* p) noexcept { return readI16xBE<2>(p); }
-static inline int32_t readI16uBE(const void* p) noexcept { return readI16xBE<1>(p); }
-static inline uint32_t readU16aBE(const void* p) noexcept { return readU16xBE<2>(p); }
-static inline uint32_t readU16uBE(const void* p) noexcept { return readU16xBE<1>(p); }
+template<size_t Alignment = 1>
+static inline int64_t readI64u(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kNative, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int64_t readI64uLE(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kLE, Alignment>(p)); }
+template<size_t Alignment = 1>
+static inline int64_t readI64uBE(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kBE, Alignment>(p)); }
 
-static inline uint32_t readU24uLE(const void* p) noexcept { return readU24u<ByteOrder::kLE>(p); }
-static inline uint32_t readU24uBE(const void* p) noexcept { return readU24u<ByteOrder::kBE>(p); }
+static inline int64_t readI64a(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kNative, 8>(p)); }
+static inline int64_t readI64aLE(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kLE, 8>(p)); }
+static inline int64_t readI64aBE(const void* p) noexcept { return int64_t(readU64x<ByteOrder::kBE, 8>(p)); }
 
-static inline int32_t readI32a(const void* p) noexcept { return readI32x<ByteOrder::kNative, 4>(p); }
-static inline int32_t readI32u(const void* p) noexcept { return readI32x<ByteOrder::kNative, 1>(p); }
-static inline uint32_t readU32a(const void* p) noexcept { return readU32x<ByteOrder::kNative, 4>(p); }
-static inline uint32_t readU32u(const void* p) noexcept { return readU32x<ByteOrder::kNative, 1>(p); }
+// Support - Memory Write Access - 8 Bits
+// ======================================
 
-static inline int32_t readI32aLE(const void* p) noexcept { return readI32xLE<4>(p); }
-static inline int32_t readI32uLE(const void* p) noexcept { return readI32xLE<1>(p); }
-static inline uint32_t readU32aLE(const void* p) noexcept { return readU32xLE<4>(p); }
-static inline uint32_t readU32uLE(const void* p) noexcept { return readU32xLE<1>(p); }
+static inline void writeU8(void* p, uint8_t x) noexcept { static_cast<uint8_t*>(p)[0] = x; }
+static inline void writeI8(void* p, int8_t x) noexcept { static_cast<int8_t*>(p)[0] = x; }
 
-static inline int32_t readI32aBE(const void* p) noexcept { return readI32xBE<4>(p); }
-static inline int32_t readI32uBE(const void* p) noexcept { return readI32xBE<1>(p); }
-static inline uint32_t readU32aBE(const void* p) noexcept { return readU32xBE<4>(p); }
-static inline uint32_t readU32uBE(const void* p) noexcept { return readU32xBE<1>(p); }
+// Support - Memory Write Access - 16 Bits
+// =======================================
 
-static inline int64_t readI64a(const void* p) noexcept { return readI64x<ByteOrder::kNative, 8>(p); }
-static inline int64_t readI64u(const void* p) noexcept { return readI64x<ByteOrder::kNative, 1>(p); }
-static inline uint64_t readU64a(const void* p) noexcept { return readU64x<ByteOrder::kNative, 8>(p); }
-static inline uint64_t readU64u(const void* p) noexcept { return readU64x<ByteOrder::kNative, 1>(p); }
+template<ByteOrder BO, size_t Alignment>
+static inline void writeU16x(void* p, uint16_t x) noexcept {
+  typedef typename Internal::AliasedUInt<uint16_t, Alignment>::T U16AlignedToN;
+  static_cast<U16AlignedToN*>(p)[0] = BO == ByteOrder::kNative ? x : byteswap16(x);
+}
 
-static inline int64_t readI64aLE(const void* p) noexcept { return readI64xLE<8>(p); }
-static inline int64_t readI64uLE(const void* p) noexcept { return readI64xLE<1>(p); }
-static inline uint64_t readU64aLE(const void* p) noexcept { return readU64xLE<8>(p); }
-static inline uint64_t readU64uLE(const void* p) noexcept { return readU64xLE<1>(p); }
+template<size_t Alignment = 1>
+static inline void writeU16uLE(void* p, uint16_t x) noexcept { writeU16x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU16uBE(void* p, uint16_t x) noexcept { writeU16x<ByteOrder::kBE, Alignment>(p, x); }
 
-static inline int64_t readI64aBE(const void* p) noexcept { return readI64xBE<8>(p); }
-static inline int64_t readI64uBE(const void* p) noexcept { return readI64xBE<1>(p); }
-static inline uint64_t readU64aBE(const void* p) noexcept { return readU64xBE<8>(p); }
-static inline uint64_t readU64uBE(const void* p) noexcept { return readU64xBE<1>(p); }
+static inline void writeU16a(void* p, uint16_t x) noexcept { writeU16x<ByteOrder::kNative, 2>(p, x); }
+static inline void writeU16aLE(void* p, uint16_t x) noexcept { writeU16x<ByteOrder::kLE, 2>(p, x); }
+static inline void writeU16aBE(void* p, uint16_t x) noexcept { writeU16x<ByteOrder::kBE, 2>(p, x); }
 
-static inline void writeU8(void* p, uint32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFFu); }
-static inline void writeI8(void* p, int32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFF); }
+
 template<ByteOrder BO, size_t Alignment>
-static inline void writeU16x(void* p, uint32_t x) noexcept {
-  if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
-    typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
-    static_cast<U16AlignedToN*>(p)[0] = uint16_t(x & 0xFFFFu);
-  }
-  else {
-    static_cast<uint8_t*>(p)[0] = uint8_t((x >> (BO == ByteOrder::kLE ? 0 : 8)) & 0xFFu);
-    static_cast<uint8_t*>(p)[1] = uint8_t((x >> (BO == ByteOrder::kLE ? 8 : 0)) & 0xFFu);
-  }
-}
+static inline void writeI16x(void* p, int16_t x) noexcept { writeU16x<BO, Alignment>(p, uint16_t(x)); }
+
+template<size_t Alignment = 1>
+static inline void writeI16uLE(void* p, int16_t x) noexcept { writeU16x<ByteOrder::kLE, Alignment>(p, uint16_t(x)); }
+template<size_t Alignment = 1>
+static inline void writeI16uBE(void* p, int16_t x) noexcept { writeU16x<ByteOrder::kBE, Alignment>(p, uint16_t(x)); }
+
+static inline void writeI16a(void* p, int16_t x) noexcept { writeU16x<ByteOrder::kNative, 2>(p, uint16_t(x)); }
+static inline void writeI16aLE(void* p, int16_t x) noexcept { writeU16x<ByteOrder::kLE, 2>(p, uint16_t(x)); }
+static inline void writeI16aBE(void* p, int16_t x) noexcept { writeU16x<ByteOrder::kBE, 2>(p, uint16_t(x)); }
+
+// Support - Memory Write Access - 24 Bits
+// =======================================
 
 template<ByteOrder BO = ByteOrder::kNative>
 static inline void writeU24u(void* p, uint32_t v) noexcept {
@@ -1059,96 +1075,76 @@ static inline void writeU24u(void* p, uint32_t v) noexcept {
   static_cast<uint8_t*>(p)[2] = uint8_t((v >> (BO == ByteOrder::kLE ? 16 : 0)) & 0xFFu);
 }
 
-template<ByteOrder BO, size_t Alignment>
-static inline void writeU32x(void* p, uint32_t x) noexcept {
-  if (kUnalignedAccess32 || Alignment >= 4) {
-    typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
-    static_cast<U32AlignedToN*>(p)[0] = (BO == ByteOrder::kNative) ? x : Support::byteswap32(x);
-  }
-  else {
-    writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 0, x >> (BO == ByteOrder::kLE ? 0 : 16));
-    writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 2, x >> (BO == ByteOrder::kLE ? 16 : 0));
-  }
-}
+static inline void writeU24uLE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kLE>(p, v); }
+static inline void writeU24uBE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kBE>(p, v); }
+
+// Support - Memory Write Access - 32 Bits
+// =======================================
 
 template<ByteOrder BO, size_t Alignment>
-static inline void writeU64x(void* p, uint64_t x) noexcept {
-  if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
-    typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
-    static_cast<U64AlignedToN*>(p)[0] = x;
-  }
-  else {
-    writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 0, uint32_t((x >> (BO == ByteOrder::kLE ? 0 : 32)) & 0xFFFFFFFFu));
-    writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 4, uint32_t((x >> (BO == ByteOrder::kLE ? 32 : 0)) & 0xFFFFFFFFu));
-  }
+static inline void writeU32x(void* p, uint32_t x) noexcept {
+  typedef typename Internal::AliasedUInt<uint32_t, Alignment>::T U32AlignedToN;
+  static_cast<U32AlignedToN*>(p)[0] = (BO == ByteOrder::kNative) ? x : Support::byteswap32(x);
 }
 
-template<ByteOrder BO, size_t Alignment> static inline void writeI16x(void* p, int32_t x) noexcept { writeU16x<BO, Alignment>(p, uint32_t(x)); }
-template<ByteOrder BO, size_t Alignment> static inline void writeI32x(void* p, int32_t x) noexcept { writeU32x<BO, Alignment>(p, uint32_t(x)); }
-template<ByteOrder BO, size_t Alignment> static inline void writeI64x(void* p, int64_t x) noexcept { writeU64x<BO, Alignment>(p, uint64_t(x)); }
-
-template<size_t Alignment> static inline void writeI16xLE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeI16xBE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kBE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU16xLE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU16xBE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kBE, Alignment>(p, x); }
-
-template<size_t Alignment> static inline void writeI32xLE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeI32xBE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kBE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU32xLE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU32xBE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kBE, Alignment>(p, x); }
-
-template<size_t Alignment> static inline void writeI64xLE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeI64xBE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kBE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU64xLE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kLE, Alignment>(p, x); }
-template<size_t Alignment> static inline void writeU64xBE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kBE, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU32u(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU32uLE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU32uBE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kBE, Alignment>(p, x); }
 
-static inline void writeI16a(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 2>(p, x); }
-static inline void writeI16u(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 1>(p, x); }
-static inline void writeU16a(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 2>(p, x); }
-static inline void writeU16u(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 1>(p, x); }
+static inline void writeU32a(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 4>(p, x); }
+static inline void writeU32aLE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kLE, 4>(p, x); }
+static inline void writeU32aBE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kBE, 4>(p, x); }
 
-static inline void writeI16aLE(void* p, int32_t x) noexcept { writeI16xLE<2>(p, x); }
-static inline void writeI16uLE(void* p, int32_t x) noexcept { writeI16xLE<1>(p, x); }
-static inline void writeU16aLE(void* p, uint32_t x) noexcept { writeU16xLE<2>(p, x); }
-static inline void writeU16uLE(void* p, uint32_t x) noexcept { writeU16xLE<1>(p, x); }
 
+template<ByteOrder BO, size_t Alignment>
+static inline void writeI32x(void* p, int32_t x) noexcept { writeU32x<BO, Alignment>(p, uint32_t(x)); }
 
-static inline void writeI16aBE(void* p, int32_t x) noexcept { writeI16xBE<2>(p, x); }
-static inline void writeI16uBE(void* p, int32_t x) noexcept { writeI16xBE<1>(p, x); }
-static inline void writeU16aBE(void* p, uint32_t x) noexcept { writeU16xBE<2>(p, x); }
-static inline void writeU16uBE(void* p, uint32_t x) noexcept { writeU16xBE<1>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeI32u(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kNative, Alignment>(p, uint32_t(x)); }
+template<size_t Alignment = 1>
+static inline void writeI32uLE(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kLE, Alignment>(p, uint32_t(x)); }
+template<size_t Alignment = 1>
+static inline void writeI32uBE(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kBE, Alignment>(p, uint32_t(x)); }
 
-static inline void writeU24uLE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kLE>(p, v); }
-static inline void writeU24uBE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kBE>(p, v); }
+static inline void writeI32a(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kNative, 4>(p, uint32_t(x)); }
+static inline void writeI32aLE(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kLE, 4>(p, uint32_t(x)); }
+static inline void writeI32aBE(void* p, int32_t x) noexcept { writeU32x<ByteOrder::kBE, 4>(p, uint32_t(x)); }
 
-static inline void writeI32a(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 4>(p, x); }
-static inline void writeI32u(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 1>(p, x); }
-static inline void writeU32a(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 4>(p, x); }
-static inline void writeU32u(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 1>(p, x); }
+// Support - Memory Write Access - 64 Bits
+// =======================================
 
-static inline void writeI32aLE(void* p, int32_t x) noexcept { writeI32xLE<4>(p, x); }
-static inline void writeI32uLE(void* p, int32_t x) noexcept { writeI32xLE<1>(p, x); }
-static inline void writeU32aLE(void* p, uint32_t x) noexcept { writeU32xLE<4>(p, x); }
-static inline void writeU32uLE(void* p, uint32_t x) noexcept { writeU32xLE<1>(p, x); }
+template<ByteOrder BO, size_t Alignment>
+static inline void writeU64x(void* p, uint64_t x) noexcept {
+  typedef typename Internal::AliasedUInt<uint64_t, Alignment>::T U64AlignedToN;
+  static_cast<U64AlignedToN*>(p)[0] = BO == ByteOrder::kNative ? x : byteswap64(x);
+}
 
-static inline void writeI32aBE(void* p, int32_t x) noexcept { writeI32xBE<4>(p, x); }
-static inline void writeI32uBE(void* p, int32_t x) noexcept { writeI32xBE<1>(p, x); }
-static inline void writeU32aBE(void* p, uint32_t x) noexcept { writeU32xBE<4>(p, x); }
-static inline void writeU32uBE(void* p, uint32_t x) noexcept { writeU32xBE<1>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU64u(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU64uLE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1>
+static inline void writeU64uBE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kBE, Alignment>(p, x); }
 
-static inline void writeI64a(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 8>(p, x); }
-static inline void writeI64u(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 1>(p, x); }
 static inline void writeU64a(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 8>(p, x); }
-static inline void writeU64u(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 1>(p, x); }
+static inline void writeU64aLE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kLE, 8>(p, x); }
+static inline void writeU64aBE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kBE, 8>(p, x); }
 
-static inline void writeI64aLE(void* p, int64_t x) noexcept { writeI64xLE<8>(p, x); }
-static inline void writeI64uLE(void* p, int64_t x) noexcept { writeI64xLE<1>(p, x); }
-static inline void writeU64aLE(void* p, uint64_t x) noexcept { writeU64xLE<8>(p, x); }
-static inline void writeU64uLE(void* p, uint64_t x) noexcept { writeU64xLE<1>(p, x); }
-
-static inline void writeI64aBE(void* p, int64_t x) noexcept { writeI64xBE<8>(p, x); }
-static inline void writeI64uBE(void* p, int64_t x) noexcept { writeI64xBE<1>(p, x); }
-static inline void writeU64aBE(void* p, uint64_t x) noexcept { writeU64xBE<8>(p, x); }
-static inline void writeU64uBE(void* p, uint64_t x) noexcept { writeU64xBE<1>(p, x); }
+template<ByteOrder BO, size_t Alignment>
+static inline void writeI64x(void* p, int64_t x) noexcept { writeU64x<BO, Alignment>(p, uint64_t(x)); }
+
+template<size_t Alignment = 1>
+static inline void writeI64u(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kNative, Alignment>(p, uint64_t(x)); }
+template<size_t Alignment = 1>
+static inline void writeI64uLE(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kLE, Alignment>(p, uint64_t(x)); }
+template<size_t Alignment = 1>
+static inline void writeI64uBE(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kBE, Alignment>(p, uint64_t(x)); }
+
+static inline void writeI64a(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kNative, 8>(p, uint64_t(x)); }
+static inline void writeI64aLE(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kLE, 8>(p, uint64_t(x)); }
+static inline void writeI64aBE(void* p, int64_t x) noexcept { writeU64x<ByteOrder::kBE, 8>(p, uint64_t(x)); }
 
 // Support - Operators
 // ===================
--
cgit v1.2.3
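
The patch boils down to one pattern: instead of branching on the removed kUnalignedAccess16/32/64 constants and composing values byte by byte, every read/write now goes through a typedef annotated with ASMJIT_MAY_ALIAS and an explicit (possibly reduced) alignment, letting the compiler emit whatever unaligned load or store the target supports. Below is a minimal, self-contained sketch of that pattern; the macro and function names are illustrative only (not AsmJit's ASMJIT_MAY_ALIAS / ASMJIT_ALIGN_TYPE macros or its Support API), and it assumes a GCC/Clang-style compiler, falling back to a plain cast elsewhere.

// sketch_unaligned_load.cpp -- illustrative sketch, not part of AsmJit.
#include <cstdint>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
  #define MAY_ALIAS __attribute__((__may_alias__))
  #define ALIGNED_1 __attribute__((__aligned__(1)))
#else
  #define MAY_ALIAS   // assumption: on other compilers the plain cast below is tolerated
  #define ALIGNED_1
#endif

// A uint32_t the compiler must assume aliases any object and is only 1-byte aligned.
typedef uint32_t MAY_ALIAS ALIGNED_1 UnalignedU32;

static inline uint32_t bswap32(uint32_t x) {
  return (x << 24) | (x >> 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00u);
}

// Unaligned little-endian 32-bit load; byteswapped on big-endian hosts.
static inline uint32_t readU32uLE(const void* p) {
  uint32_t x = static_cast<const UnalignedU32*>(p)[0];
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  x = bswap32(x);
#endif
  return x;
}

int main() {
  unsigned char buf[7] = { 0xEE, 0xEE, 0xEE, 0x04, 0x03, 0x02, 0x01 };
  // Offset 3 is misaligned for a plain uint32_t, but fine through UnalignedU32 on GCC/Clang.
  std::printf("0x%08X\n", unsigned(readU32uLE(buf + 3)));  // prints 0x01020304
  return 0;
}

GCC and Clang honor alignment reduction on typedefs, so such a load typically compiles to a single unaligned mov on x86 and to the appropriate byte accesses on strict-alignment targets, which is why the patch can drop the kUnalignedAccess switches and the manual byte-composition fallbacks entirely.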