
github.com/moses-smt/mosesdecoder.git
Diffstat (limited to 'util/bit_packing.hh')
-rw-r--r--  util/bit_packing.hh  24
1 file changed, 12 insertions, 12 deletions
diff --git a/util/bit_packing.hh b/util/bit_packing.hh
index 3cf3cd4a1..b24fd9c1f 100644
--- a/util/bit_packing.hh
+++ b/util/bit_packing.hh
@@ -1,7 +1,7 @@
#ifndef UTIL_BIT_PACKING_H
#define UTIL_BIT_PACKING_H
-/* Bit-level packing routines
+/* Bit-level packing routines
*
* WARNING WARNING WARNING:
* The write functions assume that memory is zero initially. This makes them
@@ -9,10 +9,10 @@
* These routines assume that unaligned access to uint64_t is fast. This is
* the case on x86_64. I'm not sure how fast unaligned 64-bit access is on
* x86 but my target audience is large language models for which 64-bit is
- * necessary.
+ * necessary.
*
* Call the BitPackingSanity function to sanity check. Calling once suffices,
- * but it may be called multiple times when that's inconvenient.
+ * but it may be called multiple times when that's inconvenient.
*
* ARM and MinGW ports contributed by Hideo Okuma and Tomoyuki Yoshimura at
* NICT.
@@ -25,14 +25,14 @@
#include <endian.h>
#elif !defined(_WIN32) && !defined(_WIN64)
#include <arpa/nameser_compat.h>
-#endif
+#endif
#include <stdint.h>
#include <cstring>
namespace util {
-// Fun fact: __BYTE_ORDER is wrong on Solaris Sparc, but the version without __ is correct.
+// Fun fact: __BYTE_ORDER is wrong on Solaris Sparc, but the version without __ is correct.
#if BYTE_ORDER == LITTLE_ENDIAN
inline uint8_t BitPackShift(uint8_t bit, uint8_t /*length*/) {
return bit;
@@ -56,15 +56,15 @@ inline uint64_t ReadOff(const void *base, uint64_t bit_off) {
#endif
}
-/* Pack integers up to 57 bits using their least significant digits.
+/* Pack integers up to 57 bits using their least significant digits.
* The length is specified using mask:
- * Assumes mask == (1 << length) - 1 where length <= 57.
+ * Assumes mask == (1 << length) - 1 where length <= 57.
*/
inline uint64_t ReadInt57(const void *base, uint64_t bit_off, uint8_t length, uint64_t mask) {
return (ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, length)) & mask;
}
/* Assumes value < (1 << length) and length <= 57.
- * Assumes the memory is zero initially.
+ * Assumes the memory is zero initially.
*/
inline void WriteInt57(void *base, uint64_t bit_off, uint8_t length, uint64_t value) {
#if defined(__arm) || defined(__arm__)
@@ -74,7 +74,7 @@ inline void WriteInt57(void *base, uint64_t bit_off, uint8_t length, uint64_t va
value64 |= (value << BitPackShift(bit_off & 7, length));
memcpy(base_off, &value64, sizeof(value64));
#else
- *reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(base) + (bit_off >> 3)) |=
+ *reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(base) + (bit_off >> 3)) |=
(value << BitPackShift(bit_off & 7, length));
#endif
}
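
As context for the pair of routines touched above, here is a minimal round-trip sketch of WriteInt57/ReadInt57 (not part of this commit; the buffer, offset, and value are illustrative assumptions). The buffer is explicitly zeroed because, per the header's warning, the write functions OR into memory that is assumed to start out zero.

#include <cassert>
#include <cstdint>
#include <vector>
#include "util/bit_packing.hh"

void Int57Sketch() {
  const uint8_t length = 13;                   // field width in bits, <= 57
  const uint64_t mask = (1ULL << length) - 1;  // mask convention documented above
  std::vector<uint8_t> buf(64, 0);             // must start zeroed: writes OR into place
  const uint64_t bit_off = 21;                 // deliberately unaligned bit offset
  const uint64_t value = 0x1ABC & mask;        // any value < (1 << length)
  // On little-endian x86_64 this touches the 8 bytes at buf.data() + (21 >> 3) = +2
  // and shifts by 21 & 7 = 5 before masking.
  util::WriteInt57(buf.data(), bit_off, length, value);
  assert(util::ReadInt57(buf.data(), bit_off, length, mask) == value);
}
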
@@ -99,7 +99,7 @@ inline void WriteInt25(void *base, uint64_t bit_off, uint8_t length, uint32_t va
value32 |= (value << BitPackShift(bit_off & 7, length));
memcpy(base_off, &value32, sizeof(value32));
#else
- *reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(base) + (bit_off >> 3)) |=
+ *reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(base) + (bit_off >> 3)) |=
(value << BitPackShift(bit_off & 7, length));
#endif
}
@@ -136,7 +136,7 @@ inline void UnsetSign(float &to) {
inline float ReadNonPositiveFloat31(const void *base, uint64_t bit_off) {
FloatEnc encoded;
encoded.i = ReadOff(base, bit_off) >> BitPackShift(bit_off & 7, 31);
- // Sign bit set means negative.
+ // Sign bit set means negative.
encoded.i |= kSignBit;
return encoded.f;
}
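
For the float path in this hunk, a small illustrative round trip (again not part of this commit): the sketch assumes WriteNonPositiveFloat31 stores the 31 non-sign bits of the float verbatim, which is what the read side's restoring of kSignBit suggests, so a non-positive value should come back bit-exact on the x86-class targets this header assumes.

#include <cassert>
#include <cstdint>
#include <vector>
#include "util/bit_packing.hh"

void Float31Sketch() {
  std::vector<uint8_t> buf(32, 0);  // write routines require zero-initialized memory
  const float log_prob = -2.5f;     // must be <= 0: the sign bit is not stored
  util::WriteNonPositiveFloat31(buf.data(), /*bit_off=*/7, log_prob);
  assert(util::ReadNonPositiveFloat31(buf.data(), /*bit_off=*/7) == log_prob);
}
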
@@ -150,7 +150,7 @@ inline void WriteNonPositiveFloat31(void *base, uint64_t bit_off, float value) {
void BitPackingSanity();
// Return bits required to store integers upto max_value. Not the most
-// efficient implementation, but this is only called a few times to size tries.
+// efficient implementation, but this is only called a few times to size tries.
uint8_t RequiredBits(uint64_t max_value);
struct BitsMask {
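
For reference, one straightforward way the RequiredBits helper described above could be computed. This is only a sketch consistent with the comment ("not the most efficient implementation"); the name RequiredBitsSketch is hypothetical, and the real definition lives in bit_packing.cc, which this diff does not show.

#include <cstdint>

uint8_t RequiredBitsSketch(uint64_t max_value) {
  if (!max_value) return 0;        // zero needs no bits
  uint8_t ret = 1;
  while (max_value >>= 1) ++ret;   // count how many times the value can be halved
  return ret;
}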