gitlab.com/quite/celt.git
author    Jean-Marc Valin <jean-marc.valin@usherbrooke.ca>   2008-01-28 14:28:54 +0300
committer Jean-Marc Valin <jean-marc.valin@usherbrooke.ca>   2008-01-28 14:28:54 +0300
commit    6238bc0ece31ca1a1b9378de71de32d405919d81 (patch)
tree      343c65c150f366cd2e2850da655bef3ab5405d48 /libcelt/ecintrin.h
parent    94d4ea930543f83483e158111b53389704b583b7 (diff)
Moved the content of libentcode into libcelt to reduce dependencies,
especially now that we have a custom version of that code anyway. Moved the test code to tests/
Diffstat (limited to 'libcelt/ecintrin.h')
-rw-r--r--  libcelt/ecintrin.h | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/libcelt/ecintrin.h b/libcelt/ecintrin.h
new file mode 100644
index 0000000..f9a960f
--- /dev/null
+++ b/libcelt/ecintrin.h
@@ -0,0 +1,93 @@
+/*Some common macros for potential platform-specific optimization.*/
+#include <math.h>
+#include <limits.h>
+#if !defined(_ecintrin_H)
+# define _ecintrin_H (1)
+
+/*Some specific platforms may have optimized intrinsic or inline assembly
+ versions of these functions which can substantially improve performance.
+ We define macros for them to allow easy incorporation of these non-ANSI
+ features.*/
+
+/*Note that we do not provide a macro for abs(), because it is provided as a
+ library function, which we assume is translated into an intrinsic to avoid
+ the function call overhead and then implemented in the smartest way for the
+ target platform.
+ With modern gcc (4.x), this is true: it uses cmov instructions if the
+ architecture supports it and branchless bit-twiddling if it does not (the
+ speed difference between the two approaches is not measurable).
+ Interestingly, the bit-twiddling method was patented in 2000 (US 6,073,150)
+ by Sun Microsystems, despite prior art dating back to at least 1996:
+ http://web.archive.org/web/19961201174141/www.x86.org/ftp/articles/pentopt/PENTOPT.TXT
+ On gcc 3.x, however, our assumption is not true, as abs() is translated to a
+ conditional jump, which is horrible on deeply pipelined architectures (e.g.,
+ all consumer architectures for the past decade or more) when the sign cannot
+ be reliably predicted.*/
+
+/*Modern gcc (4.x) can compile the naive versions of min and max with cmov if
+ given an appropriate architecture, but the branchless bit-twiddling versions
+ are just as fast, and do not require any special target architecture.
+ Earlier gcc versions (3.x) compiled both forms to the same assembly
+ instructions, because of the way they represented ((_b)>(_a)) internally.*/
+#define EC_MAXI(_a,_b) ((_a)-((_a)-(_b)&-((_b)>(_a))))
+#define EC_MINI(_a,_b) ((_a)+((_b)-(_a)&-((_b)<(_a))))
+/*This has a chance of compiling branchless, and is just as fast as the
+ bit-twiddling method, which is slightly less portable, since it relies on a
+ sign-extended right-shift, which is not guaranteed by ANSI (but present on
+ every relevant platform).*/
+#define EC_SIGNI(_a) (((_a)>0)-((_a)<0))
+/*Slightly more portable than relying on a sign-extended right-shift (which is
+ not guaranteed by ANSI), and just as fast, since gcc (3.x and 4.x both)
+ compile it into the right-shift anyway.*/
+#define EC_SIGNMASK(_a) (-((_a)<0))
+/*Clamps an integer into the given range.
+ If _a>_c, then the lower bound _a is respected over the upper bound _c (this
+ behavior is required to meet our documented API behavior).
+ _a: The lower bound.
+ _b: The value to clamp.
+ _c: The upper bound.*/
+#define EC_CLAMPI(_a,_b,_c) (EC_MAXI(_a,EC_MINI(_b,_c)))
+/*Count leading zeros.
+ This macro should only be used for implementing ec_ilog(), if it is defined.
+ All other code should use EC_ILOG() instead.*/
+#if __GNUC_PREREQ(3,4)
+# if INT_MAX>=2147483647
+# define EC_CLZ0 sizeof(unsigned)*CHAR_BIT
+# define EC_CLZ(_x) (__builtin_clz(_x))
+# elif LONG_MAX>=2147483647L
+# define EC_CLZ0 sizeof(unsigned long)*CHAR_BIT
+# define EC_CLZ(_x) (__builtin_clzl(_x))
+# endif
+#endif
+#if defined(EC_CLZ)
+/*Note that __builtin_clz is not defined when _x==0, according to the gcc
+ documentation (and that of the BSR instruction that implements it on x86).
+ In the vast majority of cases we never pass it zero.
+ When we need to, it can be special cased.*/
+# define EC_ILOG(_x) (EC_CLZ0-EC_CLZ(_x))
+#else
+# define EC_ILOG(_x) (ec_ilog(_x))
+#endif
+#if __GNUC_PREREQ(3,4)
+# if INT_MAX>=9223372036854775807
+# define EC_CLZ64_0 sizeof(unsigned)*CHAR_BIT
+# define EC_CLZ64(_x) (__builtin_clz(_x))
+# elif LONG_MAX>=9223372036854775807L
+# define EC_CLZ64_0 sizeof(unsigned long)*CHAR_BIT
+# define EC_CLZ64(_x) (__builtin_clzl(_x))
+# elif LLONG_MAX>=9223372036854775807LL
+# define EC_CLZ64_0 sizeof(unsigned long long)*CHAR_BIT
+# define EC_CLZ64(_x) (__builtin_clzll(_x))
+# endif
+#endif
+#if defined(EC_CLZ64)
+/*Note that __builtin_clz is not defined when _x==0, according to the gcc
+ documentation (and that of the BSR instruction that implements it on x86).
+ In the vast majority of cases we never pass it zero.
+ When we need to, it can be special cased.*/
+# define EC_ILOG64(_x) (EC_CLZ64_0-EC_CLZ64(_x))
+#else
+# define EC_ILOG64(_x) (ec_ilog64(_x))
+#endif
+
+#endif
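
For reference, here is a minimal standalone sketch (not part of the commit) of
how these macros behave. The integer macro bodies are copied from the new
header; EC_ILOG is inlined directly on top of __builtin_clz, assuming a
GCC-compatible compiler, and the expected output is noted in the comments.

#include <stdio.h>
#include <limits.h>

#define EC_MAXI(_a,_b)      ((_a)-((_a)-(_b)&-((_b)>(_a))))
#define EC_MINI(_a,_b)      ((_a)+((_b)-(_a)&-((_b)<(_a))))
#define EC_SIGNI(_a)        (((_a)>0)-((_a)<0))
#define EC_SIGNMASK(_a)     (-((_a)<0))
#define EC_CLAMPI(_a,_b,_c) (EC_MAXI(_a,EC_MINI(_b,_c)))
/*Assumes __builtin_clz is available (gcc/clang); _x must not be zero.*/
#define EC_ILOG(_x)         ((int)sizeof(unsigned)*CHAR_BIT-__builtin_clz(_x))

int main(void){
  /*Branchless min/max: the comparison yields 0 or 1, and negating it gives an
    all-zeros or all-ones mask that selects one of the two operands.*/
  printf("EC_MAXI(3,7)       = %d\n",EC_MAXI(3,7));       /*7*/
  printf("EC_MINI(3,7)       = %d\n",EC_MINI(3,7));       /*3*/
  /*Sign and sign mask of a negative value.*/
  printf("EC_SIGNI(-5)       = %d\n",EC_SIGNI(-5));       /*-1*/
  printf("EC_SIGNMASK(-5)    = %d\n",EC_SIGNMASK(-5));    /*-1 (all bits set)*/
  /*Clamping into [0,15]; per the header's documentation, the lower bound
    wins if the two bounds are ever inverted.*/
  printf("EC_CLAMPI(0,42,15) = %d\n",EC_CLAMPI(0,42,15)); /*15*/
  printf("EC_CLAMPI(0,-3,15) = %d\n",EC_CLAMPI(0,-3,15)); /*0*/
  /*EC_ILOG(x) is the index of the highest set bit of x, plus one.*/
  printf("EC_ILOG(1)         = %d\n",EC_ILOG(1u));        /*1*/
  printf("EC_ILOG(1000)      = %d\n",EC_ILOG(1000u));     /*10*/
  return 0;
}

Compiling this with gcc -O2 and inspecting the generated assembly is a quick
way to confirm that the macros expand without branches, as the comments in the
header claim for modern gcc.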