Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

gitlab.xiph.org/xiph/opus.git - Unnamed repository; edit this file 'description' to name the repository.
summary refs log tree commit diff
diff options
context:
space:
mode:
authorJean-Marc Valin <jmvalin@jmvalin.ca>2011-08-28 10:09:55 +0400
committerJean-Marc Valin <jmvalin@jmvalin.ca>2011-08-28 10:09:55 +0400
commitccaa6d2cf8ed33928624fd3d5b5d72b831c37921 (patch)
tree4af2e9959c4481a1ff3d76ae25eb02fa48228729
parent9592c690d1e59258da498bdf6af675a98c251d53 (diff)
More // comments changed to /*
-rw-r--r--silk/silk_MacroCount.h28
-rw-r--r--silk/silk_MacroDebug.h97
-rw-r--r--silk/silk_burg_modified.c110
-rw-r--r--silk/silk_debug.h6
-rw-r--r--silk/silk_macros.h26
-rw-r--r--silk/silk_typedef.h22
6 files changed, 145 insertions, 144 deletions
diff --git a/silk/silk_MacroCount.h b/silk/silk_MacroCount.h
index 929cf525..95417763 100644
--- a/silk/silk_MacroCount.h
+++ b/silk/silk_MacroCount.h
@@ -159,7 +159,7 @@ static inline opus_int32 SKP_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c
}
-// multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode)
+/* multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode)*/
#undef SKP_MLA_ovflw
#define SKP_MLA_ovflw SKP_MLA
@@ -400,56 +400,56 @@ static inline opus_int32 SKP_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 s
opus_int32 ret;
ops_count += 1;
ret = a + (b << shift);
- return ret; // shift >= 0
+ return ret; /* shift >= 0*/
}
#undef SKP_ADD_LSHIFT32
static inline opus_int32 SKP_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
opus_int32 ret;
ops_count += 1;
ret = a + (b << shift);
- return ret; // shift >= 0
+ return ret; /* shift >= 0*/
}
#undef SKP_ADD_LSHIFT_uint
static inline opus_uint32 SKP_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
opus_uint32 ret;
ops_count += 1;
ret = a + (b << shift);
- return ret; // shift >= 0
+ return ret; /* shift >= 0*/
}
#undef SKP_ADD_RSHIFT
static inline opus_int32 SKP_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
opus_int32 ret;
ops_count += 1;
ret = a + (b >> shift);
- return ret; // shift > 0
+ return ret; /* shift > 0*/
}
#undef SKP_ADD_RSHIFT32
static inline opus_int32 SKP_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
opus_int32 ret;
ops_count += 1;
ret = a + (b >> shift);
- return ret; // shift > 0
+ return ret; /* shift > 0*/
}
#undef SKP_ADD_RSHIFT_uint
static inline opus_uint32 SKP_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
opus_uint32 ret;
ops_count += 1;
ret = a + (b >> shift);
- return ret; // shift > 0
+ return ret; /* shift > 0*/
}
#undef SKP_SUB_LSHIFT32
static inline opus_int32 SKP_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
opus_int32 ret;
ops_count += 1;
ret = a - (b << shift);
- return ret; // shift >= 0
+ return ret; /* shift >= 0*/
}
#undef SKP_SUB_RSHIFT32
static inline opus_int32 SKP_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
opus_int32 ret;
ops_count += 1;
ret = a - (b >> shift);
- return ret; // shift > 0
+ return ret; /* shift > 0*/
}
#undef SKP_RSHIFT_ROUND
@@ -471,7 +471,7 @@ static inline opus_int64 SKP_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
#undef SKP_abs_int64
static inline opus_int64 SKP_abs_int64(opus_int64 a){
ops_count += 1;
- return (((a) > 0) ? (a) : -(a)); // Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN
+ return (((a) > 0) ? (a) : -(a)); /* Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN*/
}
#undef SKP_abs_int32
@@ -548,7 +548,7 @@ static inline opus_int64 SKP_SUB64(opus_int64 a, opus_int64 b){
#undef SKP_ADD_SAT16
static inline opus_int16 SKP_ADD_SAT16( opus_int16 a16, opus_int16 b16 ) {
opus_int16 res;
- // Nb will be counted in AKP_add32 and SKP_SAT16
+ /* Nb will be counted in AKP_add32 and SKP_SAT16*/
res = (opus_int16)SKP_SAT16( SKP_ADD32( (opus_int32)(a16), (b16) ) );
return res;
}
@@ -577,7 +577,7 @@ static inline opus_int64 SKP_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
static inline opus_int16 SKP_SUB_SAT16( opus_int16 a16, opus_int16 b16 ) {
opus_int16 res;
SKP_assert(0);
- // Nb will be counted in sub-macros
+ /* Nb will be counted in sub-macros*/
res = (opus_int16)SKP_SAT16( SKP_SUB32( (opus_int32)(a16), (b16) ) );
return res;
}
@@ -606,7 +606,7 @@ static inline opus_int64 SKP_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
#undef SKP_SMULWW
static inline opus_int32 SKP_SMULWW(opus_int32 a32, opus_int32 b32){
opus_int32 ret;
- // Nb will be counted in sub-macros
+ /* Nb will be counted in sub-macros*/
ret = SKP_MLA(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16));
return ret;
}
@@ -614,7 +614,7 @@ static inline opus_int32 SKP_SMULWW(opus_int32 a32, opus_int32 b32){
#undef SKP_SMLAWW
static inline opus_int32 SKP_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c32){
opus_int32 ret;
- // Nb will be counted in sub-macros
+ /* Nb will be counted in sub-macros*/
ret = SKP_MLA(SKP_SMLAWB((a32), (b32), (c32)), (b32), SKP_RSHIFT_ROUND((c32), 16));
return ret;
}
diff --git a/silk/silk_MacroDebug.h b/silk/silk_MacroDebug.h
index 082d4ac5..b154eef9 100644
--- a/silk/silk_MacroDebug.h
+++ b/silk/silk_MacroDebug.h
@@ -28,8 +28,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _SIGPROCFIX_API_DEBUG_H_
#define _SIGPROCFIX_API_DEBUG_H_
-// Redefine macro functions with extensive assertion in Win32_DEBUG mode.
-// As function can't be undefined, this file can't work with SigProcFIX_MacroCount.h
+/* Redefine macro functions with extensive assertion in Win32_DEBUG mode.
+ As function can't be undefined, this file can't work with SigProcFIX_MacroCount.h */
#if 0 && defined (_WIN32) && defined (_DEBUG) && !defined (SKP_MACRO_COUNT)
@@ -112,11 +112,11 @@ static inline opus_int64 SKP_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
((((a64) & (b64)) & 0x8000000000000000LL) != 0 ? SKP_int64_MIN : (a64)+(b64)) : \
((((a64) | (b64)) & 0x8000000000000000LL) == 0 ? SKP_int64_MAX : (a64)+(b64)) );
if( res != a64 + b64 ) {
- // Check that we saturated to the correct extreme value
+ /* Check that we saturated to the correct extreme value */
SKP_assert( ( res == SKP_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( SKP_int64_MAX >> 3 ) ) ) ||
( res == SKP_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( SKP_int64_MIN >> 3 ) ) ) );
} else {
- // Saturation not necessary
+ /* Saturation not necessary */
SKP_assert( res == a64 + b64 );
}
return res;
@@ -148,11 +148,11 @@ static inline opus_int64 SKP_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
((((a64)^0x8000000000000000LL) & (b64) & 0x8000000000000000LL) ? SKP_int64_MAX : (a64)-(b64)) );
if( res != a64 - b64 ) {
- // Check that we saturated to the correct extreme value
+ /* Check that we saturated to the correct extreme value */
SKP_assert( ( res == SKP_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( SKP_int64_MAX >> 3 ) ) ) ||
( res == SKP_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( SKP_int64_MIN >> 3 ) ) ) );
} else {
- // Saturation not necessary
+ /* Saturation not necessary */
SKP_assert( res == a64 - b64 );
}
return res;
@@ -161,10 +161,10 @@ static inline opus_int64 SKP_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
#undef SKP_MUL
static inline opus_int32 SKP_MUL(opus_int32 a32, opus_int32 b32){
opus_int32 ret;
- opus_int64 ret64; // Will easily show how many bits that are needed
+ opus_int64 ret64; /* Will easily show how many bits that are needed */
ret = a32 * b32;
ret64 = (opus_int64)a32 * (opus_int64)b32;
- SKP_assert((opus_int64)ret == ret64 ); //Check output overflow
+ SKP_assert((opus_int64)ret == ret64 ); /* Check output overflow */
return ret;
}
@@ -172,14 +172,14 @@ static inline opus_int32 SKP_MUL(opus_int32 a32, opus_int32 b32){
static inline opus_uint32 SKP_MUL_uint(opus_uint32 a32, opus_uint32 b32){
opus_uint32 ret;
ret = a32 * b32;
- SKP_assert((opus_uint64)ret == (opus_uint64)a32 * (opus_uint64)b32); //Check output overflow
+ SKP_assert((opus_uint64)ret == (opus_uint64)a32 * (opus_uint64)b32); /* Check output overflow */
return ret;
}
#undef SKP_MLA
static inline opus_int32 SKP_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32){
opus_int32 ret;
ret = a32 + b32 * c32;
- SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32); //Check output overflow
+ SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32); /* Check output overflow */
return ret;
}
@@ -187,7 +187,7 @@ static inline opus_int32 SKP_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32)
static inline opus_int32 SKP_MLA_uint(opus_uint32 a32, opus_uint32 b32, opus_uint32 c32){
opus_uint32 ret;
ret = a32 + b32 * c32;
- SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32); //Check output overflow
+ SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32); /* Check output overflow */
return ret;
}
@@ -233,7 +233,7 @@ static inline opus_int64 SKP_SMULL(opus_int64 a64, opus_int64 b64){
return ret64;
}
-// no checking needed for SKP_SMULBB
+/* no checking needed for SKP_SMULBB */
#undef SKP_SMLABB
static inline opus_int32 SKP_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
opus_int32 ret;
@@ -242,7 +242,7 @@ static inline opus_int32 SKP_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c
return ret;
}
-// no checking needed for SKP_SMULBT
+/* no checking needed for SKP_SMULBT */
#undef SKP_SMLABT
static inline opus_int32 SKP_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
opus_int32 ret;
@@ -251,7 +251,7 @@ static inline opus_int32 SKP_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c
return ret;
}
-// no checking needed for SKP_SMULTT
+/* no checking needed for SKP_SMULTT */
#undef SKP_SMLATT
static inline opus_int32 SKP_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
opus_int32 ret;
@@ -291,7 +291,7 @@ static inline opus_int32 SKP_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c
return ret;
}
-// multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode)
+/* multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode) */
#undef SKP_MLA_ovflw
#define SKP_MLA_ovflw(a32, b32, c32) ((a32) + ((b32) * (c32)))
#undef SKP_SMLABB_ovflw
@@ -305,11 +305,11 @@ static inline opus_int32 SKP_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c
#undef SKP_SMLAWT_ovflw
#define SKP_SMLAWT_ovflw(a32, b32, c32) ((a32) + (((b32) >> 16) * ((c32) >> 16)) + ((((b32) & 0x0000FFFF) * ((c32) >> 16)) >> 16))
-// no checking needed for SKP_SMULL
-// no checking needed for SKP_SMLAL
-// no checking needed for SKP_SMLALBB
-// no checking needed for SigProcFIX_CLZ16
-// no checking needed for SigProcFIX_CLZ32
+/* no checking needed for SKP_SMULL
+ no checking needed for SKP_SMLAL
+ no checking needed for SKP_SMLALBB
+ no checking needed for SigProcFIX_CLZ16
+ no checking needed for SigProcFIX_CLZ32*/
#undef SKP_DIV32
static inline opus_int32 SKP_DIV32(opus_int32 a32, opus_int32 b32){
@@ -325,14 +325,14 @@ static inline opus_int32 SKP_DIV32_16(opus_int32 a32, opus_int32 b32){
return a32 / b32;
}
-// no checking needed for SKP_SAT8
-// no checking needed for SKP_SAT16
-// no checking needed for SKP_SAT32
-// no checking needed for SKP_POS_SAT32
-// no checking needed for SKP_ADD_POS_SAT8
-// no checking needed for SKP_ADD_POS_SAT16
-// no checking needed for SKP_ADD_POS_SAT32
-// no checking needed for SKP_ADD_POS_SAT64
+/* no checking needed for SKP_SAT8
+ no checking needed for SKP_SAT16
+ no checking needed for SKP_SAT32
+ no checking needed for SKP_POS_SAT32
+ no checking needed for SKP_ADD_POS_SAT8
+ no checking needed for SKP_ADD_POS_SAT16
+ no checking needed for SKP_ADD_POS_SAT32
+ no checking needed for SKP_ADD_POS_SAT64 */
#undef SKP_LSHIFT8
static inline opus_int8 SKP_LSHIFT8(opus_int8 a, opus_int32 shift){
opus_int8 ret;
@@ -421,7 +421,7 @@ static inline opus_int32 SKP_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 s
SKP_assert(shift <= 31);
ret = a + (b << shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
- return ret; // shift >= 0
+ return ret; /* shift >= 0 */
}
#undef SKP_ADD_LSHIFT32
static inline opus_int32 SKP_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
@@ -430,7 +430,7 @@ static inline opus_int32 SKP_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32
SKP_assert(shift <= 31);
ret = a + (b << shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
- return ret; // shift >= 0
+ return ret; /* shift >= 0 */
}
#undef SKP_ADD_LSHIFT_uint
static inline opus_uint32 SKP_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
@@ -439,7 +439,7 @@ static inline opus_uint32 SKP_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus
SKP_assert(shift <= 32);
ret = a + (b << shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
- return ret; // shift >= 0
+ return ret; /* shift >= 0 */
}
#undef SKP_ADD_RSHIFT
static inline opus_int32 SKP_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
@@ -448,7 +448,7 @@ static inline opus_int32 SKP_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 s
SKP_assert(shift <= 31);
ret = a + (b >> shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
- return ret; // shift > 0
+ return ret; /* shift > 0 */
}
#undef SKP_ADD_RSHIFT32
static inline opus_int32 SKP_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
@@ -457,7 +457,7 @@ static inline opus_int32 SKP_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32
SKP_assert(shift <= 31);
ret = a + (b >> shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
- return ret; // shift > 0
+ return ret; /* shift > 0 */
}
#undef SKP_ADD_RSHIFT_uint
static inline opus_uint32 SKP_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
@@ -466,7 +466,7 @@ static inline opus_uint32 SKP_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus
SKP_assert(shift <= 32);
ret = a + (b >> shift);
SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
- return ret; // shift > 0
+ return ret; /* shift > 0 */
}
#undef SKP_SUB_LSHIFT32
static inline opus_int32 SKP_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
@@ -475,7 +475,7 @@ static inline opus_int32 SKP_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32
SKP_assert(shift <= 31);
ret = a - (b << shift);
SKP_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) << shift));
- return ret; // shift >= 0
+ return ret; /* shift >= 0 */
}
#undef SKP_SUB_RSHIFT32
static inline opus_int32 SKP_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
@@ -484,7 +484,7 @@ static inline opus_int32 SKP_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32
SKP_assert(shift <= 31);
ret = a - (b >> shift);
SKP_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) >> shift));
- return ret; // shift > 0
+ return ret; /* shift > 0 */
}
#undef SKP_RSHIFT_ROUND
@@ -506,17 +506,17 @@ static inline opus_int64 SKP_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
return ret;
}
-// SKP_abs is used on floats also, so doesn't work...
-//#undef SKP_abs
-//static inline opus_int32 SKP_abs(opus_int32 a){
-// SKP_assert(a != 0x80000000);
-// return (((a) > 0) ? (a) : -(a)); // Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN
-//}
+/* SKP_abs is used on floats also, so doesn't work... */
+/*#undef SKP_abs
+static inline opus_int32 SKP_abs(opus_int32 a){
+ SKP_assert(a != 0x80000000);
+ return (((a) > 0) ? (a) : -(a)); // Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN
+}*/
#undef SKP_abs_int64
static inline opus_int64 SKP_abs_int64(opus_int64 a){
SKP_assert(a != 0x8000000000000000);
- return (((a) > 0) ? (a) : -(a)); // Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN
+ return (((a) > 0) ? (a) : -(a)); /* Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN */
}
#undef SKP_abs_int32
@@ -549,11 +549,12 @@ static inline opus_int32 SKP_CHECK_FIT32( opus_int64 a ){
return( ret );
}
-// no checking for SKP_NSHIFT_MUL_32_32
-// no checking for SKP_NSHIFT_MUL_16_16
-// no checking needed for SKP_min
-// no checking needed for SKP_max
-// no checking needed for SKP_sign
+/* no checking for SKP_NSHIFT_MUL_32_32
+ no checking for SKP_NSHIFT_MUL_16_16
+ no checking needed for SKP_min
+ no checking needed for SKP_max
+ no checking needed for SKP_sign
+*/
#endif
#endif
diff --git a/silk/silk_burg_modified.c b/silk/silk_burg_modified.c
index 9db76e22..2d525b22 100644
--- a/silk/silk_burg_modified.c
+++ b/silk/silk_burg_modified.c
@@ -31,7 +31,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "silk_SigProc_FIX.h"
-#define MAX_FRAME_SIZE 384 // subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384
+#define MAX_FRAME_SIZE 384 /* subfr_length * nb_subfr = ( 0.005 * 16000 + 16 ) * 4 = 384*/
#define MAX_NB_SUBFR 4
#define QA 25
@@ -105,7 +105,7 @@ void silk_burg_modified(
SKP_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
/* Initialize */
- CAb[ 0 ] = CAf[ 0 ] = C0 + SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1; // Q(-rshifts)
+ CAb[ 0 ] = CAf[ 0 ] = C0 + SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1; /* Q(-rshifts)*/
for( n = 0; n < D; n++ ) {
/* Update first row of correlation matrix (without first element) */
@@ -115,70 +115,70 @@ void silk_burg_modified(
if( rshifts > -2 ) {
for( s = 0; s < nb_subfr; s++ ) {
x_ptr = x + s * subfr_length;
- x1 = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts ); // Q(16-rshifts)
- x2 = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts ); // Q(16-rshifts)
- tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 ); // Q(QA-16)
- tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 ); // Q(QA-16)
+ x1 = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ], 16 - rshifts ); /* Q(16-rshifts)*/
+ x2 = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts ); /* Q(16-rshifts)*/
+ tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ], QA - 16 ); /* Q(QA-16)*/
+ tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 ); /* Q(QA-16)*/
for( k = 0; k < n; k++ ) {
- C_first_row[ k ] = SKP_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); // Q( -rshifts )
- C_last_row[ k ] = SKP_SMLAWB( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); // Q( -rshifts )
+ C_first_row[ k ] = SKP_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts )*/
+ C_last_row[ k ] = SKP_SMLAWB( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
Atmp_QA = Af_QA[ k ];
- tmp1 = SKP_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] ); // Q(QA-16)
- tmp2 = SKP_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] ); // Q(QA-16)
+ tmp1 = SKP_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ] ); /* Q(QA-16)*/
+ tmp2 = SKP_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] ); /* Q(QA-16)*/
}
- tmp1 = SKP_LSHIFT32( -tmp1, 32 - QA - rshifts ); // Q(16-rshifts)
- tmp2 = SKP_LSHIFT32( -tmp2, 32 - QA - rshifts ); // Q(16-rshifts)
+ tmp1 = SKP_LSHIFT32( -tmp1, 32 - QA - rshifts ); /* Q(16-rshifts)*/
+ tmp2 = SKP_LSHIFT32( -tmp2, 32 - QA - rshifts ); /* Q(16-rshifts)*/
for( k = 0; k <= n; k++ ) {
- CAf[ k ] = SKP_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] ); // Q( -rshift )
- CAb[ k ] = SKP_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] ); // Q( -rshift )
+ CAf[ k ] = SKP_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ] ); /* Q( -rshift )*/
+ CAb[ k ] = SKP_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] ); /* Q( -rshift )*/
}
}
} else {
for( s = 0; s < nb_subfr; s++ ) {
x_ptr = x + s * subfr_length;
- x1 = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ], -rshifts ); // Q( -rshifts )
- x2 = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts ); // Q( -rshifts )
- tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ], 17 ); // Q17
- tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 ); // Q17
+ x1 = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ], -rshifts ); /* Q( -rshifts )*/
+ x2 = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts ); /* Q( -rshifts )*/
+ tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ], 17 ); /* Q17*/
+ tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 ); /* Q17*/
for( k = 0; k < n; k++ ) {
- C_first_row[ k ] = SKP_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); // Q( -rshifts )
- C_last_row[ k ] = SKP_MLA( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); // Q( -rshifts )
- Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 17 ); // Q17
- tmp1 = SKP_MLA( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); // Q17
- tmp2 = SKP_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 ); // Q17
+ C_first_row[ k ] = SKP_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ] ); /* Q( -rshifts )*/
+ C_last_row[ k ] = SKP_MLA( C_last_row[ k ], x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
+ Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 17 ); /* Q17*/
+ tmp1 = SKP_MLA( tmp1, x_ptr[ n - k - 1 ], Atmp1 ); /* Q17*/
+ tmp2 = SKP_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 ); /* Q17*/
}
- tmp1 = -tmp1; // Q17
- tmp2 = -tmp2; // Q17
+ tmp1 = -tmp1; /* Q17*/
+ tmp2 = -tmp2; /* Q17*/
for( k = 0; k <= n; k++ ) {
CAf[ k ] = SKP_SMLAWW( CAf[ k ], tmp1,
- SKP_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) ); // Q( -rshift )
+ SKP_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) ); /* Q( -rshift )*/
CAb[ k ] = SKP_SMLAWW( CAb[ k ], tmp2,
- SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );// Q( -rshift )
+ SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );/* Q( -rshift )*/
}
}
}
/* Calculate nominator and denominator for the next order reflection (parcor) coefficient */
- tmp1 = C_first_row[ n ]; // Q( -rshifts )
- tmp2 = C_last_row[ n ]; // Q( -rshifts )
- num = 0; // Q( -rshifts )
- nrg = SKP_ADD32( CAb[ 0 ], CAf[ 0 ] ); // Q( 1-rshifts )
+ tmp1 = C_first_row[ n ]; /* Q( -rshifts )*/
+ tmp2 = C_last_row[ n ]; /* Q( -rshifts )*/
+ num = 0; /* Q( -rshifts )*/
+ nrg = SKP_ADD32( CAb[ 0 ], CAf[ 0 ] ); /* Q( 1-rshifts )*/
for( k = 0; k < n; k++ ) {
Atmp_QA = Af_QA[ k ];
lz = silk_CLZ32( SKP_abs( Atmp_QA ) ) - 1;
lz = SKP_min( 32 - QA, lz );
- Atmp1 = SKP_LSHIFT32( Atmp_QA, lz ); // Q( QA + lz )
+ Atmp1 = SKP_LSHIFT32( Atmp_QA, lz ); /* Q( QA + lz )*/
- tmp1 = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( C_last_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); // Q( -rshifts )
- tmp2 = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); // Q( -rshifts )
- num = SKP_ADD_LSHIFT32( num, SKP_SMMUL( CAb[ n - k ], Atmp1 ), 32 - QA - lz ); // Q( -rshifts )
+ tmp1 = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( C_last_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts )*/
+ tmp2 = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts )*/
+ num = SKP_ADD_LSHIFT32( num, SKP_SMMUL( CAb[ n - k ], Atmp1 ), 32 - QA - lz ); /* Q( -rshifts )*/
nrg = SKP_ADD_LSHIFT32( nrg, SKP_SMMUL( SKP_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ),
- Atmp1 ), 32 - QA - lz ); // Q( 1-rshifts )
+ Atmp1 ), 32 - QA - lz ); /* Q( 1-rshifts )*/
}
- CAf[ n + 1 ] = tmp1; // Q( -rshifts )
- CAb[ n + 1 ] = tmp2; // Q( -rshifts )
- num = SKP_ADD32( num, tmp2 ); // Q( -rshifts )
- num = SKP_LSHIFT32( -num, 1 ); // Q( 1-rshifts )
+ CAf[ n + 1 ] = tmp1; /* Q( -rshifts )*/
+ CAb[ n + 1 ] = tmp2; /* Q( -rshifts )*/
+ num = SKP_ADD32( num, tmp2 ); /* Q( -rshifts )*/
+ num = SKP_LSHIFT32( -num, 1 ); /* Q( 1-rshifts )*/
/* Calculate the next order reflection (parcor) coefficient */
if( SKP_abs( num ) < nrg ) {
@@ -192,31 +192,31 @@ void silk_burg_modified(
/* Update the AR coefficients */
for( k = 0; k < (n + 1) >> 1; k++ ) {
- tmp1 = Af_QA[ k ]; // QA
- tmp2 = Af_QA[ n - k - 1 ]; // QA
- Af_QA[ k ] = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 ); // QA
- Af_QA[ n - k - 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 ); // QA
+ tmp1 = Af_QA[ k ]; /* QA*/
+ tmp2 = Af_QA[ n - k - 1 ]; /* QA*/
+ Af_QA[ k ] = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 ); /* QA*/
+ Af_QA[ n - k - 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 ); /* QA*/
}
- Af_QA[ n ] = SKP_RSHIFT32( rc_Q31, 31 - QA ); // QA
+ Af_QA[ n ] = SKP_RSHIFT32( rc_Q31, 31 - QA ); /* QA*/
/* Update C * Af and C * Ab */
for( k = 0; k <= n + 1; k++ ) {
- tmp1 = CAf[ k ]; // Q( -rshifts )
- tmp2 = CAb[ n - k + 1 ]; // Q( -rshifts )
- CAf[ k ] = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 ); // Q( -rshifts )
- CAb[ n - k + 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 ); // Q( -rshifts )
+ tmp1 = CAf[ k ]; /* Q( -rshifts )*/
+ tmp2 = CAb[ n - k + 1 ]; /* Q( -rshifts )*/
+ CAf[ k ] = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 ); /* Q( -rshifts )*/
+ CAb[ n - k + 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 ); /* Q( -rshifts )*/
}
}
/* Return residual energy */
- nrg = CAf[ 0 ]; // Q( -rshifts )
- tmp1 = 1 << 16; // Q16
+ nrg = CAf[ 0 ]; /* Q( -rshifts )*/
+ tmp1 = 1 << 16; /* Q16*/
for( k = 0; k < D; k++ ) {
- Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); // Q16
- nrg = SKP_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 ); // Q( -rshifts )
- tmp1 = SKP_SMLAWW( tmp1, Atmp1, Atmp1 ); // Q16
+ Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 16 ); /* Q16*/
+ nrg = SKP_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 ); /* Q( -rshifts )*/
+ tmp1 = SKP_SMLAWW( tmp1, Atmp1, Atmp1 ); /* Q16*/
A_Q16[ k ] = -Atmp1;
}
- *res_nrg = SKP_SMLAWW( nrg, SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 ); // Q( -rshifts )
+ *res_nrg = SKP_SMLAWW( nrg, SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 ); /* Q( -rshifts )*/
*res_nrg_Q = -rshifts;
}
diff --git a/silk/silk_debug.h b/silk/silk_debug.h
index 654ab7c4..835b55ed 100644
--- a/silk/silk_debug.h
+++ b/silk/silk_debug.h
@@ -53,7 +53,7 @@ unsigned long GetHighResolutionTime(void); /* O: time in usec*/
/* overrule the above */
#if 0
- // #define NO_ASSERTS
+ /* #define NO_ASSERTS*/
#undef SILK_DEBUG
#define SILK_DEBUG 1
#endif
@@ -68,8 +68,8 @@ unsigned long GetHighResolutionTime(void); /* O: time in usec*/
#if (defined(_WIN32) || defined(_WINCE))
#include <windows.h> /* timer */
-#pragma warning( disable : 4996 ) // stop bitching about strcpy in TIC()
-#else // Linux or Mac
+#pragma warning( disable : 4996 ) /* stop bitching about strcpy in TIC()*/
+#else /* Linux or Mac*/
#include <sys/time.h>
#endif
diff --git a/silk/silk_macros.h b/silk/silk_macros.h
index adbb02af..a507de14 100644
--- a/silk/silk_macros.h
+++ b/silk/silk_macros.h
@@ -28,39 +28,39 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _SILK_API_C_H_
#define _SILK_API_C_H_
-// This is an inline header file for general platform.
+/* This is an inline header file for general platform. */
-// (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int
+/* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
#define SKP_SMULWB(a32, b32) ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))
-// a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int
+/* a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int */
#define SKP_SMLAWB(a32, b32, c32) ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)))
-// (a32 * (b32 >> 16)) >> 16
+/* (a32 * (b32 >> 16)) >> 16 */
#define SKP_SMULWT(a32, b32) (((a32) >> 16) * ((b32) >> 16) + ((((a32) & 0x0000FFFF) * ((b32) >> 16)) >> 16))
-// a32 + (b32 * (c32 >> 16)) >> 16
+/* a32 + (b32 * (c32 >> 16)) >> 16 */
#define SKP_SMLAWT(a32, b32, c32) ((a32) + (((b32) >> 16) * ((c32) >> 16)) + ((((b32) & 0x0000FFFF) * ((c32) >> 16)) >> 16))
-// (opus_int32)((opus_int16)(a3))) * (opus_int32)((opus_int16)(b32)) output have to be 32bit int
+/* (opus_int32)((opus_int16)(a3))) * (opus_int32)((opus_int16)(b32)) output have to be 32bit int */
#define SKP_SMULBB(a32, b32) ((opus_int32)((opus_int16)(a32)) * (opus_int32)((opus_int16)(b32)))
-// a32 + (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)) output have to be 32bit int
+/* a32 + (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)) output have to be 32bit int */
#define SKP_SMLABB(a32, b32, c32) ((a32) + ((opus_int32)((opus_int16)(b32))) * (opus_int32)((opus_int16)(c32)))
-// (opus_int32)((opus_int16)(a32)) * (b32 >> 16)
+/* (opus_int32)((opus_int16)(a32)) * (b32 >> 16) */
#define SKP_SMULBT(a32, b32) ((opus_int32)((opus_int16)(a32)) * ((b32) >> 16))
-// a32 + (opus_int32)((opus_int16)(b32)) * (c32 >> 16)
+/* a32 + (opus_int32)((opus_int16)(b32)) * (c32 >> 16) */
#define SKP_SMLABT(a32, b32, c32) ((a32) + ((opus_int32)((opus_int16)(b32))) * ((c32) >> 16))
-// a64 + (b32 * c32)
+/* a64 + (b32 * c32) */
#define SKP_SMLAL(a64, b32, c32) (SKP_ADD64((a64), ((opus_int64)(b32) * (opus_int64)(c32))))
-// (a32 * b32) >> 16
+/* (a32 * b32) >> 16 */
#define SKP_SMULWW(a32, b32) SKP_MLA(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16))
-// a32 + ((b32 * c32) >> 16)
+/* a32 + ((b32 * c32) >> 16) */
#define SKP_SMLAWW(a32, b32, c32) SKP_MLA(SKP_SMLAWB((a32), (b32), (c32)), (b32), SKP_RSHIFT_ROUND((c32), 16))
/* add/subtract with output saturated */
@@ -128,5 +128,5 @@ static inline opus_int32 silk_CLZ32(opus_int32 in32)
#endif
#define matrix_c_adr(Matrix_base_adr, row, column, M) (Matrix_base_adr + ((row)+(M)*(column)))
-#endif //_SILK_API_C_H_
+#endif /* _SILK_API_C_H_ */
diff --git a/silk/silk_typedef.h b/silk/silk_typedef.h
index 49d6bae3..6183b9c8 100644
--- a/silk/silk_typedef.h
+++ b/silk/silk_typedef.h
@@ -55,20 +55,20 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# define SKP_STR_CASEINSENSITIVE_COMPARE(x, y) strcasecmp(x, y)
#endif
-#define SKP_int64_MAX ((opus_int64)0x7FFFFFFFFFFFFFFFLL) // 2^63 - 1
-#define SKP_int64_MIN ((opus_int64)0x8000000000000000LL) // -2^63
-#define SKP_int32_MAX 0x7FFFFFFF // 2^31 - 1 = 2147483647
-#define SKP_int32_MIN ((opus_int32)0x80000000) // -2^31 = -2147483648
-#define SKP_int16_MAX 0x7FFF // 2^15 - 1 = 32767
-#define SKP_int16_MIN ((opus_int16)0x8000) // -2^15 = -32768
-#define SKP_int8_MAX 0x7F // 2^7 - 1 = 127
-#define SKP_int8_MIN ((opus_int8)0x80) // -2^7 = -128
+#define SKP_int64_MAX ((opus_int64)0x7FFFFFFFFFFFFFFFLL) /* 2^63 - 1 */
+#define SKP_int64_MIN ((opus_int64)0x8000000000000000LL) /* -2^63 */
+#define SKP_int32_MAX 0x7FFFFFFF /* 2^31 - 1 = 2147483647 */
+#define SKP_int32_MIN ((opus_int32)0x80000000) /* -2^31 = -2147483648 */
+#define SKP_int16_MAX 0x7FFF /* 2^15 - 1 = 32767 */
+#define SKP_int16_MIN ((opus_int16)0x8000) /* -2^15 = -32768 */
+#define SKP_int8_MAX 0x7F /* 2^7 - 1 = 127 */
+#define SKP_int8_MIN ((opus_int8)0x80) /* -2^7 = -128 */
-#define SKP_uint32_MAX 0xFFFFFFFF // 2^32 - 1 = 4294967295
+#define SKP_uint32_MAX 0xFFFFFFFF /* 2^32 - 1 = 4294967295 */
#define SKP_uint32_MIN 0x00000000
-#define SKP_uint16_MAX 0xFFFF // 2^16 - 1 = 65535
+#define SKP_uint16_MAX 0xFFFF /* 2^16 - 1 = 65535 */
#define SKP_uint16_MIN 0x0000
-#define SKP_uint8_MAX 0xFF // 2^8 - 1 = 255
+#define SKP_uint8_MAX 0xFF /* 2^8 - 1 = 255 */
#define SKP_uint8_MIN 0x00
#define SKP_TRUE 1