github.com/FastLED/FastLED.git
author     Eugene8388608 <zhenya.pavlenko.2004@mail.ru>  2022-03-28 17:02:14 +0300
committer  GitHub <noreply@github.com>  2022-03-28 17:02:14 +0300
commit     54bcb56f129ffb2e2b6a98222826986ec48575bd (patch)
tree       782448576ca9483d960ab5f10292982cbeb51fb8
parent     4d73cddfe4bd2b370ee882b6f68769bf7f8309f4 (diff)

Add files via upload

-rw-r--r--  src/lib8tion.h        | 10
-rw-r--r--  src/lib8tion/math8.h  | 96
2 files changed, 79 insertions(+), 27 deletions(-)
diff --git a/src/lib8tion.h b/src/lib8tion.h
index 80e27100..8efc0dc8 100644
--- a/src/lib8tion.h
+++ b/src/lib8tion.h
@@ -33,7 +33,7 @@ FASTLED_NAMESPACE_BEGIN
qsub8( i, j) == MAX( (i - j), 0 )
- Saturating signed 8-bit ("7-bit") add.
- qadd7( i, j) == MIN( (i + j), 0x7F)
+ qadd7( i, j) == MAX( MIN( (i + j), 0x7F), -0x80)
- Scaling (down) of unsigned 8- and 16- bit values.
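
The new qadd7 contract clamps in both directions. As a standalone sketch of that saturating behavior in portable C (qadd7_ref is an illustrative re-implementation, not the library's optimized routine):

#include <stdint.h>
#include <assert.h>

/* Portable re-implementation of the saturating signed add described
   above: the sum is clamped into the int8_t range [-0x80, 0x7F]. */
static int8_t qadd7_ref(int8_t i, int8_t j)
{
    int16_t t = i + j;            /* widen so the true sum cannot wrap */
    if (t > 127)  t = 127;        /* clamp positive overflow to 0x7F   */
    if (t < -128) t = -128;       /* clamp negative overflow to -0x80  */
    return (int8_t)t;
}

int main(void)
{
    assert(qadd7_ref(100, 50)   == 127);   /* saturates high              */
    assert(qadd7_ref(-100, -50) == -128);  /* saturates low (the new case) */
    assert(qadd7_ref(10, 20)    == 30);    /* in-range sums are unchanged */
    return 0;
}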
@@ -99,7 +99,7 @@ FASTLED_NAMESPACE_BEGIN
- Fast 8-bit "easing in/out" function.
- ease8InOutCubic(x) == 3(x^i) - 2(x^3)
+ ease8InOutCubic(x) == 3(x^2) - 2(x^3)
ease8InOutApprox(x) ==
faster, rougher, approximation of cubic easing
ease8InOutQuad(x) == quadratic (vs cubic) easing
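
The corrected formula is the classic smoothstep cubic, scaled onto the 0..255 fract8 range. A minimal check of that math (ease_cubic_ref is an illustrative floating-point reference, not the library's fixed-point routine):

#include <stdint.h>
#include <stdio.h>

/* Illustrative cubic ease: y = 3x^2 - 2x^3 with x in [0,1],
   mapped onto 8-bit values as the corrected comment describes. */
static uint8_t ease_cubic_ref(uint8_t i)
{
    double x = i / 255.0;
    double y = 3.0 * x * x - 2.0 * x * x * x;
    return (uint8_t)(y * 255.0 + 0.5);
}

int main(void)
{
    /* Endpoints are fixed points; the midpoint maps to the midpoint. */
    printf("%u %u %u\n",
           ease_cubic_ref(0), ease_cubic_ref(128), ease_cubic_ref(255));
    return 0;  /* prints: 0 128 255 */
}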
@@ -732,9 +732,9 @@ LIB8STATIC uint8_t ease8InOutApprox( fract8 i)
"Ldone_%=: \n\t"
- : [i] "+&a" (i)
+ : [i] "+a" (i)
:
- : "r0", "r1"
+ : "r0"
);
return i;
}
@@ -744,7 +744,7 @@ LIB8STATIC uint8_t ease8InOutApprox( fract8 i)
-/// triwave8: triangle (sawtooth) wave generator. Useful for
+/// triwave8: triangle wave generator. Useful for
/// turning a one-byte ever-increasing value into a
/// one-byte value that oscillates up and down.
///
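
The corrected description (triangle, not sawtooth) means the output ramps up over the first half of the input range and back down over the second. A portable sketch of that shape (triwave8_ref is an illustrative re-implementation):

#include <stdint.h>
#include <assert.h>

/* Illustrative triangle wave: input 0..255 maps to an output that
   rises to a peak at the halfway point, then falls back to zero. */
static uint8_t triwave8_ref(uint8_t in)
{
    if (in & 0x80)                 /* second half: ramp back down  */
        in = (uint8_t)(255 - in);
    return (uint8_t)(in << 1);     /* scale 0..127 up to 0..254    */
}

int main(void)
{
    assert(triwave8_ref(0)   == 0);
    assert(triwave8_ref(64)  == 128);  /* rising edge   */
    assert(triwave8_ref(128) == 254);  /* near the peak */
    assert(triwave8_ref(192) == 126);  /* falling edge  */
    return 0;
}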
diff --git a/src/lib8tion/math8.h b/src/lib8tion/math8.h
index f95697bd..fad10d29 100644
--- a/src/lib8tion/math8.h
+++ b/src/lib8tion/math8.h
@@ -50,15 +50,16 @@ LIB8STATIC_ALWAYS_INLINE uint8_t qadd8( uint8_t i, uint8_t j)
#endif
}
-/// Add one byte to another, saturating at 0x7F
+/// Add one byte to another, saturating at 0x7F and -0x80
/// @param i - first byte to add
/// @param j - second byte to add
-/// @returns the sum of i & j, capped at 0xFF
+/// @returns the sum of i & j, capped at 0x7F and -0x80
LIB8STATIC_ALWAYS_INLINE int8_t qadd7( int8_t i, int8_t j)
{
#if QADD7_C == 1
int16_t t = i + j;
if( t > 127) t = 127;
+ if( t < -128) t = -128;
return t;
#elif QADD7_AVRASM == 1
asm volatile(
@@ -66,11 +67,18 @@ LIB8STATIC_ALWAYS_INLINE int8_t qadd7( int8_t i, int8_t j)
"add %0, %1 \n\t"
/* Now test the V flag.
- If V is clear, we branch around a load of 0x7F into i.
- If V is set, we go ahead and load 0x7F into i.
+ If V is clear, we are done.
+ If V is set, we assume that result is positive and load 0x7F into i.
*/
"brvc L_%= \n\t"
"ldi %0, 0x7F \n\t"
+
+ /* Now test the S flag.
+ If S is clear, our assumption is right.
+ If S is set, we load 0x80 into i.
+ */
+ "brge L_%= \n\t"
+ "ldi %0, 0x80 \n\t"
"L_%=: "
: "+a" (i)
: "a" (j)
@@ -129,7 +137,7 @@ LIB8STATIC_ALWAYS_INLINE uint8_t add8( uint8_t i, uint8_t j)
#endif
}
-/// add one byte to another, with one byte result
+/// add one byte to a two-byte value, with a two-byte result
LIB8STATIC_ALWAYS_INLINE uint16_t add8to16( uint8_t i, uint16_t j)
{
#if ADD8_C == 1
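
The corrected comment reflects what add8to16 actually does: fold an 8-bit value into a 16-bit value, with a 16-bit result that can carry into the high byte. A trivial portable sketch (add8to16_ref is an illustrative re-implementation):

#include <stdint.h>
#include <assert.h>

/* Portable sketch of the corrected description: one byte added
   into a two-byte value, yielding a two-byte result. */
static uint16_t add8to16_ref(uint8_t i, uint16_t j)
{
    return (uint16_t)(i + j);
}

int main(void)
{
    assert(add8to16_ref(0xFF, 0x00FF) == 0x01FE); /* carries into high byte */
    return 0;
}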
@@ -188,6 +196,30 @@ LIB8STATIC_ALWAYS_INLINE uint8_t avg8( uint8_t i, uint8_t j)
}
/// Calculate an integer average of two unsigned
+/// 8-bit integer values (uint8_t).
+/// Fractional results are rounded up, e.g. avg8r(20,41) = 31
+LIB8STATIC_ALWAYS_INLINE uint8_t avg8r( uint8_t i, uint8_t j)
+{
+#if AVG8_C == 1
+ return (i + j + 1) >> 1;
+#elif AVG8_AVRASM == 1
+ asm volatile(
+ /* First, add j to i, 9th bit overflows into C flag */
+ "add %0, %1 \n\t"
+ /* Divide by two, moving C flag into high 8th bit, old 1st bit now in C */
+ "ror %0 \n\t"
+ /* Add C flag */
+ "adc %0, __zero_reg__\n\t"
+ : "+a" (i)
+ : "a" (j)
+ );
+ return i;
+#else
+#error "No implementation for avg8r available."
+#endif
+}
+
+/// Calculate an integer average of two unsigned
/// 16-bit integer values (uint16_t).
/// Fractional results are rounded down, e.g. avg16(20,41) = 30
LIB8STATIC_ALWAYS_INLINE uint16_t avg16( uint16_t i, uint16_t j)
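
A quick check of the avg8r added above: the +1 before the shift is exactly what pushes .5 fractions upward, and the asm path recovers the ninth bit via the carry flag. In portable C (avg8r_ref re-implements the AVG8_C branch; the name is illustrative):

#include <stdint.h>
#include <assert.h>

/* Re-implementation of the AVG8_C branch: round-half-up average.
   Integer promotion to int means i + j + 1 cannot wrap here. */
static uint8_t avg8r_ref(uint8_t i, uint8_t j)
{
    return (uint8_t)((i + j + 1) >> 1);
}

int main(void)
{
    assert(avg8r_ref(20, 41)   == 31);  /* the doc-comment example */
    assert(avg8r_ref(0, 1)     == 1);   /* exact half rounds up    */
    assert(avg8r_ref(255, 255) == 255); /* top of range preserved  */
    return 0;
}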
@@ -213,15 +245,44 @@ LIB8STATIC_ALWAYS_INLINE uint16_t avg16( uint16_t i, uint16_t j)
#endif
}
+/// Calculate an integer average of two unsigned
+/// 16-bit integer values (uint16_t).
+/// Fractional results are rounded up, e.g. avg16r(20,41) = 31
+LIB8STATIC_ALWAYS_INLINE uint16_t avg16r( uint16_t i, uint16_t j)
+{
+#if AVG16_C == 1
+ return (uint32_t)((uint32_t)(i) + (uint32_t)(j) + 1) >> 1;
+#elif AVG16_AVRASM == 1
+ asm volatile(
+ /* First, add jLo (heh) to iLo, 9th bit overflows into C flag */
+ "add %A[i], %A[j] \n\t"
+ /* Now, add C + jHi to iHi, 17th bit overflows into C flag */
+ "adc %B[i], %B[j] \n\t"
+ /* Divide iHi by two, moving C flag into high 16th bit, old 9th bit now in C */
+ "ror %B[i] \n\t"
+ /* Divide iLo by two, moving C flag into high 8th bit, old 1st bit now in C */
+ "ror %A[i] \n\t"
+ /* Add C flag */
+ "adc %A[i], __zero_reg__\n\t"
+ "adc %B[i], __zero_reg__\n\t"
+ : [i] "+a" (i)
+ : [j] "a" (j)
+ );
+ return i;
+#else
+#error "No implementation for avg16r available."
+#endif
+}
+
/// Calculate an integer average of two signed 7-bit
/// integers (int8_t)
/// If the first argument is even, result is rounded down.
-/// If the first argument is odd, result is result up.
+/// If the first argument is odd, result is rounded up.
LIB8STATIC_ALWAYS_INLINE int8_t avg7( int8_t i, int8_t j)
{
#if AVG7_C == 1
- return ((i + j) >> 1) + (i & 0x1);
+ return (i>>1) + (j>>1) + (i & 0x1);
#elif AVG7_AVRASM == 1
asm volatile(
"asr %1 \n\t"
@@ -239,11 +300,11 @@ LIB8STATIC_ALWAYS_INLINE int8_t avg7( int8_t i, int8_t j)
/// Calculate an integer average of two signed 15-bit
/// integers (int16_t)
/// If the first argument is even, result is rounded down.
-/// If the first argument is odd, result is result up.
+/// If the first argument is odd, result is rounded up.
LIB8STATIC_ALWAYS_INLINE int16_t avg15( int16_t i, int16_t j)
{
#if AVG15_C == 1
- return ((int32_t)((int32_t)(i) + (int32_t)(j)) >> 1) + (i & 0x1);
+ return (i>>1) + (j>>1) + (i & 0x1);
#elif AVG15_AVRASM == 1
asm volatile(
/* first divide j by 2, throwing away lowest bit */
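
avg15 gets the same rewrite as avg7, and here it pays off twice: the old form needed int32_t casts to keep i + j from wrapping 16-bit math, while the new form never leaves 16 bits. A sketch (avg15_ref mirrors the new AVG15_C branch, again assuming arithmetic right shift of negatives):

#include <stdint.h>
#include <assert.h>

/* Mirror of the new AVG15_C branch: halving each operand first keeps
   the whole computation inside 16 bits -- no widening casts needed. */
static int16_t avg15_ref(int16_t i, int16_t j)
{
    return (int16_t)((i >> 1) + (j >> 1) + (i & 0x1));
}

int main(void)
{
    assert(avg15_ref(20, 41)       == 30);    /* even first arg rounds down */
    assert(avg15_ref(21, 40)       == 31);    /* odd first arg rounds up    */
    assert(avg15_ref(32000, 32000) == 32000); /* i + j would wrap 16 bits   */
    return 0;
}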
@@ -321,13 +382,6 @@ LIB8STATIC uint8_t addmod8( uint8_t a, uint8_t b, uint8_t m)
/// Subtract two numbers, and calculate the modulo
/// of the difference and a third number, M.
/// In other words, it returns (A-B) % M.
-/// It is designed as a compact mechanism for
-/// incrementing a 'mode' switch and wrapping
-/// around back to 'mode 0' when the switch
-/// goes past the end of the available range.
-/// e.g. if you have seven modes, this switches
-/// to the next one and wraps around if needed:
-/// mode = addmod8( mode, 1, 7);
/// See 'mod8' for notes on performance.
LIB8STATIC uint8_t submod8( uint8_t a, uint8_t b, uint8_t m)
{
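
submod8 is the decrementing counterpart of addmod8 (whose mode-switch example the diff relocates): it steps a value backward and keeps it inside [0, M). A hedged usage sketch (submod8_ref re-implements the documented (A-B) % M behavior for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-implementation of the documented behavior:
   returns (a - b) wrapped into the range [0, m). */
static uint8_t submod8_ref(uint8_t a, uint8_t b, uint8_t m)
{
    int16_t d = (int16_t)a - (int16_t)b;
    while (d < 0) d += m;      /* wrap negative differences into range */
    return (uint8_t)(d % m);
}

int main(void)
{
    uint8_t mode = 0;
    /* step backward through seven modes, wrapping 0 -> 6 */
    mode = submod8_ref(mode, 1, 7);
    printf("%u\n", mode);      /* prints 6 */
    return 0;
}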
@@ -383,16 +437,14 @@ LIB8STATIC_ALWAYS_INLINE uint8_t qmul8( uint8_t i, uint8_t j)
asm volatile(
/* Multiply 8-bit i * 8-bit j, giving 16-bit r1,r0 */
" mul %0, %1 \n\t"
+ /* Extract the LOW 8-bits (r0) */
+ " mov %0, r0 \n\t"
/* If high byte of result is zero, all is well. */
" tst r1 \n\t"
" breq Lnospill_%= \n\t"
- /* If high byte of result > 0, saturate low byte to 0xFF */
- " ldi %0,0xFF \n\t"
- " rjmp Ldone_%= \n\t"
+ /* If high byte of result > 0, saturate to 0xFF */
+ " ldi %0, 0xFF \n\t"
"Lnospill_%=: \n\t"
- /* Extract the LOW 8-bits (r0) */
- " mov %0, r0 \n\t"
- "Ldone_%=: \n\t"
/* Restore r1 to "0"; it's expected to always be that */
" clr __zero_reg__ \n\t"
: "+a" (i)