
github.com/cr-marcstevens/sha1collisiondetection.git
path: root/lib
author    Dan Shumow <shumow@gmail.com>    2017-03-10 03:07:32 +0300
committer Dan Shumow <shumow@gmail.com>    2017-03-10 03:07:32 +0300
commit    55d1db0980501e582f6cd103a04f493995b1df78 (patch)
tree      88574ec7cd6b107f41c34f396209599d8ce3799f /lib
parent    ad744c8b7a841d2afcb2d4c04f8952d9005501be (diff)
parent    9c8e73cadb35776d3310e3f8ceda7183fa75a39f (diff)
Merge remote-tracking branch 'origin/master' into feature/performance
Diffstat (limited to 'lib')
-rw-r--r--  lib/sha1.c                    |  66
-rw-r--r--  lib/sha1.h                    |  54
-rw-r--r--  lib/sha1_simd.cinc            | 949
-rw-r--r--  lib/sha1_simd_avx256.c        |  28
-rw-r--r--  lib/sha1_simd_mmx64.c         |  27
-rw-r--r--  lib/sha1_simd_neon128.c       |  27
-rw-r--r--  lib/sha1_simd_sse128.c        |  27
-rw-r--r--  lib/simd_avx256.h             |  89
-rw-r--r--  lib/simd_mmx64.h              |  86
-rw-r--r--  lib/simd_neon128.h            |  89
-rw-r--r--  lib/simd_sse128.h             |  89
-rw-r--r--  lib/ubc_check.c               |   2
-rw-r--r--  lib/ubc_check.h               |   4
-rw-r--r--  lib/ubc_check_simd.cinc       | 204
-rw-r--r--  lib/ubc_check_simd_avx256.c   |  21
-rw-r--r--  lib/ubc_check_simd_mmx64.c    |  21
-rw-r--r--  lib/ubc_check_simd_neon128.c  |  21
-rw-r--r--  lib/ubc_check_simd_sse128.c   |  21
-rw-r--r--  lib/ubc_check_verify.c        | 494
19 files changed, 76 insertions(+), 2243 deletions(-)
diff --git a/lib/sha1.c b/lib/sha1.c
index a1b2423..66b03f3 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -8,6 +8,7 @@
#include <string.h>
#include <memory.h>
#include <stdio.h>
+#include <stdlib.h>
#include "sha1.h"
#include "ubc_check.h"
@@ -69,6 +70,7 @@
#define SHA1_STORE_STATE(i) states[i][0] = a; states[i][1] = b; states[i][2] = c; states[i][3] = d; states[i][4] = e;
+/*
void sha1_compression(uint32_t ihv[5], const uint32_t m[16])
{
uint32_t W[80];
@@ -167,10 +169,9 @@ void sha1_compression(uint32_t ihv[5], const uint32_t m[16])
ihv[0] += a; ihv[1] += b; ihv[2] += c; ihv[3] += d; ihv[4] += e;
}
+*/
-
-
-void sha1_compression_W(uint32_t ihv[5], const uint32_t W[80])
+static void sha1_compression_W(uint32_t ihv[5], const uint32_t W[80])
{
uint32_t a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
@@ -684,7 +685,7 @@ void sha1_compression_states(uint32_t ihv[5], const uint32_t m[16], uint32_t W[8
#define SHA1_RECOMPRESS(t) \
-void sha1recompress_fast_ ## t (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5]) \
+static void sha1recompress_fast_ ## t (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5]) \
{ \
uint32_t a = state[0], b = state[1], c = state[2], d = state[3], e = state[4]; \
if (t > 79) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 79); \
@@ -852,6 +853,7 @@ void sha1recompress_fast_ ## t (uint32_t ihvin[5], uint32_t ihvout[5], const uin
ihvout[0] = ihvin[0] + a; ihvout[1] = ihvin[1] + b; ihvout[2] = ihvin[2] + c; ihvout[3] = ihvin[3] + d; ihvout[4] = ihvin[4] + e; \
}
+/*
SHA1_RECOMPRESS(0)
SHA1_RECOMPRESS(1)
SHA1_RECOMPRESS(2)
@@ -915,7 +917,9 @@ SHA1_RECOMPRESS(54)
SHA1_RECOMPRESS(55)
SHA1_RECOMPRESS(56)
SHA1_RECOMPRESS(57)
+*/
SHA1_RECOMPRESS(58)
+/*
SHA1_RECOMPRESS(59)
SHA1_RECOMPRESS(60)
@@ -923,7 +927,9 @@ SHA1_RECOMPRESS(61)
SHA1_RECOMPRESS(62)
SHA1_RECOMPRESS(63)
SHA1_RECOMPRESS(64)
+*/
SHA1_RECOMPRESS(65)
+/*
SHA1_RECOMPRESS(66)
SHA1_RECOMPRESS(67)
SHA1_RECOMPRESS(68)
@@ -939,7 +945,9 @@ SHA1_RECOMPRESS(76)
SHA1_RECOMPRESS(77)
SHA1_RECOMPRESS(78)
SHA1_RECOMPRESS(79)
+*/
+/*
sha1_recompression_type sha1_recompression_step[80] =
{
sha1recompress_fast_0, sha1recompress_fast_1, sha1recompress_fast_2, sha1recompress_fast_3, sha1recompress_fast_4, sha1recompress_fast_5, sha1recompress_fast_6, sha1recompress_fast_7, sha1recompress_fast_8, sha1recompress_fast_9,
@@ -951,6 +959,10 @@ sha1_recompression_type sha1_recompression_step[80] =
sha1recompress_fast_60, sha1recompress_fast_61, sha1recompress_fast_62, sha1recompress_fast_63, sha1recompress_fast_64, sha1recompress_fast_65, sha1recompress_fast_66, sha1recompress_fast_67, sha1recompress_fast_68, sha1recompress_fast_69,
sha1recompress_fast_70, sha1recompress_fast_71, sha1recompress_fast_72, sha1recompress_fast_73, sha1recompress_fast_74, sha1recompress_fast_75, sha1recompress_fast_76, sha1recompress_fast_77, sha1recompress_fast_78, sha1recompress_fast_79,
};
+*/
+
+
+
void sha1_process(SHA1_CTX* ctx, const uint32_t block[16])
{
@@ -983,28 +995,34 @@ void sha1_process(SHA1_CTX* ctx, const uint32_t block[16])
{
for (i = 0; sha1_dvs[i].dvType != 0; ++i)
{
- if ((0 == ctx->ubc_check) || (((uint32_t)(1) << sha1_dvs[i].maskb) & ubc_dv_mask[sha1_dvs[i].maski]))
- {
- for (j = 0; j < 80; ++j)
- ctx->m2[j] = ctx->m1[j] ^ sha1_dvs[i].dm[j];
- (sha1_recompression_step[sha1_dvs[i].testt])(ctx->ihv2, ihvtmp, ctx->m2, ctx->states[sha1_dvs[i].testt]);
- // to verify SHA-1 collision detection code with collisions for reduced-step SHA-1
- if ((ihvtmp[0] == ctx->ihv[0] && ihvtmp[1] == ctx->ihv[1] && ihvtmp[2] == ctx->ihv[2] && ihvtmp[3] == ctx->ihv[3] && ihvtmp[4] == ctx->ihv[4])
- || (ctx->reduced_round_coll && ctx->ihv1[0] == ctx->ihv2[0] && ctx->ihv1[1] == ctx->ihv2[1] && ctx->ihv1[2] == ctx->ihv2[2] && ctx->ihv1[3] == ctx->ihv2[3] && ctx->ihv1[4] == ctx->ihv2[4]))
- {
- ctx->found_collision = 1;
+ for (j = 0; j < 80; ++j)
+ ctx->m2[j] = ctx->m1[j] ^ sha1_dvs[i].dm[j];
- if (ctx->callback != NULL)
- ctx->callback(ctx->total - 64, ctx->ihv1, ctx->ihv2, ctx->m1, ctx->m2);
-
- if (ctx->safe_hash)
- {
- sha1_compression_W(ctx->ihv, ctx->m1);
- sha1_compression_W(ctx->ihv, ctx->m1);
- }
+ /* (sha1_recompression_step[sha1_dvs[i].testt])(ctx->ihv2, ihvtmp, ctx->m2, ctx->states[sha1_dvs[i].testt]); */
+ switch (sha1_dvs[i].testt)
+ {
+ case 58:
+ sha1recompress_fast_58(ctx->ihv2, ihvtmp, ctx->m2, ctx->states[sha1_dvs[i].testt]);
+ break;
+ case 65:
+ sha1recompress_fast_65(ctx->ihv2, ihvtmp, ctx->m2, ctx->states[sha1_dvs[i].testt]);
+ break;
+ default:
+ abort();
+ }
+ /* to verify SHA-1 collision detection code with collisions for reduced-step SHA-1 */
+ if ((0 == ((ihvtmp[0] ^ ctx->ihv[0]) | (ihvtmp[1] ^ ctx->ihv[1]) | (ihvtmp[2] ^ ctx->ihv[2]) | (ihvtmp[3] ^ ctx->ihv[3]) | (ihvtmp[4] ^ ctx->ihv[4])))
+ || (ctx->reduced_round_coll && 0==((ctx->ihv1[0] ^ ctx->ihv2[0]) | (ctx->ihv1[1] ^ ctx->ihv2[1]) | (ctx->ihv1[2] ^ ctx->ihv2[2]) | (ctx->ihv1[3] ^ ctx->ihv2[3]) | (ctx->ihv1[4] ^ ctx->ihv2[4]))))
+ {
+ ctx->found_collision = 1;
- break;
+ if (ctx->safe_hash)
+ {
+ sha1_compression_W(ctx->ihv, ctx->m1);
+ sha1_compression_W(ctx->ihv, ctx->m1);
}
+
+ break;
}
}
}
@@ -1065,7 +1083,7 @@ void SHA1DCSetCallback(SHA1_CTX* ctx, collision_block_callback callback)
ctx->callback = callback;
}
-void SHA1DCUpdate(SHA1_CTX* ctx, const char* buf, unsigned len)
+void SHA1DCUpdate(SHA1_CTX* ctx, const char* buf, size_t len)
{
unsigned left, fill;
if (len == 0)
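
Note on the lib/sha1.c hunks above: the merge drops the 80-entry sha1_recompression_step function-pointer table in favour of a switch over the only two step values the built-in disturbance vectors actually test (58 and 65), and SHA1DCUpdate now takes its length as size_t. Below is a minimal usage sketch of the one-shot API declared in lib/sha1.h; the function names and signatures come from this diff, while the main() harness and build setup are illustrative assumptions.

#include <stdio.h>
#include <string.h>
#include "sha1.h"                 /* lib/sha1.h from this repository */

int main(void)
{
    SHA1_CTX ctx;
    unsigned char hash[20];
    const char *msg = "hello world";
    int i, coll;

    SHA1DCInit(&ctx);
    SHA1DCUpdate(&ctx, msg, strlen(msg));   /* length parameter is size_t after this merge */
    coll = SHA1DCFinal(hash, &ctx);         /* non-zero means a collision attack was detected */

    for (i = 0; i < 20; ++i)
        printf("%02x", hash[i]);
    printf(coll ? "  *** collision detected ***\n" : "\n");
    return 0;
}
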
diff --git a/lib/sha1.h b/lib/sha1.h
index 70dff7f..04ccfb5 100644
--- a/lib/sha1.h
+++ b/lib/sha1.h
@@ -11,17 +11,18 @@ extern "C" {
#include <stdint.h>
-// uses SHA-1 message expansion to expand the first 16 words of W[] to 80 words
-void sha1_message_expansion(uint32_t W[80]);
+/* uses SHA-1 message expansion to expand the first 16 words of W[] to 80 words */
+/* void sha1_message_expansion(uint32_t W[80]); */
-// sha-1 compression function; first version takes a message block pre-parsed as 16 32-bit integers, second version takes an already expanded message)
-void sha1_compression(uint32_t ihv[5], const uint32_t m[16]);
-void sha1_compression_W(uint32_t ihv[5], const uint32_t W[80]);
+/* sha-1 compression function; first version takes a message block pre-parsed as 16 32-bit integers, second version takes an already expanded message) */
+/* void sha1_compression(uint32_t ihv[5], const uint32_t m[16]);
+void sha1_compression_W(uint32_t ihv[5], const uint32_t W[80]); */
-// same as sha1_compression_W, but additionally store intermediate states
-// only stores states ii (the state between step ii-1 and step ii) when DOSTORESTATEii is defined in ubc_check.h
+/* same as sha1_compression_W, but additionally store intermediate states */
+/* only stores states ii (the state between step ii-1 and step ii) when DOSTORESTATEii is defined in ubc_check.h */
void sha1_compression_states(uint32_t ihv[5], const uint32_t m[16], uint32_t W[80], uint32_t states[80][5]);
+/*
// function type for sha1_recompression_step_T (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5])
// where 0 <= T < 80
// me2 is an expanded message (the expansion of an original message block XOR'ed with a disturbance vector's message block difference)
@@ -29,16 +30,17 @@ void sha1_compression_states(uint32_t ihv[5], const uint32_t m[16], uint32_t W[8
// the function will return:
// ihvin: the reconstructed input chaining value
// ihvout: the reconstructed output chaining value
+*/
typedef void(*sha1_recompression_type)(uint32_t*, uint32_t*, const uint32_t*, const uint32_t*);
-// table of sha1_recompression_step_0, ... , sha1_recompression_step_79
-extern sha1_recompression_type sha1_recompression_step[80];
+/* table of sha1_recompression_step_0, ... , sha1_recompression_step_79 */
+/* extern sha1_recompression_type sha1_recompression_step[80];*/
-// a callback function type that can be set to be called when a collision block has been found:
-// void collision_block_callback(uint64_t byteoffset, const uint32_t ihvin1[5], const uint32_t ihvin2[5], const uint32_t m1[80], const uint32_t m2[80])
+/* a callback function type that can be set to be called when a collision block has been found: */
+/* void collision_block_callback(uint64_t byteoffset, const uint32_t ihvin1[5], const uint32_t ihvin2[5], const uint32_t m1[80], const uint32_t m2[80]) */
typedef void(*collision_block_callback)(uint64_t, const uint32_t*, const uint32_t*, const uint32_t*, const uint32_t*);
-// the SHA-1 context
+/* the SHA-1 context */
typedef struct {
uint64_t total;
uint32_t ihv[5];
@@ -57,9 +59,10 @@ typedef struct {
uint32_t states[80][5];
} SHA1_CTX;
-// initialize SHA-1 context
+/* initialize SHA-1 context */
void SHA1DCInit(SHA1_CTX*);
+/*
// function to enable safe SHA-1 hashing:
// collision attacks are thwarted by hashing a detected near-collision block 3 times
// think of it as extending SHA-1 from 80-steps to 240-steps for such blocks:
@@ -71,29 +74,30 @@ void SHA1DCInit(SHA1_CTX*);
// but it will result in a different SHA-1 hash for messages where a collision attack was detected
// this will automatically invalidate SHA-1 based digital signature forgeries
// enabled by default
+*/
void SHA1DCSetSafeHash(SHA1_CTX*, int);
-// function to disable or enable the use of Unavoidable Bitconditions (provides a significant speed up)
-// enabled by default
+/* function to disable or enable the use of Unavoidable Bitconditions (provides a significant speed up) */
+/* enabled by default */
void SHA1DCSetUseUBC(SHA1_CTX*, int);
-// function to disable or enable the use of Collision Detection
-// enabled by default
+/* function to disable or enable the use of Collision Detection */
+/* enabled by default */
void SHA1DCSetUseDetectColl(SHA1_CTX* ctx, int detect_coll);
-// function to disable or enable the detection of reduced-round SHA-1 collisions
-// disabled by default
+/* function to disable or enable the detection of reduced-round SHA-1 collisions */
+/* disabled by default */
void SHA1DCSetDetectReducedRoundCollision(SHA1_CTX*, int);
-// function to set a callback function, pass NULL to disable
-// by default no callback set
+/* function to set a callback function, pass NULL to disable */
+/* by default no callback set */
void SHA1DCSetCallback(SHA1_CTX*, collision_block_callback);
-// update SHA-1 context with buffer contents
-void SHA1DCUpdate(SHA1_CTX*, const char*, unsigned);
+/* update SHA-1 context with buffer contents */
+void SHA1DCUpdate(SHA1_CTX*, const char*, size_t);
-// obtain SHA-1 hash from SHA-1 context
-// returns: 0 = no collision detected, otherwise = collision found => warn user for active attack
+/* obtain SHA-1 hash from SHA-1 context */
+/* returns: 0 = no collision detected, otherwise = collision found => warn user for active attack */
int SHA1DCFinal(unsigned char[20], SHA1_CTX*);
#if defined(__cplusplus)
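
The lib/sha1.h diff above also covers the optional controls: a collision-block callback plus the safe-hash, UBC and reduced-round-detection switches. A hedged sketch of wiring them up follows; only the declarations are taken from the header, the callback body and the configure_ctx helper are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>
#include "sha1.h"

/* Illustrative callback matching collision_block_callback from lib/sha1.h;
   the body is an assumption, only the signature comes from the header. */
static void report_collision(uint64_t byteoffset,
                             const uint32_t ihvin1[5], const uint32_t ihvin2[5],
                             const uint32_t m1[80], const uint32_t m2[80])
{
    (void)ihvin1; (void)ihvin2; (void)m1; (void)m2;
    fprintf(stderr, "near-collision block at byte offset %llu\n",
            (unsigned long long)byteoffset);
}

/* Hypothetical helper that sets the documented defaults explicitly. */
void configure_ctx(SHA1_CTX *ctx)
{
    SHA1DCInit(ctx);
    SHA1DCSetCallback(ctx, report_collision);        /* pass NULL to disable */
    SHA1DCSetSafeHash(ctx, 1);                       /* enabled by default */
    SHA1DCSetUseUBC(ctx, 1);                         /* enabled by default */
    SHA1DCSetDetectReducedRoundCollision(ctx, 0);    /* disabled by default */
}
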
diff --git a/lib/sha1_simd.cinc b/lib/sha1_simd.cinc
deleted file mode 100644
index fed569d..0000000
--- a/lib/sha1_simd.cinc
+++ /dev/null
@@ -1,949 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-#include <string.h>
-#include <memory.h>
-
-#include "ubc_check.h"
-
-#define sha1_f1(b,c,d) SIMD_XOR_VV(d,SIMD_AND_VV(b,SIMD_XOR_VV(c,d)))
-#define sha1_f2(b,c,d) SIMD_XOR_VV(b,SIMD_XOR_VV(c,d))
-#define sha1_f3(b,c,d) SIMD_OR_VV(SIMD_AND_VV(b,SIMD_OR_VV(c,d)),SIMD_AND_VV(c,d))
-#define sha1_f4(b,c,d) SIMD_XOR_VV(b,SIMD_XOR_VV(c,d))
-#define HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, m, t) \
- { e = SIMD_ADD_VV(e,SIMD_ADD_VV(sha1_f1(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x5A827999)))); b = SIMD_ROL_V(b, 30); }
-#define HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, m, t) \
- { e = SIMD_ADD_VV(e,SIMD_ADD_VV(sha1_f2(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x6ED9EBA1)))); b = SIMD_ROL_V(b, 30); }
-#define HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, m, t) \
- { e = SIMD_ADD_VV(e,SIMD_ADD_VV(sha1_f3(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x8F1BBCDC)))); b = SIMD_ROL_V(b, 30); }
-#define HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, m, t) \
- { e = SIMD_ADD_VV(e,SIMD_ADD_VV(sha1_f4(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0xCA62C1D6)))); b = SIMD_ROL_V(b, 30); }
-
-#define HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, m, t) \
- { b = SIMD_ROR_V(b, 30); e = SIMD_SUB_VV(e,SIMD_ADD_VV(sha1_f1(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x5A827999)))); }
-#define HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, m, t) \
- { b = SIMD_ROR_V(b, 30); e = SIMD_SUB_VV(e,SIMD_ADD_VV(sha1_f2(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x6ED9EBA1)))); }
-#define HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, m, t) \
- { b = SIMD_ROR_V(b, 30); e = SIMD_SUB_VV(e,SIMD_ADD_VV(sha1_f3(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0x8F1BBCDC)))); }
-#define HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, m, t) \
- { b = SIMD_ROR_V(b, 30); e = SIMD_SUB_VV(e,SIMD_ADD_VV(sha1_f4(b,c,d),SIMD_ADD_VV(SIMD_ROL_V(a, 5),SIMD_ADD_VW(m[t],0xCA62C1D6)))); }
-
-void SHA1_MESSAGE_EXPANSION_SIMD(SIMD_WORD* W)
-{
- for (unsigned i = 16; i < 80; ++i)
- W[i] = SIMD_ROL_V(SIMD_XOR_VV( SIMD_XOR_VV(W[i - 3],W[i - 8]),SIMD_XOR_VV(W[i - 14],W[i - 16]) ), 1);
-}
-
-void SHA1_COMPRESSION_SIMD(SIMD_WORD* ihv, const SIMD_WORD* m)
-{
- SIMD_WORD W[80];
-
- memcpy(W, m, 16 * sizeof(SIMD_WORD));
- for (unsigned i = 16; i < 80; ++i)
- W[i] = SIMD_ROL_V(SIMD_XOR_VV( SIMD_XOR_VV(W[i - 3],W[i - 8]),SIMD_XOR_VV(W[i - 14],W[i - 16]) ), 1);
-
- SIMD_WORD a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
-
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 0);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 1);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 2);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 3);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 4);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 5);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 6);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 7);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 8);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 9);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 10);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 11);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 12);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 13);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 14);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 15);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 16);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 17);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 18);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 19);
-
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 20);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 21);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 22);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 23);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 24);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 25);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 26);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 27);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 28);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 29);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 30);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 31);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 32);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 33);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 34);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 35);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 36);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 37);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 38);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 39);
-
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 40);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 41);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 42);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 43);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 44);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 45);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 46);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 47);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 48);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 49);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 50);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 51);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 52);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 53);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 54);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 55);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 56);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 57);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 58);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 59);
-
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 60);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 61);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 62);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 63);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 64);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 65);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 66);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 67);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 68);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 69);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 70);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 71);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 72);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 73);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 74);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 75);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 76);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 77);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 78);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 79);
-
- ihv[0] = SIMD_ADD_VV(ihv[0],a);
- ihv[1] = SIMD_ADD_VV(ihv[1],b);
- ihv[2] = SIMD_ADD_VV(ihv[2],c);
- ihv[3] = SIMD_ADD_VV(ihv[3],d);
- ihv[4] = SIMD_ADD_VV(ihv[4],e);
-}
-
-
-
-void SHA1_COMPRESSION_W_SIMD(SIMD_WORD* ihv, const SIMD_WORD* W)
-{
- SIMD_WORD a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
-
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 0);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 1);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 2);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 3);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 4);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 5);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 6);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 7);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 8);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 9);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 10);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 11);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 12);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 13);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 14);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 15);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 16);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 17);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 18);
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 19);
-
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 20);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 21);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 22);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 23);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 24);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 25);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 26);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 27);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 28);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 29);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 30);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 31);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 32);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 33);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 34);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 35);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 36);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 37);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 38);
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 39);
-
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 40);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 41);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 42);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 43);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 44);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 45);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 46);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 47);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 48);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 49);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 50);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 51);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 52);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 53);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 54);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 55);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 56);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 57);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 58);
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 59);
-
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 60);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 61);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 62);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 63);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 64);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 65);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 66);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 67);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 68);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 69);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 70);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 71);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 72);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 73);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 74);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 75);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 76);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 77);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 78);
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 79);
-
- ihv[0] = SIMD_ADD_VV(ihv[0],a);
- ihv[1] = SIMD_ADD_VV(ihv[1],b);
- ihv[2] = SIMD_ADD_VV(ihv[2],c);
- ihv[3] = SIMD_ADD_VV(ihv[3],d);
- ihv[4] = SIMD_ADD_VV(ihv[4],e);
-}
-
-
-
-
-
-#define SHA1_STORE_STATE(i) states[(i*5)+0] = a; states[(i*5)+1] = b; states[(i*5)+2] = c; states[(i*5)+3] = d; states[(i*5)+4] = e;
-
-
-void SHA1_COMPRESSION_STATES_SIMD(SIMD_WORD* ihv, const SIMD_WORD* W, SIMD_WORD* states)
-{
- SIMD_WORD a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
-
-#ifdef DOSTORESTATE00
- SHA1_STORE_STATE(0)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 0);
-
-#ifdef DOSTORESTATE01
- SHA1_STORE_STATE(1)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 1);
-
-#ifdef DOSTORESTATE02
- SHA1_STORE_STATE(2)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 2);
-
-#ifdef DOSTORESTATE03
- SHA1_STORE_STATE(3)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 3);
-
-#ifdef DOSTORESTATE04
- SHA1_STORE_STATE(4)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 4);
-
-#ifdef DOSTORESTATE05
- SHA1_STORE_STATE(5)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 5);
-
-#ifdef DOSTORESTATE06
- SHA1_STORE_STATE(6)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 6);
-
-#ifdef DOSTORESTATE07
- SHA1_STORE_STATE(7)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 7);
-
-#ifdef DOSTORESTATE08
- SHA1_STORE_STATE(8)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 8);
-
-#ifdef DOSTORESTATE09
- SHA1_STORE_STATE(9)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 9);
-
-#ifdef DOSTORESTATE10
- SHA1_STORE_STATE(10)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 10);
-
-#ifdef DOSTORESTATE11
- SHA1_STORE_STATE(11)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 11);
-
-#ifdef DOSTORESTATE12
- SHA1_STORE_STATE(12)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 12);
-
-#ifdef DOSTORESTATE13
- SHA1_STORE_STATE(13)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 13);
-
-#ifdef DOSTORESTATE14
- SHA1_STORE_STATE(14)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 14);
-
-#ifdef DOSTORESTATE15
- SHA1_STORE_STATE(15)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 15);
-
-#ifdef DOSTORESTATE16
- SHA1_STORE_STATE(16)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 16);
-
-#ifdef DOSTORESTATE17
- SHA1_STORE_STATE(17)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 17);
-
-#ifdef DOSTORESTATE18
- SHA1_STORE_STATE(18)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 18);
-
-#ifdef DOSTORESTATE19
- SHA1_STORE_STATE(19)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 19);
-
-
-
-#ifdef DOSTORESTATE20
- SHA1_STORE_STATE(20)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 20);
-
-#ifdef DOSTORESTATE21
- SHA1_STORE_STATE(21)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 21);
-
-#ifdef DOSTORESTATE22
- SHA1_STORE_STATE(22)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 22);
-
-#ifdef DOSTORESTATE23
- SHA1_STORE_STATE(23)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 23);
-
-#ifdef DOSTORESTATE24
- SHA1_STORE_STATE(24)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 24);
-
-#ifdef DOSTORESTATE25
- SHA1_STORE_STATE(25)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 25);
-
-#ifdef DOSTORESTATE26
- SHA1_STORE_STATE(26)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 26);
-
-#ifdef DOSTORESTATE27
- SHA1_STORE_STATE(27)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 27);
-
-#ifdef DOSTORESTATE28
- SHA1_STORE_STATE(28)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 28);
-
-#ifdef DOSTORESTATE29
- SHA1_STORE_STATE(29)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 29);
-
-#ifdef DOSTORESTATE30
- SHA1_STORE_STATE(30)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 30);
-
-#ifdef DOSTORESTATE31
- SHA1_STORE_STATE(31)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 31);
-
-#ifdef DOSTORESTATE32
- SHA1_STORE_STATE(32)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 32);
-
-#ifdef DOSTORESTATE33
- SHA1_STORE_STATE(33)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 33);
-
-#ifdef DOSTORESTATE34
- SHA1_STORE_STATE(34)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 34);
-
-#ifdef DOSTORESTATE35
- SHA1_STORE_STATE(35)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 35);
-
-#ifdef DOSTORESTATE36
- SHA1_STORE_STATE(36)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 36);
-
-#ifdef DOSTORESTATE37
- SHA1_STORE_STATE(37)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 37);
-
-#ifdef DOSTORESTATE38
- SHA1_STORE_STATE(38)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 38);
-
-#ifdef DOSTORESTATE39
- SHA1_STORE_STATE(39)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 39);
-
-
-
-#ifdef DOSTORESTATE40
- SHA1_STORE_STATE(40)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 40);
-
-#ifdef DOSTORESTATE41
- SHA1_STORE_STATE(41)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 41);
-
-#ifdef DOSTORESTATE42
- SHA1_STORE_STATE(42)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 42);
-
-#ifdef DOSTORESTATE43
- SHA1_STORE_STATE(43)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 43);
-
-#ifdef DOSTORESTATE44
- SHA1_STORE_STATE(44)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 44);
-
-#ifdef DOSTORESTATE45
- SHA1_STORE_STATE(45)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 45);
-
-#ifdef DOSTORESTATE46
- SHA1_STORE_STATE(46)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 46);
-
-#ifdef DOSTORESTATE47
- SHA1_STORE_STATE(47)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 47);
-
-#ifdef DOSTORESTATE48
- SHA1_STORE_STATE(48)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 48);
-
-#ifdef DOSTORESTATE49
- SHA1_STORE_STATE(49)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 49);
-
-#ifdef DOSTORESTATE50
- SHA1_STORE_STATE(50)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 50);
-
-#ifdef DOSTORESTATE51
- SHA1_STORE_STATE(51)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 51);
-
-#ifdef DOSTORESTATE52
- SHA1_STORE_STATE(52)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 52);
-
-#ifdef DOSTORESTATE53
- SHA1_STORE_STATE(53)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 53);
-
-#ifdef DOSTORESTATE54
- SHA1_STORE_STATE(54)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 54);
-
-#ifdef DOSTORESTATE55
- SHA1_STORE_STATE(55)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 55);
-
-#ifdef DOSTORESTATE56
- SHA1_STORE_STATE(56)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 56);
-
-#ifdef DOSTORESTATE57
- SHA1_STORE_STATE(57)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 57);
-
-#ifdef DOSTORESTATE58
- SHA1_STORE_STATE(58)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 58);
-
-#ifdef DOSTORESTATE59
- SHA1_STORE_STATE(59)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 59);
-
-
-
-
-#ifdef DOSTORESTATE60
- SHA1_STORE_STATE(60)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 60);
-
-#ifdef DOSTORESTATE61
- SHA1_STORE_STATE(61)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 61);
-
-#ifdef DOSTORESTATE62
- SHA1_STORE_STATE(62)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 62);
-
-#ifdef DOSTORESTATE63
- SHA1_STORE_STATE(63)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 63);
-
-#ifdef DOSTORESTATE64
- SHA1_STORE_STATE(64)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 64);
-
-#ifdef DOSTORESTATE65
- SHA1_STORE_STATE(65)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 65);
-
-#ifdef DOSTORESTATE66
- SHA1_STORE_STATE(66)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 66);
-
-#ifdef DOSTORESTATE67
- SHA1_STORE_STATE(67)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 67);
-
-#ifdef DOSTORESTATE68
- SHA1_STORE_STATE(68)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 68);
-
-#ifdef DOSTORESTATE69
- SHA1_STORE_STATE(69)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 69);
-
-#ifdef DOSTORESTATE70
- SHA1_STORE_STATE(70)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 70);
-
-#ifdef DOSTORESTATE71
- SHA1_STORE_STATE(71)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 71);
-
-#ifdef DOSTORESTATE72
- SHA1_STORE_STATE(72)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 72);
-
-#ifdef DOSTORESTATE73
- SHA1_STORE_STATE(73)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 73);
-
-#ifdef DOSTORESTATE74
- SHA1_STORE_STATE(74)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 74);
-
-#ifdef DOSTORESTATE75
- SHA1_STORE_STATE(75)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 75);
-
-#ifdef DOSTORESTATE76
- SHA1_STORE_STATE(76)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 76);
-
-#ifdef DOSTORESTATE77
- SHA1_STORE_STATE(77)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 77);
-
-#ifdef DOSTORESTATE78
- SHA1_STORE_STATE(78)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 78);
-
-#ifdef DOSTORESTATE79
- SHA1_STORE_STATE(79)
-#endif
- HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 79);
-
-
-
- ihv[0] = SIMD_ADD_VV(ihv[0],a);
- ihv[1] = SIMD_ADD_VV(ihv[1],b);
- ihv[2] = SIMD_ADD_VV(ihv[2],c);
- ihv[3] = SIMD_ADD_VV(ihv[3],d);
- ihv[4] = SIMD_ADD_VV(ihv[4],e);
-}
-
-
-
-
-
-
-
-
-
-
-
-typedef void (*sha1_recompression_simd_type)(SIMD_WORD*, SIMD_WORD*, const SIMD_WORD*, const SIMD_WORD*);
-
-#define SHA1_RECOMPRESS(tttt) \
-void SHA1_RECOMPRESSION_SIMD(tttt) (SIMD_WORD* ihvin, SIMD_WORD* ihvout, const SIMD_WORD* me2, const SIMD_WORD* state) \
-{ \
- SIMD_WORD a = state[0], b = state[1], c = state[2], d = state[3], e = state[4]; \
- if (tttt > 79) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 79); \
- if (tttt > 78) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 78); \
- if (tttt > 77) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 77); \
- if (tttt > 76) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 76); \
- if (tttt > 75) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 75); \
- if (tttt > 74) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 74); \
- if (tttt > 73) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 73); \
- if (tttt > 72) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 72); \
- if (tttt > 71) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 71); \
- if (tttt > 70) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 70); \
- if (tttt > 69) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 69); \
- if (tttt > 68) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 68); \
- if (tttt > 67) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 67); \
- if (tttt > 66) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 66); \
- if (tttt > 65) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 65); \
- if (tttt > 64) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 64); \
- if (tttt > 63) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 63); \
- if (tttt > 62) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 62); \
- if (tttt > 61) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 61); \
- if (tttt > 60) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 60); \
- if (tttt > 59) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 59); \
- if (tttt > 58) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 58); \
- if (tttt > 57) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 57); \
- if (tttt > 56) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 56); \
- if (tttt > 55) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 55); \
- if (tttt > 54) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 54); \
- if (tttt > 53) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 53); \
- if (tttt > 52) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 52); \
- if (tttt > 51) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 51); \
- if (tttt > 50) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 50); \
- if (tttt > 49) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 49); \
- if (tttt > 48) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 48); \
- if (tttt > 47) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 47); \
- if (tttt > 46) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 46); \
- if (tttt > 45) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 45); \
- if (tttt > 44) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 44); \
- if (tttt > 43) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 43); \
- if (tttt > 42) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 42); \
- if (tttt > 41) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 41); \
- if (tttt > 40) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 40); \
- if (tttt > 39) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 39); \
- if (tttt > 38) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 38); \
- if (tttt > 37) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 37); \
- if (tttt > 36) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 36); \
- if (tttt > 35) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 35); \
- if (tttt > 34) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 34); \
- if (tttt > 33) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 33); \
- if (tttt > 32) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 32); \
- if (tttt > 31) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 31); \
- if (tttt > 30) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 30); \
- if (tttt > 29) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 29); \
- if (tttt > 28) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 28); \
- if (tttt > 27) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 27); \
- if (tttt > 26) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 26); \
- if (tttt > 25) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 25); \
- if (tttt > 24) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 24); \
- if (tttt > 23) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 23); \
- if (tttt > 22) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 22); \
- if (tttt > 21) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 21); \
- if (tttt > 20) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 20); \
- if (tttt > 19) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 19); \
- if (tttt > 18) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 18); \
- if (tttt > 17) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 17); \
- if (tttt > 16) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 16); \
- if (tttt > 15) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 15); \
- if (tttt > 14) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 14); \
- if (tttt > 13) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 13); \
- if (tttt > 12) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 12); \
- if (tttt > 11) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 11); \
- if (tttt > 10) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 10); \
- if (tttt > 9) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 9); \
- if (tttt > 8) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 8); \
- if (tttt > 7) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 7); \
- if (tttt > 6) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 6); \
- if (tttt > 5) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 5); \
- if (tttt > 4) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 4); \
- if (tttt > 3) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 3); \
- if (tttt > 2) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 2); \
- if (tttt > 1) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 1); \
- if (tttt > 0) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 0); \
- ihvin[0] = a; ihvin[1] = b; ihvin[2] = c; ihvin[3] = d; ihvin[4] = e; \
- a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; \
- if (tttt <= 0) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 0); \
- if (tttt <= 1) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 1); \
- if (tttt <= 2) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 2); \
- if (tttt <= 3) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 3); \
- if (tttt <= 4) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 4); \
- if (tttt <= 5) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 5); \
- if (tttt <= 6) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 6); \
- if (tttt <= 7) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 7); \
- if (tttt <= 8) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 8); \
- if (tttt <= 9) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 9); \
- if (tttt <= 10) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 10); \
- if (tttt <= 11) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 11); \
- if (tttt <= 12) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 12); \
- if (tttt <= 13) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 13); \
- if (tttt <= 14) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 14); \
- if (tttt <= 15) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 15); \
- if (tttt <= 16) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 16); \
- if (tttt <= 17) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 17); \
- if (tttt <= 18) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 18); \
- if (tttt <= 19) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 19); \
- if (tttt <= 20) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 20); \
- if (tttt <= 21) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 21); \
- if (tttt <= 22) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 22); \
- if (tttt <= 23) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 23); \
- if (tttt <= 24) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 24); \
- if (tttt <= 25) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 25); \
- if (tttt <= 26) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 26); \
- if (tttt <= 27) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 27); \
- if (tttt <= 28) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 28); \
- if (tttt <= 29) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 29); \
- if (tttt <= 30) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 30); \
- if (tttt <= 31) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 31); \
- if (tttt <= 32) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 32); \
- if (tttt <= 33) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 33); \
- if (tttt <= 34) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 34); \
- if (tttt <= 35) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 35); \
- if (tttt <= 36) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 36); \
- if (tttt <= 37) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 37); \
- if (tttt <= 38) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 38); \
- if (tttt <= 39) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 39); \
- if (tttt <= 40) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 40); \
- if (tttt <= 41) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 41); \
- if (tttt <= 42) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 42); \
- if (tttt <= 43) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 43); \
- if (tttt <= 44) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 44); \
- if (tttt <= 45) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 45); \
- if (tttt <= 46) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 46); \
- if (tttt <= 47) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 47); \
- if (tttt <= 48) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 48); \
- if (tttt <= 49) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 49); \
- if (tttt <= 50) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 50); \
- if (tttt <= 51) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 51); \
- if (tttt <= 52) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 52); \
- if (tttt <= 53) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 53); \
- if (tttt <= 54) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 54); \
- if (tttt <= 55) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 55); \
- if (tttt <= 56) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 56); \
- if (tttt <= 57) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 57); \
- if (tttt <= 58) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 58); \
- if (tttt <= 59) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 59); \
- if (tttt <= 60) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 60); \
- if (tttt <= 61) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 61); \
- if (tttt <= 62) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 62); \
- if (tttt <= 63) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 63); \
- if (tttt <= 64) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 64); \
- if (tttt <= 65) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 65); \
- if (tttt <= 66) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 66); \
- if (tttt <= 67) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 67); \
- if (tttt <= 68) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 68); \
- if (tttt <= 69) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 69); \
- if (tttt <= 70) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 70); \
- if (tttt <= 71) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 71); \
- if (tttt <= 72) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 72); \
- if (tttt <= 73) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 73); \
- if (tttt <= 74) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 74); \
- if (tttt <= 75) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 75); \
- if (tttt <= 76) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 76); \
- if (tttt <= 77) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 77); \
- if (tttt <= 78) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 78); \
- if (tttt <= 79) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 79); \
- ihvout[0] = SIMD_ADD_VV(ihvin[0],a); \
- ihvout[1] = SIMD_ADD_VV(ihvin[1],b); \
- ihvout[2] = SIMD_ADD_VV(ihvin[2],c); \
- ihvout[3] = SIMD_ADD_VV(ihvin[3],d); \
- ihvout[4] = SIMD_ADD_VV(ihvin[4],e); \
-}
-
-SHA1_RECOMPRESS(0)
-SHA1_RECOMPRESS(1)
-SHA1_RECOMPRESS(2)
-SHA1_RECOMPRESS(3)
-SHA1_RECOMPRESS(4)
-SHA1_RECOMPRESS(5)
-SHA1_RECOMPRESS(6)
-SHA1_RECOMPRESS(7)
-SHA1_RECOMPRESS(8)
-SHA1_RECOMPRESS(9)
-
-SHA1_RECOMPRESS(10)
-SHA1_RECOMPRESS(11)
-SHA1_RECOMPRESS(12)
-SHA1_RECOMPRESS(13)
-SHA1_RECOMPRESS(14)
-SHA1_RECOMPRESS(15)
-SHA1_RECOMPRESS(16)
-SHA1_RECOMPRESS(17)
-SHA1_RECOMPRESS(18)
-SHA1_RECOMPRESS(19)
-
-SHA1_RECOMPRESS(20)
-SHA1_RECOMPRESS(21)
-SHA1_RECOMPRESS(22)
-SHA1_RECOMPRESS(23)
-SHA1_RECOMPRESS(24)
-SHA1_RECOMPRESS(25)
-SHA1_RECOMPRESS(26)
-SHA1_RECOMPRESS(27)
-SHA1_RECOMPRESS(28)
-SHA1_RECOMPRESS(29)
-
-SHA1_RECOMPRESS(30)
-SHA1_RECOMPRESS(31)
-SHA1_RECOMPRESS(32)
-SHA1_RECOMPRESS(33)
-SHA1_RECOMPRESS(34)
-SHA1_RECOMPRESS(35)
-SHA1_RECOMPRESS(36)
-SHA1_RECOMPRESS(37)
-SHA1_RECOMPRESS(38)
-SHA1_RECOMPRESS(39)
-
-SHA1_RECOMPRESS(40)
-SHA1_RECOMPRESS(41)
-SHA1_RECOMPRESS(42)
-SHA1_RECOMPRESS(43)
-SHA1_RECOMPRESS(44)
-SHA1_RECOMPRESS(45)
-SHA1_RECOMPRESS(46)
-SHA1_RECOMPRESS(47)
-SHA1_RECOMPRESS(48)
-SHA1_RECOMPRESS(49)
-
-SHA1_RECOMPRESS(50)
-SHA1_RECOMPRESS(51)
-SHA1_RECOMPRESS(52)
-SHA1_RECOMPRESS(53)
-SHA1_RECOMPRESS(54)
-SHA1_RECOMPRESS(55)
-SHA1_RECOMPRESS(56)
-SHA1_RECOMPRESS(57)
-SHA1_RECOMPRESS(58)
-SHA1_RECOMPRESS(59)
-
-SHA1_RECOMPRESS(60)
-SHA1_RECOMPRESS(61)
-SHA1_RECOMPRESS(62)
-SHA1_RECOMPRESS(63)
-SHA1_RECOMPRESS(64)
-SHA1_RECOMPRESS(65)
-SHA1_RECOMPRESS(66)
-SHA1_RECOMPRESS(67)
-SHA1_RECOMPRESS(68)
-SHA1_RECOMPRESS(69)
-
-SHA1_RECOMPRESS(70)
-SHA1_RECOMPRESS(71)
-SHA1_RECOMPRESS(72)
-SHA1_RECOMPRESS(73)
-SHA1_RECOMPRESS(74)
-SHA1_RECOMPRESS(75)
-SHA1_RECOMPRESS(76)
-SHA1_RECOMPRESS(77)
-SHA1_RECOMPRESS(78)
-SHA1_RECOMPRESS(79)
-
-sha1_recompression_simd_type SHA1_RECOMPRESSION_TABLE_SIMD [80] =
-{
- SHA1_RECOMPRESSION_SIMD(0), SHA1_RECOMPRESSION_SIMD(1), SHA1_RECOMPRESSION_SIMD(2), SHA1_RECOMPRESSION_SIMD(3), SHA1_RECOMPRESSION_SIMD(4), SHA1_RECOMPRESSION_SIMD(5), SHA1_RECOMPRESSION_SIMD(6), SHA1_RECOMPRESSION_SIMD(7), SHA1_RECOMPRESSION_SIMD(8), SHA1_RECOMPRESSION_SIMD(9),
- SHA1_RECOMPRESSION_SIMD(10), SHA1_RECOMPRESSION_SIMD(11), SHA1_RECOMPRESSION_SIMD(12), SHA1_RECOMPRESSION_SIMD(13), SHA1_RECOMPRESSION_SIMD(14), SHA1_RECOMPRESSION_SIMD(15), SHA1_RECOMPRESSION_SIMD(16), SHA1_RECOMPRESSION_SIMD(17), SHA1_RECOMPRESSION_SIMD(18), SHA1_RECOMPRESSION_SIMD(19),
- SHA1_RECOMPRESSION_SIMD(20), SHA1_RECOMPRESSION_SIMD(21), SHA1_RECOMPRESSION_SIMD(22), SHA1_RECOMPRESSION_SIMD(23), SHA1_RECOMPRESSION_SIMD(24), SHA1_RECOMPRESSION_SIMD(25), SHA1_RECOMPRESSION_SIMD(26), SHA1_RECOMPRESSION_SIMD(27), SHA1_RECOMPRESSION_SIMD(28), SHA1_RECOMPRESSION_SIMD(29),
- SHA1_RECOMPRESSION_SIMD(30), SHA1_RECOMPRESSION_SIMD(31), SHA1_RECOMPRESSION_SIMD(32), SHA1_RECOMPRESSION_SIMD(33), SHA1_RECOMPRESSION_SIMD(34), SHA1_RECOMPRESSION_SIMD(35), SHA1_RECOMPRESSION_SIMD(36), SHA1_RECOMPRESSION_SIMD(37), SHA1_RECOMPRESSION_SIMD(38), SHA1_RECOMPRESSION_SIMD(39),
- SHA1_RECOMPRESSION_SIMD(40), SHA1_RECOMPRESSION_SIMD(41), SHA1_RECOMPRESSION_SIMD(42), SHA1_RECOMPRESSION_SIMD(43), SHA1_RECOMPRESSION_SIMD(44), SHA1_RECOMPRESSION_SIMD(45), SHA1_RECOMPRESSION_SIMD(46), SHA1_RECOMPRESSION_SIMD(47), SHA1_RECOMPRESSION_SIMD(48), SHA1_RECOMPRESSION_SIMD(49),
- SHA1_RECOMPRESSION_SIMD(50), SHA1_RECOMPRESSION_SIMD(51), SHA1_RECOMPRESSION_SIMD(52), SHA1_RECOMPRESSION_SIMD(53), SHA1_RECOMPRESSION_SIMD(54), SHA1_RECOMPRESSION_SIMD(55), SHA1_RECOMPRESSION_SIMD(56), SHA1_RECOMPRESSION_SIMD(57), SHA1_RECOMPRESSION_SIMD(58), SHA1_RECOMPRESSION_SIMD(59),
- SHA1_RECOMPRESSION_SIMD(60), SHA1_RECOMPRESSION_SIMD(61), SHA1_RECOMPRESSION_SIMD(62), SHA1_RECOMPRESSION_SIMD(63), SHA1_RECOMPRESSION_SIMD(64), SHA1_RECOMPRESSION_SIMD(65), SHA1_RECOMPRESSION_SIMD(66), SHA1_RECOMPRESSION_SIMD(67), SHA1_RECOMPRESSION_SIMD(68), SHA1_RECOMPRESSION_SIMD(69),
- SHA1_RECOMPRESSION_SIMD(70), SHA1_RECOMPRESSION_SIMD(71), SHA1_RECOMPRESSION_SIMD(72), SHA1_RECOMPRESSION_SIMD(73), SHA1_RECOMPRESSION_SIMD(74), SHA1_RECOMPRESSION_SIMD(75), SHA1_RECOMPRESSION_SIMD(76), SHA1_RECOMPRESSION_SIMD(77), SHA1_RECOMPRESSION_SIMD(78), SHA1_RECOMPRESSION_SIMD(79),
-};
diff --git a/lib/sha1_simd_avx256.c b/lib/sha1_simd_avx256.c
deleted file mode 100644
index 45bc502..0000000
--- a/lib/sha1_simd_avx256.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates avx256 code using avx256 MACROS (simd_avx256.h) and generic SIMD code (sha1_simd.cinc)
-
-#ifdef HAVE_AVX
-#include "sha1.h"
-
-#include "simd_avx256.h"
-
-#define SHA1_MESSAGE_EXPANSION_SIMD sha1_message_expansion_avx256
-#define SHA1_COMPRESSION_SIMD sha1_avx256
-#define SHA1_COMPRESSION_W_SIMD sha1_W_avx256
-#define SHA1_COMPRESSION_STATES_SIMD sha1_states_avx256
-#define SHA1_RECOMPRESSION_SIMD(t) sha1recompress_fast_ ## t ## _avx256
-#define SHA1_RECOMPRESSION_TABLE_SIMD sha1_recompression_step_avx256
-
-#include "sha1_simd.cinc"
-
-#else
-
-#pragma message "The file: sha1_simd_avx256.c is not compiled for this architecture."
-
-#endif //HAVE_AVX
diff --git a/lib/sha1_simd_mmx64.c b/lib/sha1_simd_mmx64.c
deleted file mode 100644
index 6cc860f..0000000
--- a/lib/sha1_simd_mmx64.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates sse128 code using sse128 MACROS (simd_sse128.h) and generic SIMD code (sha1_simd.cinc)
-#ifdef HAVE_MMX
-
-#include "sha1.h"
-#include "simd_mmx64.h"
-
-#define SHA1_MESSAGE_EXPANSION_SIMD sha1_message_expansion_mmx64
-#define SHA1_COMPRESSION_SIMD sha1_mmx64
-#define SHA1_COMPRESSION_W_SIMD sha1_W_mmx64
-#define SHA1_COMPRESSION_STATES_SIMD sha1_states_mmx64
-#define SHA1_RECOMPRESSION_SIMD(t) sha1recompress_fast_ ## t ## _mmx64
-#define SHA1_RECOMPRESSION_TABLE_SIMD sha1_recompression_step_mmx64
-
-#include "sha1_simd.cinc"
-
-#else
-
-#pragma message "The file: sha1_simd_mmx64.c is not compiled for this architecture."
-
-#endif //HAVE_MMX
diff --git a/lib/sha1_simd_neon128.c b/lib/sha1_simd_neon128.c
deleted file mode 100644
index 4ab25d2..0000000
--- a/lib/sha1_simd_neon128.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates neon 32x4 code using neon MACROS (arm_neon.h) and generic SIMD code (sha1_simd.cinc)
-#ifdef HAVE_NEON
-#include "sha1.h"
-
-#include "simd_neon128.h"
-
-#define SHA1_MESSAGE_EXPANSION_SIMD sha1_message_expansion_neon128
-#define SHA1_COMPRESSION_SIMD sha1_neon128
-#define SHA1_COMPRESSION_W_SIMD sha1_W_neon128
-#define SHA1_COMPRESSION_STATES_SIMD sha1_states_neon128
-#define SHA1_RECOMPRESSION_SIMD(t) sha1recompress_fast_ ## t ## _neon128
-#define SHA1_RECOMPRESSION_TABLE_SIMD sha1_recompression_step_neon128
-
-#include "sha1_simd.cinc"
-
-#else
-
-#pragma message "The file: sha1_simd_neon128.c is not compiled for this architecture."
-
-#endif //HAVE_NEON
diff --git a/lib/sha1_simd_sse128.c b/lib/sha1_simd_sse128.c
deleted file mode 100644
index cda8d17..0000000
--- a/lib/sha1_simd_sse128.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates sse128 code using sse128 MACROS (simd_sse128.h) and generic SIMD code (sha1_simd.cinc)
-#ifdef HAVE_SSE
-#include "sha1.h"
-
-#include "simd_sse128.h"
-
-#define SHA1_MESSAGE_EXPANSION_SIMD sha1_message_expansion_sse128
-#define SHA1_COMPRESSION_SIMD sha1_sse128
-#define SHA1_COMPRESSION_W_SIMD sha1_W_sse128
-#define SHA1_COMPRESSION_STATES_SIMD sha1_states_sse128
-#define SHA1_RECOMPRESSION_SIMD(t) sha1recompress_fast_ ## t ## _sse128
-#define SHA1_RECOMPRESSION_TABLE_SIMD sha1_recompression_step_sse128
-
-#include "sha1_simd.cinc"
-
-#else
-
-#pragma message "The file: sha1_simd_sse128.c is not compiled for this architecture."
-
-#endif //HAVE_SSE
diff --git a/lib/simd_avx256.h b/lib/simd_avx256.h
deleted file mode 100644
index 8f3fee7..0000000
--- a/lib/simd_avx256.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this header defines SIMD MACROS for avx256 intrinsics
-// used to generate avx256 code from generic SIMD code (sha1_simd.cinc, ubc_check_simd.cinc)
-
-#ifndef SIMD_AVX256_HEADER
-#define SIMD_AVX256_HEADER
-#ifdef HAVE_AVX
-// requires AVX2 not just AVX
-#define SIMD_VERSION avx256
-#define SIMD_VECSIZE 8
-
-#ifdef __GNUC__
-
-// TODO
-#include <immintrin.h>
-
-#define SIMD_WORD __m256i
-
-#define SIMD_ZERO _mm256_setzero_si256()
-#define SIMD_WTOV(l) _mm256_set1_epi32(l)
-#define SIMD_ADD_VV(l,r) _mm256_add_epi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm256_add_epi32(l, _mm256_set1_epi32(r))
-#define SIMD_SUB_VV(l,r) _mm256_sub_epi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm256_sub_epi32(l, _mm256_set1_epi32(r))
-#define SIMD_AND_VV(l,r) _mm256_and_si256(l,r)
-#define SIMD_AND_VW(l,r) _mm256_and_si256(l, _mm256_set1_epi32(r))
-#define SIMD_OR_VV(l,r) _mm256_or_si256(l,r)
-#define SIMD_OR_VW(l,r) _mm256_or_si256(l, _mm256_set1_epi32(r))
-#define SIMD_XOR_VV(l,r) _mm256_xor_si256(l,r)
-#define SIMD_XOR_VW(l,r) _mm256_xor_si256(l, _mm256_set1_epi32(r))
-//#define SIMD_NOT_V(l) _mm256_andnot_si256(l,l)
-#define SIMD_SHL_V(l,i) _mm256_slli_epi32(l,i)
-#define SIMD_SHR_V(l,i) _mm256_srli_epi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm256_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm256_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#else // __GNUC__
-
-// VISUAL STUDIO
-
-#include <immintrin.h>
-
-#define SIMD_WORD __m256i
-
-#define SIMD_ZERO _mm256_setzero_si256()
-#define SIMD_WTOV(l) _mm256_set1_epi32(l)
-#define SIMD_ADD_VV(l,r) _mm256_add_epi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm256_add_epi32(l, _mm256_set1_epi32(r))
-#define SIMD_SUB_VV(l,r) _mm256_sub_epi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm256_sub_epi32(l, _mm256_set1_epi32(r))
-#define SIMD_AND_VV(l,r) _mm256_and_si256(l,r)
-#define SIMD_AND_VW(l,r) _mm256_and_si256(l, _mm256_set1_epi32(r))
-#define SIMD_OR_VV(l,r) _mm256_or_si256(l,r)
-#define SIMD_OR_VW(l,r) _mm256_or_si256(l, _mm256_set1_epi32(r))
-#define SIMD_XOR_VV(l,r) _mm256_xor_si256(l,r)
-#define SIMD_XOR_VW(l,r) _mm256_xor_si256(l, _mm256_set1_epi32(r))
-//#define SIMD_NOT_V(l) _mm256_andnot_si256(l,l)
-#define SIMD_SHL_V(l,i) _mm256_slli_epi32(l,i)
-#define SIMD_SHR_V(l,i) _mm256_srli_epi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm256_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm256_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#endif // __GNUC__
-
-
-// these are general definitions for lacking SIMD operations
-
-#ifndef SIMD_NOT_V
-#define SIMD_NOT_V(l) SIMD_XOR_VW(l,0xFFFFFFFF)
-#endif
-
-#ifndef SIMD_NEG_V
-#define SIMD_NEG_V(l) SIMD_SUB_VV(SIMD_ZERO,l)
-#endif
-
-#ifndef SIMD_ROL_V
-#define SIMD_ROL_V(l,i) SIMD_OR_VV(SIMD_SHL_V(l,i),SIMD_SHR_V(l,32-i))
-#define SIMD_ROR_V(l,i) SIMD_OR_VV(SIMD_SHR_V(l,i),SIMD_SHL_V(l,32-i))
-#endif
-#endif // HAVE_AVX
-#endif // SIMD_AVX256_HEADER
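
The SIMD_ROL_V/SIMD_ROR_V fallbacks at the end of this header (and of the other simd_*.h headers) build a rotate from two shifts and an OR, since these instruction sets have no packed 32-bit rotate. A scalar sketch of the same identity follows; the assert only documents the assumption that the rotate count stays strictly between 0 and 32 (shifting a uint32_t by 32 would be undefined), which the SHA-1 rotation amounts of 1, 5 and 30 satisfy.

    #include <assert.h>
    #include <stdint.h>

    /* rotl(x, i) == (x << i) | (x >> (32 - i)) for 0 < i < 32 */
    static uint32_t rotl32(uint32_t x, unsigned i)
    {
        assert(i > 0 && i < 32);
        return (x << i) | (x >> (32u - i));
    }

    int main(void)
    {
        assert(rotl32(0x80000001u, 1)  == 0x00000003u);
        assert(rotl32(0x00000001u, 31) == 0x80000000u);
        return 0;
    }
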
diff --git a/lib/simd_mmx64.h b/lib/simd_mmx64.h
deleted file mode 100644
index 254d132..0000000
--- a/lib/simd_mmx64.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this header defines SIMD MACROS for mmx64 intrinsics
-// used to generate mmx64 code from generic SIMD code (sha1_simd.cinc, ubc_check_simd.cinc)
-
-#ifndef SIMD_MMX64_HEADER
-#define SIMD_MMX64_HEADER
-#ifdef HAVE_MMX
-#define SIMD_VERSION mmx64
-#define SIMD_VECSIZE 2
-
-#ifdef __GNUC__
-#include <mmintrin.h>
-
-#define SIMD_WORD __m64
-
-#define SIMD_ZERO _mm_setzero_si64()
-#define SIMD_WTOV(l) _mm_set1_pi32(l)
-#define SIMD_ADD_VV(l,r) _mm_add_pi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm_add_pi32(l, _mm_set1_pi32(r))
-#define SIMD_SUB_VV(l,r) _mm_sub_pi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm_sub_pi32(l, _mm_set1_pi32(r))
-#define SIMD_AND_VV(l,r) _mm_and_si64(l,r)
-#define SIMD_AND_VW(l,r) _mm_and_si64(l, _mm_set1_pi32(r))
-#define SIMD_OR_VV(l,r) _mm_or_si64(l,r)
-#define SIMD_OR_VW(l,r) _mm_or_si64(l, _mm_set1_pi32(r))
-#define SIMD_XOR_VV(l,r) _mm_xor_si64(l,r)
-#define SIMD_XOR_VW(l,r) _mm_xor_si64(l, _mm_set1_pi32(r))
-//#define SIMD_NOT_V(l) _mm_andnot_si64(l,l)
-#define SIMD_SHL_V(l,i) _mm_slli_pi32(l,i)
-#define SIMD_SHR_V(l,i) _mm_srli_pi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm_rol_pi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm_ror_epi32(l,i)
-#define SIMD_CLEANUP _mm_empty()
-
-
-#else // __GNUC__
-
-// VISUAL STUDIO
-#include <mmintrin.h>
-
-#define SIMD_WORD __m64
-
-#define SIMD_ZERO _mm_setzero_si64()
-#define SIMD_WTOV(l) _mm_set1_pi32(l)
-#define SIMD_ADD_VV(l,r) _mm_add_pi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm_add_pi32(l, _mm_set1_pi32(r))
-#define SIMD_SUB_VV(l,r) _mm_sub_pi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm_sub_pi32(l, _mm_set1_pi32(r))
-#define SIMD_AND_VV(l,r) _mm_and_si64(l,r)
-#define SIMD_AND_VW(l,r) _mm_and_si64(l, _mm_set1_pi32(r))
-#define SIMD_OR_VV(l,r) _mm_or_si64(l,r)
-#define SIMD_OR_VW(l,r) _mm_or_si64(l, _mm_set1_pi32(r))
-#define SIMD_XOR_VV(l,r) _mm_xor_si64(l,r)
-#define SIMD_XOR_VW(l,r) _mm_xor_si64(l, _mm_set1_pi32(r))
-//#define SIMD_NOT_V(l) _mm_andnot_si64(l,l)
-#define SIMD_SHL_V(l,i) _mm_slli_pi32(l,i)
-#define SIMD_SHR_V(l,i) _mm_srli_pi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm_rol_pi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm_ror_epi32(l,i)
-#define SIMD_CLEANUP _mm_empty()
-
-#endif // __GNUC__
-
-
-// these are general definitions for lacking SIMD operations
-
-#ifndef SIMD_NOT_V
-#define SIMD_NOT_V(l) SIMD_XOR_VW(l,0xFFFFFFFF)
-#endif
-
-#ifndef SIMD_NEG_V
-#define SIMD_NEG_V(l) SIMD_SUB_VV(SIMD_ZERO,l)
-#endif
-
-#ifndef SIMD_ROL_V
-#define SIMD_ROL_V(l,i) SIMD_OR_VV(SIMD_SHL_V(l,i),SIMD_SHR_V(l,32-i))
-#define SIMD_ROR_V(l,i) SIMD_OR_VV(SIMD_SHR_V(l,i),SIMD_SHL_V(l,32-i))
-#endif
-#endif // HAVE_MMX
-#endif // SIMD_MMX64_HEADER
diff --git a/lib/simd_neon128.h b/lib/simd_neon128.h
deleted file mode 100644
index 9b1e314..0000000
--- a/lib/simd_neon128.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this header defines SIMD MACROS for neon128 intrinsics
-// used to generate neon128 code from generic SIMD code (sha1_simd.cinc, ubc_check_simd.cinc)
-
-#ifndef SIMD_NEON128_HEADER
-#define SIMD_NEON128_HEADER
-#ifdef HAVE_NEON
-
-#include <arm_neon.h>
-
-#define SIMD_VERSION neon128
-#define SIMD_VECSIZE 4
-
-#ifdef __GNUC__
-
-#define SIMD_WORD int32x4_t
-
-#define SIMD_ZERO vmovq_n_s32(0)
-#define SIMD_WTOV(l) vmovq_n_s32(l)
-#define SIMD_ADD_VV(l,r) vaddq_s32(l,r)
-#define SIMD_ADD_VW(l,r) vaddq_s32(l, vmovq_n_s32(r))
-#define SIMD_SUB_VV(l,r) vsubq_s32(l,r)
-#define SIMD_SUB_VW(l,r) vsubq_s32(l, vmovq_n_s32(r))
-#define SIMD_AND_VV(l,r) vandq_s32(l,r)
-#define SIMD_AND_VW(l,r) vandq_s32(l, vmovq_n_s32(r))
-#define SIMD_OR_VV(l,r) vorrq_s32(l,r)
-#define SIMD_OR_VW(l,r) vorrq_s32(l, vmovq_n_s32(r))
-#define SIMD_XOR_VV(l,r) veorq_s32(l,r)
-#define SIMD_XOR_VW(l,r) veorq_s32(l, vmovq_n_s32(r))
-#define SIMD_NOT_V(l) vmvnq_s32(l)
-#define SIMD_SHL_V(l,i) vshlq_n_s32(l,i) // note that this requires that i be a constant
-#define SIMD_SHR_V(l,i) vshrq_n_s32(l,i) // note that this requires that i be a constant
-//#define SIMD_ROL_V(l,i) _mm128_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm128_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#else // __GNUC__
-
-// VISUAL STUDIO
-
-#include <intrin.h>
-
-#define SIMD_WORD __m128i
-
-#define SIMD_ZERO _mm_setzero_si128()
-#define SIMD_WTOV(l) _mm_set1_epi32(l)
-#define SIMD_ADD_VV(l,r) _mm_add_epi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm_add_epi32(l, _mm_set1_epi32(r))
-#define SIMD_SUB_VV(l,r) _mm_sub_epi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm_sub_epi32(l, _mm_set1_epi32(r))
-#define SIMD_AND_VV(l,r) _mm_and_si128(l,r)
-#define SIMD_AND_VW(l,r) _mm_and_si128(l, _mm_set1_epi32(r))
-#define SIMD_OR_VV(l,r) _mm_or_si128(l,r)
-#define SIMD_OR_VW(l,r) _mm_or_si128(l, _mm_set1_epi32(r))
-#define SIMD_XOR_VV(l,r) _mm_xor_si128(l,r)
-#define SIMD_XOR_VW(l,r) _mm_xor_si128(l, _mm_set1_epi32(r))
-#define SIMD_NOT_V(l) _mm_andnot_si128(l,l)
-#define SIMD_SHL_V(l,i) _mm_slli_epi32(l,i)
-#define SIMD_SHR_V(l,i) _mm_srli_epi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm128_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm128_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#endif // __GNUC__
-
-
-// these are general definitions for lacking SIMD operations
-
-#ifndef SIMD_NOT_V
-#define SIMD_NOT_V(l) SIMD_XOR_VW(l,0xFFFFFFFF)
-#endif
-
-#ifndef SIMD_NEG_V
-#define SIMD_NEG_V(l) SIMD_SUB_VV(SIMD_ZERO,l)
-#endif
-
-#ifndef SIMD_ROL_V
-#define SIMD_ROL_V(l,i) SIMD_OR_VV(SIMD_SHL_V(l,i),SIMD_SHR_V(l,32-i))
-#define SIMD_ROR_V(l,i) SIMD_OR_VV(SIMD_SHR_V(l,i),SIMD_SHL_V(l,32-i))
-#endif
-
-#endif // HAVE_NEON
-#endif // SIMD_NEON128_HEADER
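
Unlike the x86 headers, the GCC branch of this header maps SIMD_NOT_V directly to vmvnq_s32; the x86 headers instead rely on the generic fallbacks at the bottom, which express NOT as XOR with all-ones and NEG as subtraction from zero. A scalar sketch of those two identities, for reference only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t l = 0x12345678u;
        assert((l ^ 0xFFFFFFFFu) == (uint32_t)~l);  /* SIMD_NOT_V fallback */
        assert((0u - l) == (uint32_t)-l);           /* SIMD_NEG_V fallback */
        return 0;
    }
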
diff --git a/lib/simd_sse128.h b/lib/simd_sse128.h
deleted file mode 100644
index 927417a..0000000
--- a/lib/simd_sse128.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this header defines SIMD MACROS for sse128 intrinsics
-// used to generate sse128 code from generic SIMD code (sha1_simd.cinc, ubc_check_simd.cinc)
-
-#ifndef SIMD_SSE128_HEADER
-#define SIMD_SSE128_HEADER
-#ifdef HAVE_SSE
-
-#define SIMD_VERSION sse128
-#define SIMD_VECSIZE 4
-
-#ifdef __GNUC__
-
-#include <emmintrin.h>
-
-#define SIMD_WORD __m128i
-
-#define SIMD_ZERO _mm_setzero_si128()
-#define SIMD_WTOV(l) _mm_set1_epi32(l)
-#define SIMD_ADD_VV(l,r) _mm_add_epi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm_add_epi32(l, _mm_set1_epi32(r))
-#define SIMD_SUB_VV(l,r) _mm_sub_epi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm_sub_epi32(l, _mm_set1_epi32(r))
-#define SIMD_AND_VV(l,r) _mm_and_si128(l,r)
-#define SIMD_AND_VW(l,r) _mm_and_si128(l, _mm_set1_epi32(r))
-#define SIMD_OR_VV(l,r) _mm_or_si128(l,r)
-#define SIMD_OR_VW(l,r) _mm_or_si128(l, _mm_set1_epi32(r))
-#define SIMD_XOR_VV(l,r) _mm_xor_si128(l,r)
-#define SIMD_XOR_VW(l,r) _mm_xor_si128(l, _mm_set1_epi32(r))
-//#define SIMD_NOT_V(l) _mm_andnot_si128(l,l)
-#define SIMD_SHL_V(l,i) _mm_slli_epi32(l,i)
-#define SIMD_SHR_V(l,i) _mm_srli_epi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm128_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm128_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#else // __GNUC__
-
-// VISUAL STUDIO
-
-#include <intrin.h>
-
-#define SIMD_WORD __m128i
-
-#define SIMD_ZERO _mm_setzero_si128()
-#define SIMD_WTOV(l) _mm_set1_epi32(l)
-#define SIMD_ADD_VV(l,r) _mm_add_epi32(l,r)
-#define SIMD_ADD_VW(l,r) _mm_add_epi32(l, _mm_set1_epi32(r))
-#define SIMD_SUB_VV(l,r) _mm_sub_epi32(l,r)
-#define SIMD_SUB_VW(l,r) _mm_sub_epi32(l, _mm_set1_epi32(r))
-#define SIMD_AND_VV(l,r) _mm_and_si128(l,r)
-#define SIMD_AND_VW(l,r) _mm_and_si128(l, _mm_set1_epi32(r))
-#define SIMD_OR_VV(l,r) _mm_or_si128(l,r)
-#define SIMD_OR_VW(l,r) _mm_or_si128(l, _mm_set1_epi32(r))
-#define SIMD_XOR_VV(l,r) _mm_xor_si128(l,r)
-#define SIMD_XOR_VW(l,r) _mm_xor_si128(l, _mm_set1_epi32(r))
-//#define SIMD_NOT_V(l) _mm_andnot_si128(l,l)
-#define SIMD_SHL_V(l,i) _mm_slli_epi32(l,i)
-#define SIMD_SHR_V(l,i) _mm_srli_epi32(l,i)
-//#define SIMD_ROL_V(l,i) _mm128_rol_epi32(l,i)
-//#define SIMD_ROR_V(l,i) _mm128_ror_epi32(l,i)
-#define SIMD_CLEANUP
-
-#endif // __GNUC__
-
-
-// these are general definitions for lacking SIMD operations
-
-#ifndef SIMD_NOT_V
-#define SIMD_NOT_V(l) SIMD_XOR_VW(l,0xFFFFFFFF)
-#endif
-
-#ifndef SIMD_NEG_V
-#define SIMD_NEG_V(l) SIMD_SUB_VV(SIMD_ZERO,l)
-#endif
-
-#ifndef SIMD_ROL_V
-#define SIMD_ROL_V(l,i) SIMD_OR_VV(SIMD_SHL_V(l,i),SIMD_SHR_V(l,32-i))
-#define SIMD_ROR_V(l,i) SIMD_OR_VV(SIMD_SHR_V(l,i),SIMD_SHL_V(l,32-i))
-#endif
-
-#endif // HAVE_SSE
-#endif // SIMD_SSE128_HEADER
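
All of these headers distinguish _VV operations (both operands are vectors) from _VW operations (the right operand is a scalar 32-bit word that is first broadcast into every lane). A small SSE2 illustration of the _VW convention, assuming an x86 compiler with emmintrin.h available; the input values are arbitrary.

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t in[4] = {1, 2, 3, 4}, out[4];
        __m128i v = _mm_loadu_si128((const __m128i *)in);
        /* SIMD_ADD_VW(l, r): broadcast the word r, then add lane-wise */
        __m128i r = _mm_add_epi32(v, _mm_set1_epi32(10));
        _mm_storeu_si128((__m128i *)out, r);
        printf("%u %u %u %u\n", (unsigned)out[0], (unsigned)out[1],
               (unsigned)out[2], (unsigned)out[3]);   /* 11 12 13 14 */
        return 0;
    }
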
diff --git a/lib/ubc_check.c b/lib/ubc_check.c
index 6e0ffca..27d0976 100644
--- a/lib/ubc_check.c
+++ b/lib/ubc_check.c
@@ -5,6 +5,7 @@
* https://opensource.org/licenses/MIT
***/
+/*
// this file was generated by the 'parse_bitrel' program in the tools section
// using the data files from directory 'tools/data/3565'
//
@@ -21,6 +22,7 @@
// ubc_check is programmatically generated and the unavoidable bitconditions have been hardcoded
// a directly verifiable version named ubc_check_verify can be found in ubc_check_verify.c
// ubc_check has been verified against ubc_check_verify using the 'ubc_check_test' program in the tools section
+*/
#include <stdint.h>
#include "ubc_check.h"
diff --git a/lib/ubc_check.h b/lib/ubc_check.h
index 41319a0..b349bed 100644
--- a/lib/ubc_check.h
+++ b/lib/ubc_check.h
@@ -5,6 +5,7 @@
* https://opensource.org/licenses/MIT
***/
+/*
// this file was generated by the 'parse_bitrel' program in the tools section
// using the data files from directory 'tools/data/3565'
//
@@ -17,6 +18,7 @@
// ubc_check takes as input an expanded message block and verifies the unavoidable bitconditions for all listed DVs
// it returns a dvmask where each bit belonging to a DV is set if all unavoidable bitconditions for that DV have been met
// thus one needs to do the recompression check for each DV that has its bit set
+*/
#ifndef UBC_CHECK_H
#define UBC_CHECK_H
@@ -41,4 +43,4 @@ void ubc_check(const uint32_t W[80], uint32_t dvmask[DVMASKSIZE]);
}
#endif
-#endif // UBC_CHECK_H
+#endif /* UBC_CHECK_H */
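
With the generated commentary now fenced off as a C-style block comment, the public interface of ubc_check.h is unchanged: ubc_check takes the 80 expanded message words of one block and fills a DVMASKSIZE-word mask with one bit per disturbance vector. A minimal sketch of a caller, assuming ubc_check.h is on the include path and the library is linked in; the all-zero W[] is only a placeholder for a real expanded message block.

    #include <stdint.h>
    #include <stdio.h>
    #include "ubc_check.h"

    int main(void)
    {
        uint32_t W[80] = {0};           /* placeholder: normally the expanded message words */
        uint32_t dvmask[DVMASKSIZE];

        ubc_check(W, dvmask);           /* a set bit means all unavoidable bitconditions
                                           for that disturbance vector are satisfied */

        for (unsigned i = 0; i < DVMASKSIZE; ++i)
            printf("dvmask[%u] = 0x%08x\n", i, (unsigned)dvmask[i]);
        /* only DVs whose bit survives need the more expensive recompression check */
        return 0;
    }
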
diff --git a/lib/ubc_check_simd.cinc b/lib/ubc_check_simd.cinc
deleted file mode 100644
index 27d751b..0000000
--- a/lib/ubc_check_simd.cinc
+++ /dev/null
@@ -1,204 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-#include "ubc_check.h"
-
-static const uint32_t DV_I_43_0_bit = (uint32_t)(1) << 0;
-static const uint32_t DV_I_44_0_bit = (uint32_t)(1) << 1;
-static const uint32_t DV_I_45_0_bit = (uint32_t)(1) << 2;
-static const uint32_t DV_I_46_0_bit = (uint32_t)(1) << 3;
-static const uint32_t DV_I_46_2_bit = (uint32_t)(1) << 4;
-static const uint32_t DV_I_47_0_bit = (uint32_t)(1) << 5;
-static const uint32_t DV_I_47_2_bit = (uint32_t)(1) << 6;
-static const uint32_t DV_I_48_0_bit = (uint32_t)(1) << 7;
-static const uint32_t DV_I_48_2_bit = (uint32_t)(1) << 8;
-static const uint32_t DV_I_49_0_bit = (uint32_t)(1) << 9;
-static const uint32_t DV_I_49_2_bit = (uint32_t)(1) << 10;
-static const uint32_t DV_I_50_0_bit = (uint32_t)(1) << 11;
-static const uint32_t DV_I_50_2_bit = (uint32_t)(1) << 12;
-static const uint32_t DV_I_51_0_bit = (uint32_t)(1) << 13;
-static const uint32_t DV_I_51_2_bit = (uint32_t)(1) << 14;
-static const uint32_t DV_I_52_0_bit = (uint32_t)(1) << 15;
-static const uint32_t DV_II_45_0_bit = (uint32_t)(1) << 16;
-static const uint32_t DV_II_46_0_bit = (uint32_t)(1) << 17;
-static const uint32_t DV_II_46_2_bit = (uint32_t)(1) << 18;
-static const uint32_t DV_II_47_0_bit = (uint32_t)(1) << 19;
-static const uint32_t DV_II_48_0_bit = (uint32_t)(1) << 20;
-static const uint32_t DV_II_49_0_bit = (uint32_t)(1) << 21;
-static const uint32_t DV_II_49_2_bit = (uint32_t)(1) << 22;
-static const uint32_t DV_II_50_0_bit = (uint32_t)(1) << 23;
-static const uint32_t DV_II_50_2_bit = (uint32_t)(1) << 24;
-static const uint32_t DV_II_51_0_bit = (uint32_t)(1) << 25;
-static const uint32_t DV_II_51_2_bit = (uint32_t)(1) << 26;
-static const uint32_t DV_II_52_0_bit = (uint32_t)(1) << 27;
-static const uint32_t DV_II_53_0_bit = (uint32_t)(1) << 28;
-static const uint32_t DV_II_54_0_bit = (uint32_t)(1) << 29;
-static const uint32_t DV_II_55_0_bit = (uint32_t)(1) << 30;
-static const uint32_t DV_II_56_0_bit = (uint32_t)(1) << 31;
-
-void UBC_CHECK_SIMD(const SIMD_WORD* W, SIMD_WORD* dvmask)
-{
- SIMD_WORD mask = SIMD_WTOV(0xFFFFFFFF);
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[63],SIMD_SHR_V(W[64],5)),(1<<0))), ~(DV_I_48_0_bit|DV_II_48_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[63],SIMD_SHR_V(W[64],5)),(1<<1))), ~(DV_I_45_0_bit|DV_II_45_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[63],6),SIMD_SHL_V(W[64],1))), ~(DV_I_48_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[62],SIMD_SHR_V(W[63],5)),(1<<0))), ~(DV_I_47_0_bit|DV_II_47_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(W[62],SIMD_SHR_V(W[63],5))), ~(DV_I_44_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[62],4),SIMD_SHR_V(W[63],1))), ~(DV_I_47_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[61],SIMD_SHR_V(W[62],5)),(1<<0))), ~(DV_I_46_0_bit|DV_II_46_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[61],1),SIMD_SHR_V(W[62],6))), ~(DV_I_43_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[61],SIMD_SHR_V(W[62],5)),(1<<2))), ~(DV_I_46_2_bit|DV_II_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[60],SIMD_SHR_V(W[61],5)),(1<<0))), ~(DV_I_45_0_bit|DV_II_45_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[60],27),SIMD_SHL_V(W[64],2))), ~(DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHR_V(W[60],4),SIMD_SHR_V(W[64],29))), ~(DV_I_44_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[59],1),SIMD_SHR_V(W[64],29))), ~(DV_I_44_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[59],26),SIMD_SHL_V(W[63],1))), ~(DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHR_V(W[59],5),SIMD_SHR_V(W[63],30))), ~(DV_I_43_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHR_V(SIMD_XOR_VV(W[59],W[60]),2)), ~(DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(W[58],SIMD_SHR_V(W[63],30))), ~(DV_I_43_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[58],25),W[62])), ~(DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHR_V(SIMD_XOR_VV(W[58],W[61]),1), ~(DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[58],W[59]),29),1),1), ~(DV_II_51_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[57],24),SIMD_SHR_V(W[61],1))), ~(DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[57],26),SIMD_SHL_V(W[59],1))), ~(DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[57],W[58]),29),1),1), ~(DV_II_50_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[56],SIMD_SHR_V(W[59],25)),(1<<4)),(1<<4)), ~(DV_II_52_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[56],W[59]),29),1)), ~(DV_II_51_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[56],W[57]),29),1),1), ~(DV_II_49_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[55],SIMD_SHR_V(W[58],25)),(1<<4)),(1<<4)), ~(DV_II_51_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[56],25),SIMD_SHL_V(W[55],20))), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHR_V(SIMD_XOR_VV(W[55],W[58]),6), ~(DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[55],W[56]),29),1),1), ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_51_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[54],SIMD_SHR_V(W[57],25)),(1<<4)),(1<<4)), ~(DV_II_50_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[55],23),SIMD_SHL_V(W[54],18))), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[54],W[55]),29),1),1), ~(DV_I_51_0_bit|DV_II_47_0_bit|DV_II_50_0_bit|DV_II_55_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[53],SIMD_SHR_V(W[56],25)),(1<<4)),(1<<4)), ~(DV_II_49_0_bit|DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHL_V(SIMD_XOR_VV(W[53],W[55]),20)), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[54],21),SIMD_SHL_V(W[53],16))), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[53],W[56]),29),1)), ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[53],W[54]),29),1),1), ~(DV_I_50_0_bit|DV_II_46_0_bit|DV_II_49_0_bit|DV_II_54_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[52],W[56]),25), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[52],SIMD_SHR_V(W[55],25)),(1<<4)),(1<<4)), ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_50_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHL_V(SIMD_XOR_VV(W[52],W[54]),18)), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[52],W[53]),29),1),1), ~(DV_I_49_0_bit|DV_II_45_0_bit|DV_II_48_0_bit|DV_II_53_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[51],W[55]),23), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[51],SIMD_SHR_V(W[54],25)),(1<<4)),(1<<4)), ~(DV_I_51_0_bit|DV_II_47_0_bit|DV_II_49_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHL_V(SIMD_XOR_VV(W[51],W[53]),16)), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[52],13),SIMD_SHL_V(W[51],8))), ~(DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[51],W[54]),29),1)), ~(DV_I_50_0_bit|DV_II_46_0_bit|DV_II_47_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[51],W[52]),29),1),1), ~(DV_I_48_0_bit|DV_II_47_0_bit|DV_II_52_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[50],W[54]),21), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[50],SIMD_SHR_V(W[53],25)),(1<<4)),(1<<4)), ~(DV_I_50_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_48_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[51],SIMD_SHR_V(W[50],5)),(1<<1)),(1<<1)), ~(DV_I_50_2_bit|DV_II_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[50],W[52]),29),1)), ~(DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[50],W[51]),29),1),1), ~(DV_I_47_0_bit|DV_II_46_0_bit|DV_II_51_0_bit|DV_II_52_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[49],SIMD_SHR_V(W[52],25)),(1<<4)),(1<<4)), ~(DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_47_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHL_V(SIMD_XOR_VV(W[49],W[51]),8)), ~(DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[50],9),SIMD_SHL_V(W[49],4))), ~(DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[49],W[51]),29),1)), ~(DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[49],W[50]),29),1),1), ~(DV_I_46_0_bit|DV_II_45_0_bit|DV_II_50_0_bit|DV_II_51_0_bit|DV_II_55_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[48],SIMD_SHR_V(W[51],25)),(1<<4)),(1<<4)), ~(DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[48],W[50]),(1<<6)),(1<<6)), ~(DV_I_50_2_bit|DV_II_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[49],7),SIMD_SHL_V(W[48],2))), ~(DV_I_48_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[48],W[55]),29),1)), ~(DV_I_51_0_bit|DV_I_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[48],W[50]),29),1)), ~(DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[48],W[49]),29),1),1), ~(DV_I_45_0_bit|DV_I_52_0_bit|DV_II_49_0_bit|DV_II_50_0_bit|DV_II_54_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[47],W[51]),17), ~(DV_II_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[47],SIMD_SHR_V(W[50],25)),(1<<4)),(1<<4)), ~(DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_51_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[47],W[49]),(1<<6)),(1<<6)), ~(DV_I_49_2_bit|DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[48],SIMD_SHR_V(W[47],5)),(1<<1)),(1<<1)), ~(DV_I_47_2_bit|DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[47],W[49]),29),1)), ~(DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[47],W[48]),29),1),1), ~(DV_I_44_0_bit|DV_I_51_0_bit|DV_II_48_0_bit|DV_II_49_0_bit|DV_II_53_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[46],W[48]),25), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[46],4),SIMD_SHR_V(W[49],29)),1),1), ~(DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit|DV_II_50_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[46],W[48]),(1<<6)),(1<<6)), ~(DV_I_48_2_bit|DV_I_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[47],SIMD_SHR_V(W[46],5)),(1<<1)),(1<<1)), ~(DV_I_46_2_bit|DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[46],W[48]),29),1)), ~(DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[46],W[47]),29),1),1), ~(DV_I_43_0_bit|DV_I_50_0_bit|DV_II_47_0_bit|DV_II_48_0_bit|DV_II_52_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[45],W[47]),23), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[45],4),SIMD_SHR_V(W[48],29)),1),1), ~(DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit|DV_II_49_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[45],W[47]),(1<<6)),(1<<6)), ~(DV_I_47_2_bit|DV_I_49_2_bit|DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[46],21),SIMD_SHL_V(W[45],16))), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[45],W[47]),29),1)), ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[45],W[46]),29),1),1), ~(DV_I_49_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_47_0_bit|DV_II_51_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[44],SIMD_SHR_V(W[45],5)),(1<<1))), ~(DV_I_51_2_bit|DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[44],28),SIMD_SHL_V(W[48],3))), ~(DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[44],27),SIMD_SHL_V(W[48],2))), ~(DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[44],4),SIMD_SHR_V(W[47],29)),1),1), ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit|DV_II_48_0_bit|DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[44],W[46]),6),1),1), ~(DV_I_46_2_bit|DV_I_48_2_bit|DV_I_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[44],W[46]),29),1)), ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[44],W[45]),29),1),1), ~(DV_I_48_0_bit|DV_I_51_0_bit|DV_I_52_0_bit|DV_II_45_0_bit|DV_II_46_0_bit|DV_II_50_0_bit|DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[43],W[51]),11), ~(DV_I_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[43],27),SIMD_SHL_V(W[47],2))), ~(DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[43],26),SIMD_SHL_V(W[47],1))), ~(DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[43],4),SIMD_SHR_V(W[46],29)),1),1), ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit|DV_II_47_0_bit|DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[43],W[45]),(1<<6)),(1<<6)), ~(DV_I_47_2_bit|DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[43],W[44]),29),1),1), ~(DV_I_47_0_bit|DV_I_50_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_49_0_bit|DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[42],W[50]),9), ~(DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[42],26),SIMD_SHL_V(W[46],1))), ~(DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[42],25),W[46])), ~(DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[42],4),SIMD_SHR_V(W[45],29)),1),1), ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[42],W[44]),6),1),1), ~(DV_I_46_2_bit|DV_I_48_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[43],SIMD_SHR_V(W[42],5)),(1<<1)),(1<<1)), ~(DV_II_46_2_bit|DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[42],W[43]),29),1),1), ~(DV_I_46_0_bit|DV_I_49_0_bit|DV_I_50_0_bit|DV_II_48_0_bit|DV_II_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[41],SIMD_SHR_V(W[42],5)),(1<<1))), ~(DV_I_48_2_bit|DV_II_46_2_bit|DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[41],25),W[45])), ~(DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[41],24),SIMD_SHR_V(W[45],1))), ~(DV_II_53_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[41],4),SIMD_SHR_V(W[44],29)),1),1), ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(W[41],W[43])), ~(DV_I_47_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[42],SIMD_SHR_V(W[41],5)),(1<<1)),(1<<1)), ~(DV_I_51_2_bit|DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[41],W[42]),29),1),1), ~(DV_I_45_0_bit|DV_I_48_0_bit|DV_I_49_0_bit|DV_II_47_0_bit|DV_II_48_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[40],SIMD_SHR_V(W[41],5)),(1<<1))), ~(DV_I_47_2_bit|DV_I_51_2_bit|DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[40],24),SIMD_SHR_V(W[44],1))), ~(DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[40],23),SIMD_SHR_V(W[44],2))), ~(DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[40],4),SIMD_SHR_V(W[43],29)),1),1), ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_50_0_bit|DV_II_49_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[40],W[42]),4),1)), ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_SHR_V(SIMD_XOR_VV(W[40],W[42]),2)), ~(DV_I_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[41],SIMD_SHR_V(W[40],5)),(1<<1)),(1<<1)), ~(DV_I_50_2_bit|DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[40],W[41]),29),1),1), ~(DV_I_44_0_bit|DV_I_47_0_bit|DV_I_48_0_bit|DV_II_46_0_bit|DV_II_47_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[39],SIMD_SHR_V(W[40],5)),(1<<1))), ~(DV_I_46_2_bit|DV_I_50_2_bit|DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[39],22),SIMD_SHR_V(W[43],3))), ~(DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[39],SIMD_SHR_V(W[43],25)),(1<<4)),(1<<4)), ~(DV_I_52_0_bit|DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[39],4),SIMD_SHR_V(W[42],29)),1),1), ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_49_0_bit|DV_II_48_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[39],W[41]),4),1)), ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[39],21),SIMD_SHR_V(W[43],4))), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[40],9),SIMD_SHL_V(W[39],4))), ~(DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[44],1),SIMD_SHR_V(W[39],3))), ~(DV_II_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[38],26),SIMD_SHR_V(W[43],4))), ~(DV_II_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[38],W[40]),9), ~(DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[38],20),SIMD_SHR_V(W[42],5))), ~(DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[38],SIMD_SHR_V(W[42],25)),(1<<4)),(1<<4)), ~(DV_I_51_0_bit|DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[38],4),SIMD_SHR_V(W[41],29)),1),1), ~(DV_I_44_0_bit|DV_I_48_0_bit|DV_II_47_0_bit|DV_II_54_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[38],W[40]),4),1)), ~(DV_I_44_0_bit|DV_II_54_0_bit|DV_II_56_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[38],W[39]),11), ~(DV_I_52_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[38],19),SIMD_SHR_V(W[42],6))), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[43],3),SIMD_SHR_V(W[38],5))), ~(DV_II_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[37],24),SIMD_SHR_V(W[42],6))), ~(DV_II_50_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[37],SIMD_SHR_V(W[38],5)),(1<<1))), ~(DV_I_48_2_bit|DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[37],18),SIMD_SHR_V(W[41],7))), ~(DV_II_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[37],SIMD_SHR_V(W[41],25)),(1<<4)),(1<<4)), ~(DV_I_50_0_bit|DV_II_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[37],4),SIMD_SHR_V(W[40],29)),1),1), ~(DV_I_43_0_bit|DV_I_47_0_bit|DV_II_46_0_bit|DV_II_53_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_SHR_V(SIMD_XOR_VV(W[37],W[39]),4),1)), ~(DV_I_43_0_bit|DV_II_53_0_bit|DV_II_55_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[37],W[38]),9), ~(DV_I_51_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[37],17),SIMD_SHR_V(W[41],8))), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[37],13),SIMD_SHL_V(W[37],8))), ~(DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[42],5),SIMD_SHR_V(W[37],7))), ~(DV_II_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHL_V(W[36],22),SIMD_SHR_V(W[41],8))), ~(DV_II_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[36],SIMD_SHR_V(W[37],5)),(1<<1))), ~(DV_I_47_2_bit|DV_I_50_2_bit|DV_II_46_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[36],17),SIMD_SHR_V(W[40],8))), ~(DV_II_48_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[36],4),SIMD_SHR_V(W[40],29)),1),1), ~(DV_I_46_0_bit|DV_I_49_0_bit|DV_II_45_0_bit|DV_II_48_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[36],W[38]),(1<<4))), ~(DV_II_52_0_bit|DV_II_54_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SHL_V(SIMD_XOR_VV(W[36],W[37]),7), ~(DV_I_50_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[41],7),SIMD_SHR_V(W[36],9))), ~(DV_II_49_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NEG_V(SIMD_AND_VW(SIMD_XOR_VV(W[35],SIMD_SHR_V(W[36],5)),(1<<1))), ~(DV_I_46_2_bit|DV_I_49_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(W[35],SIMD_SHR_V(W[39],25)),(1<<3)),(1<<3)), ~(DV_I_51_0_bit|DV_II_47_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_SUB_VW(SIMD_AND_VW(SIMD_XOR_VV(SIMD_SHR_V(W[35],4),SIMD_SHR_V(W[39],29)),1),1), ~(DV_I_45_0_bit|DV_I_48_0_bit|DV_II_47_0_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW(SIMD_NOT_V(SIMD_XOR_VV(SIMD_SHL_V(W[35],9),SIMD_SHR_V(W[39],16))), ~(DV_I_51_2_bit)));
- mask = SIMD_AND_VV(mask, SIMD_OR_VW((SIMD_XOR_VV(SIMD_SHR_V(W[40],8),SIMD_SHR_V(W[35],10))), ~(DV_II_48_0_bit)));
- dvmask[0]=mask;
- SIMD_CLEANUP;
-}
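
Each line of the deleted UBC_CHECK_SIMD body evaluates one unavoidable bitcondition across all lanes at once and clears the affected DV bits without branching: in the SIMD_NEG_V(...) form, negating the extracted 0/1 bit yields an all-ones or all-zeros lane, which is OR'd with the complement of the governed DV bits and AND'd into the running mask. A scalar sketch of that masking idiom (the names here are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* branchless equivalent of: if (bit == 0) mask &= ~dv_bits; */
    static uint32_t apply_condition(uint32_t mask, uint32_t bit /* 0 or 1 */, uint32_t dv_bits)
    {
        return mask & ((0u - bit) | ~dv_bits);
    }

    int main(void)
    {
        uint32_t dv = (1u << 7) | (1u << 20);                        /* bits governed by one condition */
        assert(apply_condition(0xFFFFFFFFu, 1u, dv) == 0xFFFFFFFFu); /* condition holds: DVs survive  */
        assert(apply_condition(0xFFFFFFFFu, 0u, dv) == ~dv);         /* condition fails: DVs cleared  */
        return 0;
    }
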
diff --git a/lib/ubc_check_simd_avx256.c b/lib/ubc_check_simd_avx256.c
deleted file mode 100644
index cfb1841..0000000
--- a/lib/ubc_check_simd_avx256.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates avx256 code using avx256 MACROS (simd_avx256.h) and generic SIMD code (ubc_check_simd.cinc)
-#ifdef HAVE_AVX
-#include "ubc_check.h"
-#include "simd_avx256.h"
-
-#define UBC_CHECK_SIMD ubc_check_avx256
-
-#include "ubc_check_simd.cinc"
-
-#else
-
-#pragma message "The file: ubc_check_simd_avx256.c is not compiled for this architecture."
-
-#endif // HAVE_AVX
diff --git a/lib/ubc_check_simd_mmx64.c b/lib/ubc_check_simd_mmx64.c
deleted file mode 100644
index c360133..0000000
--- a/lib/ubc_check_simd_mmx64.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates mmx64 code using mmx64 MACROS (simd_mmx64.h) and generic SIMD code (ubc_check_simd.cinc)
-#ifdef HAVE_MMX
-#include "ubc_check.h"
-#include "simd_mmx64.h"
-
-#define UBC_CHECK_SIMD ubc_check_mmx64
-
-#include "ubc_check_simd.cinc"
-
-#else
-
-#pragma message "The file: ubc_check_simd_mmx64.c is not compiled for this architecture."
-
-#endif //HAVE_MMX
diff --git a/lib/ubc_check_simd_neon128.c b/lib/ubc_check_simd_neon128.c
deleted file mode 100644
index daac9b0..0000000
--- a/lib/ubc_check_simd_neon128.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates neon 32x4 code using neon MACROS (arm_neon.h) and generic SIMD code (ubc_check_simd.cinc)
-#ifdef HAVE_NEON
-#include "ubc_check.h"
-#include "simd_neon128.h"
-
-#define UBC_CHECK_SIMD ubc_check_neon128
-
-#include "ubc_check_simd.cinc"
-
-#else
-
-#pragma message "The file: ubc_check_simd_neon128.c is not compiled for this architecture."
-
-#endif //HAVE_NEON
diff --git a/lib/ubc_check_simd_sse128.c b/lib/ubc_check_simd_sse128.c
deleted file mode 100644
index 94ffd7d..0000000
--- a/lib/ubc_check_simd_sse128.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file generates sse128 code using sse128 MACROS (simd_sse128.h) and generic SIMD code (ubc_check_simd.cinc)
-#ifdef HAVE_SSE
-#include "ubc_check.h"
-#include "simd_sse128.h"
-
-#define UBC_CHECK_SIMD ubc_check_sse128
-
-#include "ubc_check_simd.cinc"
-
-#else
-
-#pragma message "The file: ubc_check_simd_sse128.c is not compiled for this architecture."
-
-#endif //HAVE_SSE
diff --git a/lib/ubc_check_verify.c b/lib/ubc_check_verify.c
deleted file mode 100644
index d9233a8..0000000
--- a/lib/ubc_check_verify.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/***
-* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
-* Distributed under the MIT Software License.
-* See accompanying file LICENSE.txt or copy at
-* https://opensource.org/licenses/MIT
-***/
-
-// this file was generated by the 'parse_bitrel' program in the tools section
-// using the data files from directory 'tools/data/3565'
-//
-// see also ubc_check.h and ubc_check.c
-//
-// ubc_check in ubc_check.c is programmatically generated and the unavoidable bitconditions have been hardcoded
-// this is a directly verifiable version named ubc_check_verify
-// ubc_check has been verified against ubc_check_verify using the 'ubc_check_test' program in the tools section
-
-#include <stdint.h>
-#include "ubc_check.h"
-
-void ubc_check_verify(const uint32_t W[80], uint32_t dvmask[DVMASKSIZE])
-{
- for (unsigned i=0; i < DVMASKSIZE; ++i)
- dvmask[i]=0xFFFFFFFF;
-
- if ( (0^((W[37]>>4)&1)^((W[39]>>4)&1))!=1
- || (0^((W[37]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[37]>>4)&1)^((W[41]>>4)&1))!=0
- || (0^((W[37]>>4)&1)^((W[42]>>29)&1))!=1
- || (0^((W[37]>>4)&1)^((W[43]>>4)&1))!=1
- || (0^((W[37]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[37]>>4)&1)^((W[46]>>29)&1))!=1
- || (0^((W[37]>>4)&1)^((W[47]>>29)&1))!=1
- || (0^((W[61]>>1)&1)^((W[62]>>6)&1))!=1
- || (0^((W[59]>>5)&1)^((W[63]>>30)&1))!=0
- || (0^((W[58]>>0)&1)^((W[63]>>30)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<0));
-
- if ( (0^((W[38]>>4)&1)^((W[40]>>4)&1))!=1
- || (0^((W[38]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[42]>>4)&1))!=0
- || (0^((W[38]>>4)&1)^((W[43]>>29)&1))!=1
- || (0^((W[38]>>4)&1)^((W[44]>>4)&1))!=1
- || (0^((W[38]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[47]>>29)&1))!=1
- || (0^((W[38]>>4)&1)^((W[48]>>29)&1))!=1
- || (0^((W[62]>>1)&1)^((W[63]>>6)&1))!=1
- || (0^((W[60]>>5)&1)^((W[64]>>30)&1))!=0
- || (0^((W[59]>>0)&1)^((W[64]>>30)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<1));
-
- if ( (0^((W[35]>>4)&1)^((W[39]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[41]>>4)&1))!=1
- || (0^((W[39]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[43]>>4)&1))!=0
- || (0^((W[39]>>4)&1)^((W[44]>>29)&1))!=1
- || (0^((W[39]>>4)&1)^((W[45]>>4)&1))!=1
- || (0^((W[39]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[48]>>29)&1))!=1
- || (0^((W[39]>>4)&1)^((W[49]>>29)&1))!=1
- || (0^((W[63]>>1)&1)^((W[64]>>6)&1))!=1
- || (0^((W[60]>>0)&1)^((W[61]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<2));
-
- if ( (0^((W[36]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[42]>>4)&1))!=1
- || (0^((W[40]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[44]>>4)&1))!=0
- || (0^((W[40]>>4)&1)^((W[45]>>29)&1))!=1
- || (0^((W[40]>>4)&1)^((W[46]>>4)&1))!=1
- || (0^((W[40]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[49]>>29)&1))!=1
- || (0^((W[40]>>4)&1)^((W[50]>>29)&1))!=1
- || (0^((W[61]>>0)&1)^((W[62]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<3));
-
- if ( (0^((W[35]>>1)&1)^((W[36]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[40]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[42]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[44]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[46]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[47]>>1)&1))!=1
- || (0^((W[61]>>2)&1)^((W[62]>>7)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<4));
-
- if ( (0^((W[37]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[37]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[43]>>4)&1))!=1
- || (0^((W[41]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[45]>>4)&1))!=0
- || (0^((W[41]>>4)&1)^((W[46]>>29)&1))!=1
- || (0^((W[41]>>4)&1)^((W[47]>>4)&1))!=1
- || (0^((W[41]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[50]>>29)&1))!=1
- || (0^((W[41]>>4)&1)^((W[51]>>29)&1))!=1
- || (0^((W[62]>>0)&1)^((W[63]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<5));
-
- if ( (0^((W[36]>>1)&1)^((W[37]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[41]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[43]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[45]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[47]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[48]>>1)&1))!=1
- || (0^((W[62]>>2)&1)^((W[63]>>7)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<6));
-
- if ( (0^((W[35]>>4)&1)^((W[39]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[44]>>4)&1))!=1
- || (0^((W[42]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[46]>>4)&1))!=0
- || (0^((W[42]>>4)&1)^((W[47]>>29)&1))!=1
- || (0^((W[42]>>4)&1)^((W[48]>>4)&1))!=1
- || (0^((W[42]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[51]>>29)&1))!=1
- || (0^((W[42]>>4)&1)^((W[52]>>29)&1))!=1
- || (0^((W[63]>>0)&1)^((W[64]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<7));
-
- if ( (0^((W[37]>>1)&1)^((W[38]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[42]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[44]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[46]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[48]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[49]>>1)&1))!=1
- || (0^((W[63]>>2)&1)^((W[64]>>7)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<8));
-
- if ( (0^((W[36]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[45]>>4)&1))!=1
- || (0^((W[43]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[47]>>4)&1))!=0
- || (0^((W[43]>>4)&1)^((W[48]>>29)&1))!=1
- || (0^((W[43]>>4)&1)^((W[49]>>4)&1))!=1
- || (0^((W[43]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[52]>>29)&1))!=1
- || (0^((W[43]>>4)&1)^((W[53]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<9));
-
- if ( (0^((W[35]>>1)&1)^((W[36]>>6)&1))!=1
- || (0^((W[38]>>1)&1)^((W[39]>>6)&1))!=1
- || (0^((W[38]>>1)&1)^((W[40]>>1)&1))!=1
- || (0^((W[42]>>1)&1)^((W[43]>>6)&1))!=1
- || (0^((W[42]>>1)&1)^((W[45]>>6)&1))!=1
- || (0^((W[42]>>1)&1)^((W[47]>>6)&1))!=1
- || (0^((W[42]>>1)&1)^((W[49]>>6)&1))!=1
- || (0^((W[42]>>1)&1)^((W[50]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<10));
-
- if ( (0^((W[36]>>4)&1)^((W[37]>>4)&1))!=1
- || (0^((W[36]>>4)&1)^((W[41]>>29)&1))!=1
- || (0^((W[40]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[46]>>4)&1))!=1
- || (0^((W[44]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[48]>>4)&1))!=0
- || (0^((W[44]>>4)&1)^((W[49]>>29)&1))!=1
- || (0^((W[44]>>4)&1)^((W[50]>>4)&1))!=1
- || (0^((W[44]>>4)&1)^((W[51]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[53]>>29)&1))!=1
- || (0^((W[44]>>4)&1)^((W[54]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<11));
-
- if ( (0^((W[36]>>1)&1)^((W[37]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[40]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[41]>>1)&1))!=1
- || (0^((W[43]>>1)&1)^((W[44]>>6)&1))!=1
- || (0^((W[43]>>1)&1)^((W[46]>>6)&1))!=1
- || (0^((W[43]>>1)&1)^((W[48]>>6)&1))!=1
- || (0^((W[43]>>1)&1)^((W[50]>>6)&1))!=1
- || (0^((W[43]>>1)&1)^((W[51]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<12));
-
- if ( (0^((W[37]>>4)&1)^((W[38]>>4)&1))!=1
- || (0^((W[35]>>3)&1)^((W[39]>>28)&1))!=0
- || (0^((W[37]>>4)&1)^((W[42]>>29)&1))!=1
- || (0^((W[41]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[47]>>4)&1))!=1
- || (0^((W[45]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[49]>>4)&1))!=0
- || (0^((W[45]>>4)&1)^((W[50]>>29)&1))!=1
- || (0^((W[45]>>4)&1)^((W[51]>>4)&1))!=1
- || (0^((W[45]>>4)&1)^((W[52]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[54]>>29)&1))!=1
- || (0^((W[45]>>4)&1)^((W[55]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<13));
-
- if ( (0^((W[37]>>1)&1)^((W[37]>>6)&1))!=0
- || (0^((W[37]>>1)&1)^((W[38]>>6)&1))!=1
- || (0^((W[35]>>5)&1)^((W[39]>>30)&1))!=0
- || (0^((W[40]>>1)&1)^((W[41]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[42]>>1)&1))!=1
- || (0^((W[44]>>1)&1)^((W[45]>>6)&1))!=1
- || (0^((W[44]>>1)&1)^((W[47]>>6)&1))!=1
- || (0^((W[44]>>1)&1)^((W[49]>>6)&1))!=1
- || (0^((W[44]>>1)&1)^((W[51]>>6)&1))!=1
- || (0^((W[44]>>1)&1)^((W[52]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<14));
-
- if ( (0^((W[38]>>4)&1)^((W[39]>>4)&1))!=1
- || (0^((W[38]>>4)&1)^((W[43]>>29)&1))!=1
- || (0^((W[42]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[48]>>4)&1))!=1
- || (0^((W[46]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[50]>>4)&1))!=0
- || (0^((W[46]>>4)&1)^((W[51]>>29)&1))!=1
- || (0^((W[46]>>4)&1)^((W[52]>>4)&1))!=1
- || (0^((W[46]>>4)&1)^((W[53]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[55]>>29)&1))!=1
- || (0^((W[46]>>4)&1)^((W[56]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<15));
-
- if ( (0^((W[36]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[52]>>29)&1)^((W[53]>>29)&1))!=0
- || (0^((W[50]>>29)&1)^((W[53]>>29)&1))!=1
- || (0^((W[49]>>4)&1)^((W[53]>>29)&1))!=0
- || (0^((W[49]>>29)&1)^((W[53]>>29)&1))!=1
- || (0^((W[47]>>4)&1)^((W[53]>>29)&1))!=1
- || (0^((W[63]>>1)&1)^((W[64]>>6)&1))!=1
- || (0^((W[60]>>0)&1)^((W[61]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<16));
-
- if ( (0^((W[37]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[37]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[53]>>29)&1)^((W[54]>>29)&1))!=0
- || (0^((W[51]>>29)&1)^((W[54]>>29)&1))!=1
- || (0^((W[50]>>4)&1)^((W[54]>>29)&1))!=0
- || (0^((W[50]>>29)&1)^((W[54]>>29)&1))!=1
- || (0^((W[48]>>4)&1)^((W[54]>>29)&1))!=1
- || (0^((W[61]>>0)&1)^((W[62]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<17));
-
- if ( (0^((W[36]>>1)&1)^((W[37]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[42]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[43]>>1)&1))!=1
- || (0^((W[50]>>6)&1)^((W[51]>>1)&1))!=0
- || (0^((W[48]>>6)&1)^((W[51]>>1)&1))!=0
- || (0^((W[47]>>1)&1)^((W[51]>>1)&1))!=1
- || (0^((W[61]>>2)&1)^((W[62]>>7)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<18));
-
- if ( (0^((W[35]>>3)&1)^((W[39]>>28)&1))!=0
- || (0^((W[35]>>4)&1)^((W[39]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[54]>>29)&1)^((W[55]>>29)&1))!=0
- || (0^((W[52]>>29)&1)^((W[55]>>29)&1))!=1
- || (0^((W[51]>>4)&1)^((W[55]>>29)&1))!=0
- || (0^((W[51]>>29)&1)^((W[55]>>29)&1))!=1
- || (0^((W[49]>>4)&1)^((W[55]>>29)&1))!=1
- || (0^((W[62]>>0)&1)^((W[63]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<19));
-
- if ( (0^((W[35]>>30)&1)^((W[36]>>3)&1))!=1
- || (0^((W[35]>>30)&1)^((W[40]>>28)&1))!=1
- || (0^((W[36]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[39]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[55]>>29)&1)^((W[56]>>29)&1))!=0
- || (0^((W[53]>>29)&1)^((W[56]>>29)&1))!=1
- || (0^((W[52]>>4)&1)^((W[56]>>29)&1))!=0
- || (0^((W[52]>>29)&1)^((W[56]>>29)&1))!=1
- || (0^((W[50]>>4)&1)^((W[56]>>29)&1))!=1
- || (0^((W[63]>>0)&1)^((W[64]>>5)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<20));
-
- if ( (0^((W[36]>>30)&1)^((W[37]>>3)&1))!=1
- || (0^((W[36]>>30)&1)^((W[41]>>28)&1))!=1
- || (0^((W[37]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[56]>>29)&1)^((W[57]>>29)&1))!=0
- || (0^((W[54]>>29)&1)^((W[57]>>29)&1))!=1
- || (0^((W[53]>>4)&1)^((W[57]>>29)&1))!=0
- || (0^((W[53]>>29)&1)^((W[57]>>29)&1))!=1
- || (0^((W[51]>>4)&1)^((W[57]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<21));
-
- if ( (0^((W[36]>>0)&1)^((W[37]>>5)&1))!=1
- || (0^((W[39]>>1)&1)^((W[40]>>6)&1))!=1
- || (0^((W[39]>>1)&1)^((W[41]>>1)&1))!=1
- || (0^((W[36]>>0)&1)^((W[41]>>30)&1))!=1
- || (0^((W[44]>>1)&1)^((W[45]>>6)&1))!=1
- || (0^((W[44]>>1)&1)^((W[46]>>1)&1))!=1
- || (0^((W[53]>>6)&1)^((W[54]>>1)&1))!=0
- || (0^((W[51]>>6)&1)^((W[54]>>1)&1))!=0
- || (0^((W[50]>>1)&1)^((W[54]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<22));
-
- if ( (0^((W[37]>>30)&1)^((W[38]>>3)&1))!=1
- || (0^((W[37]>>30)&1)^((W[42]>>28)&1))!=1
- || (0^((W[38]>>4)&1)^((W[42]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[41]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[57]>>29)&1)^((W[58]>>29)&1))!=0
- || (0^((W[55]>>29)&1)^((W[58]>>29)&1))!=1
- || (0^((W[54]>>4)&1)^((W[58]>>29)&1))!=0
- || (0^((W[54]>>29)&1)^((W[58]>>29)&1))!=1
- || (0^((W[52]>>4)&1)^((W[58]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<23));
-
- if ( (0^((W[37]>>0)&1)^((W[38]>>5)&1))!=1
- || (0^((W[40]>>1)&1)^((W[41]>>6)&1))!=1
- || (0^((W[40]>>1)&1)^((W[42]>>1)&1))!=1
- || (0^((W[37]>>0)&1)^((W[42]>>30)&1))!=1
- || (0^((W[45]>>1)&1)^((W[46]>>6)&1))!=1
- || (0^((W[45]>>1)&1)^((W[47]>>1)&1))!=1
- || (0^((W[54]>>6)&1)^((W[55]>>1)&1))!=0
- || (0^((W[52]>>6)&1)^((W[55]>>1)&1))!=0
- || (0^((W[51]>>1)&1)^((W[55]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<24));
-
- if ( (0^((W[38]>>30)&1)^((W[39]>>3)&1))!=1
- || (0^((W[38]>>30)&1)^((W[43]>>28)&1))!=1
- || (0^((W[39]>>4)&1)^((W[43]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[42]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[51]>>29)&1))!=0
- || (0^((W[58]>>29)&1)^((W[59]>>29)&1))!=0
- || (0^((W[56]>>29)&1)^((W[59]>>29)&1))!=1
- || (0^((W[55]>>4)&1)^((W[59]>>29)&1))!=0
- || (0^((W[55]>>29)&1)^((W[59]>>29)&1))!=1
- || (0^((W[53]>>4)&1)^((W[59]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<25));
-
- if ( (0^((W[38]>>0)&1)^((W[39]>>5)&1))!=1
- || (0^((W[41]>>1)&1)^((W[42]>>6)&1))!=1
- || (0^((W[41]>>1)&1)^((W[43]>>1)&1))!=1
- || (0^((W[38]>>0)&1)^((W[43]>>30)&1))!=1
- || (0^((W[46]>>1)&1)^((W[47]>>6)&1))!=1
- || (0^((W[46]>>1)&1)^((W[48]>>1)&1))!=1
- || (0^((W[55]>>6)&1)^((W[56]>>1)&1))!=0
- || (0^((W[53]>>6)&1)^((W[56]>>1)&1))!=0
- || (0^((W[52]>>1)&1)^((W[56]>>1)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<26));
-
- if ( (0^((W[36]>>4)&1)^((W[38]>>4)&1))!=1
- || (0^((W[39]>>30)&1)^((W[40]>>3)&1))!=1
- || (0^((W[39]>>30)&1)^((W[44]>>28)&1))!=1
- || (0^((W[40]>>4)&1)^((W[44]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[43]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[48]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[48]>>4)&1)^((W[51]>>29)&1))!=0
- || (0^((W[48]>>4)&1)^((W[52]>>29)&1))!=0
- || (0^((W[59]>>29)&1)^((W[60]>>29)&1))!=0
- || (0^((W[57]>>29)&1)^((W[60]>>29)&1))!=1
- || (0^((W[56]>>4)&1)^((W[60]>>29)&1))!=0
- || (0^((W[56]>>29)&1)^((W[60]>>29)&1))!=1
- || (0^((W[54]>>4)&1)^((W[60]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<27));
-
- if ( (0^((W[37]>>4)&1)^((W[39]>>4)&1))!=1
- || (0^((W[37]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[41]>>3)&1)^((W[45]>>28)&1))!=0
- || (0^((W[41]>>4)&1)^((W[45]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[44]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[49]>>4)&1)^((W[51]>>29)&1))!=0
- || (0^((W[49]>>4)&1)^((W[52]>>29)&1))!=0
- || (0^((W[49]>>4)&1)^((W[53]>>29)&1))!=0
- || (0^((W[58]>>29)&1)^((W[61]>>29)&1))!=1
- || (0^((W[57]>>4)&1)^((W[61]>>29)&1))!=0
- || (0^((W[57]>>29)&1)^((W[61]>>29)&1))!=1
- || (0^((W[55]>>4)&1)^((W[61]>>29)&1))!=1
- )
- dvmask[0] &= ~((uint32_t)(1<<28));
-
- if ( (0^((W[53]>>29)&1)^((W[54]>>29)&1))!=0
- || (0^((W[52]>>29)&1)^((W[54]>>29)&1))!=0
- || (0^((W[50]>>4)&1)^((W[54]>>29)&1))!=0
- || (0^((W[48]>>29)&1)^((W[49]>>29)&1))!=0
- || (0^((W[47]>>29)&1)^((W[49]>>29)&1))!=0
- || (0^((W[45]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[42]>>3)&1)^((W[46]>>28)&1))!=0
- || (0^((W[42]>>4)&1)^((W[46]>>29)&1))!=0
- || (0^((W[40]>>4)&1)^((W[41]>>29)&1))!=1
- || (0^((W[38]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[36]>>4)&1)^((W[41]>>29)&1))!=1
- || (0^((W[58]>>4)&1)^((W[62]>>29)&1))!=0
- || (0^((W[58]>>29)&1)^((W[59]>>29)&1))!=0
- || (0^((W[56]>>4)&1)^((W[59]>>29)&1))!=0
- )
- dvmask[0] &= ~((uint32_t)(1<<29));
-
- if ( (0^((W[37]>>4)&1)^((W[39]>>4)&1))!=1
- || (0^((W[37]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[37]>>4)&1)^((W[41]>>4)&1))!=0
- || (0^((W[37]>>4)&1)^((W[42]>>29)&1))!=1
- || (0^((W[43]>>3)&1)^((W[47]>>28)&1))!=0
- || (0^((W[43]>>4)&1)^((W[47]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[46]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[51]>>4)&1)^((W[53]>>29)&1))!=0
- || (0^((W[51]>>4)&1)^((W[54]>>29)&1))!=0
- || (0^((W[51]>>4)&1)^((W[55]>>29)&1))!=0
- || (0^((W[59]>>4)&1)^((W[63]>>29)&1))!=0
- || (0^((W[57]>>4)&1)^((W[59]>>29)&1))!=0
- )
- dvmask[0] &= ~((uint32_t)(1<<30));
-
- if ( (0^((W[38]>>4)&1)^((W[40]>>4)&1))!=1
- || (0^((W[38]>>4)&1)^((W[40]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[41]>>29)&1))!=0
- || (0^((W[38]>>4)&1)^((W[42]>>4)&1))!=0
- || (0^((W[38]>>4)&1)^((W[43]>>29)&1))!=1
- || (0^((W[44]>>3)&1)^((W[48]>>28)&1))!=0
- || (0^((W[44]>>4)&1)^((W[48]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[49]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[50]>>29)&1))!=0
- || (0^((W[47]>>4)&1)^((W[51]>>29)&1))!=0
- || (0^((W[52]>>4)&1)^((W[54]>>29)&1))!=0
- || (0^((W[52]>>4)&1)^((W[55]>>29)&1))!=0
- || (0^((W[52]>>4)&1)^((W[56]>>29)&1))!=0
- || (0^((W[60]>>4)&1)^((W[64]>>29)&1))!=0
- )
- dvmask[0] &= ~((uint32_t)(1<<31));
-
-}
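
Each of the removed if-blocks above follows one pattern: for a single disturbance vector it tests a list of unavoidable bit conditions on the expanded message words W[], where every condition requires the XOR of two individual bits of W[] to equal a fixed value, and if any condition fails it clears that DV's bit in dvmask[0]. A minimal sketch of that pattern, using a hypothetical condition table (not part of the library), might look like:

#include <stdint.h>
#include <stddef.h>

/* One unavoidable bit condition: ((W[w1]>>b1)&1) ^ ((W[w2]>>b2)&1) must equal 'expected'. */
typedef struct {
    int w1, b1;
    int w2, b2;
    uint32_t expected;
} bit_condition_t;

/* Clear bit 'dvbit' of the candidate mask if any condition for this DV fails. */
static void apply_dv_conditions(uint32_t *dvmask, const uint32_t W[80],
                                int dvbit, const bit_condition_t *cond, size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i)
    {
        uint32_t x = ((W[cond[i].w1] >> cond[i].b1) & 1)
                   ^ ((W[cond[i].w2] >> cond[i].b2) & 1);
        if (x != cond[i].expected)
        {
            dvmask[0] &= ~((uint32_t)(1 << dvbit));
            return;
        }
    }
}

The hand-unrolled checks removed by this commit encode exactly such condition tables inline, one if-block per disturbance vector, with the surviving set bits of dvmask[0] indicating which DVs remain possible for the given message expansion.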