author     Adam Langley <agl@chromium.org>   2014-06-20 23:00:00 +0400
committer  Adam Langley <agl@chromium.org>   2014-06-21 00:17:35 +0400
commit     de0b2026841c34193cacf5c97646b38439e13200 (patch)
tree       55390fa8197b59f6611025e6701463fd4f54658f /crypto/poly1305
parent     d8983ce0f2b083a45416195e05a11f3a2a1d5aed (diff)
ChaCha20-Poly1305 support.
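Background on what the added code computes (context, not part of the original commit message): Poly1305 splits the 32-byte one-time key into a clamped multiplier r and a pad s. The message is cut into 16-byte chunks c_1 ... c_q, each padded with a trailing 0x01 byte and read as a little-endian integer, and the tag is

    tag = ((c_1*r^q + c_2*r^(q-1) + ... + c_q*r) mod (2^130 - 5) + s) mod 2^128

The portable C and NEON code in this commit evaluates this polynomial with r and the accumulator h split into 26-bit limbs (the 0x3ffffff masks below); the 0x3ffff03-style masks on r implement the clamping.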
Diffstat (limited to 'crypto/poly1305')
-rw-r--r--  crypto/poly1305/CMakeLists.txt      |   21
-rw-r--r--  crypto/poly1305/poly1305.c          |  323
-rw-r--r--  crypto/poly1305/poly1305.h          |   47
-rw-r--r--  crypto/poly1305/poly1305_arm.c      |  288
-rw-r--r--  crypto/poly1305/poly1305_arm_asm.S  | 2013
-rw-r--r--  crypto/poly1305/poly1305_vec.c      |  887
6 files changed, 3579 insertions, 0 deletions
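The new public header (crypto/poly1305/poly1305.h, in the diff below) exposes a three-call one-shot MAC interface: CRYPTO_poly1305_init, CRYPTO_poly1305_update and CRYPTO_poly1305_finish. A minimal usage sketch follows; the wrapper name poly1305_tag is illustrative only, and the 32-byte key must be a fresh one-time key (for ChaCha20-Poly1305 it is derived from the ChaCha20 keystream for the given nonce).

#include <stddef.h>
#include <stdint.h>

#include <openssl/poly1305.h>

/* poly1305_tag (hypothetical helper, not part of this commit) computes the
 * 16-byte Poly1305 tag of |msg| under the 32-byte one-time |key|. */
static void poly1305_tag(uint8_t tag[16], const uint8_t key[32],
                         const uint8_t *msg, size_t msg_len) {
  poly1305_state st;                          /* opaque 512-byte state */
  CRYPTO_poly1305_init(&st, key);             /* selects the NEON path on capable ARM */
  CRYPTO_poly1305_update(&st, msg, msg_len);  /* may be called zero or more times */
  CRYPTO_poly1305_finish(&st, tag);           /* writes the 16-byte MAC */
}

Because the key is one-time, the API deliberately has no reset function; as the header comment below notes, a new key must be used for every message.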
diff --git a/crypto/poly1305/CMakeLists.txt b/crypto/poly1305/CMakeLists.txt new file mode 100644 index 00000000..65d7dbbe --- /dev/null +++ b/crypto/poly1305/CMakeLists.txt @@ -0,0 +1,21 @@ +include_directories(. .. ../../include) + +if (${ARCH} STREQUAL "arm") + set( + POLY1305_ARCH_SOURCES + + poly1305_arm_asm.S + ) +endif() + +add_library( + poly1305 + + OBJECT + + poly1305.c + poly1305_arm.c + poly1305_vec.c + + ${POLY1305_ARCH_SOURCES} +) diff --git a/crypto/poly1305/poly1305.c b/crypto/poly1305/poly1305.c new file mode 100644 index 00000000..256ad69b --- /dev/null +++ b/crypto/poly1305/poly1305.c @@ -0,0 +1,323 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* This implementation of poly1305 is by Andrew Moon + * (https://github.com/floodyberry/poly1305-donna) and released as public + * domain. */ + +#include <openssl/poly1305.h> + +#include <string.h> + +#include <openssl/cpu.h> + + +#if defined(OPENSSL_WINDOWS) || !defined(OPENSSL_X86_64) + +#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) +/* We can assume little-endian. */ +static uint32_t U8TO32_LE(const uint8_t *m) { + uint32_t r; + memcpy(&r, m, sizeof(r)); + return r; +} + +static void U32TO8_LE(uint8_t *m, uint32_t v) { memcpy(m, &v, sizeof(v)); } +#else +static uint32_t U8TO32_LE(const uint8_t *m) { + return (uint32_t)m[0] | (uint32_t)m[1] << 8 | (uint32_t)m[2] << 16 | + (uint32_t)m[3] << 24; +} + +static void U32TO8_LE(uint8_t *m, uint32_t v) { + m[0] = v; + m[1] = v >> 8; + m[2] = v >> 16; + m[3] = v >> 24; +} +#endif + +#if defined(OPENSSL_ARM) +void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]); + +void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, + size_t in_len); + +void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]); +#endif + +static uint64_t mul32x32_64(uint32_t a, uint32_t b) { return (uint64_t)a * b; } + +struct poly1305_state_st { + uint32_t r0, r1, r2, r3, r4; + uint32_t s1, s2, s3, s4; + uint32_t h0, h1, h2, h3, h4; + uint8_t buf[16]; + unsigned int buf_used; + uint8_t key[16]; +}; + +/* poly1305_blocks updates |state| given some amount of input data. This + * function may only be called with a |len| that is not a multiple of 16 at the + * end of the data. Otherwise the input must be buffered into 16 byte blocks. 
*/ +static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, + size_t len) { + uint32_t t0, t1, t2, t3; + uint64_t t[5]; + uint32_t b; + uint64_t c; + size_t j; + uint8_t mp[16]; + + if (len < 16) { + goto poly1305_donna_atmost15bytes; + } + +poly1305_donna_16bytes: + t0 = U8TO32_LE(in); + t1 = U8TO32_LE(in + 4); + t2 = U8TO32_LE(in + 8); + t3 = U8TO32_LE(in + 12); + + in += 16; + len -= 16; + + state->h0 += t0 & 0x3ffffff; + state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; + state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; + state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; + state->h4 += (t3 >> 8) | (1 << 24); + +poly1305_donna_mul: + t[0] = mul32x32_64(state->h0, state->r0) + mul32x32_64(state->h1, state->s4) + + mul32x32_64(state->h2, state->s3) + mul32x32_64(state->h3, state->s2) + + mul32x32_64(state->h4, state->s1); + t[1] = mul32x32_64(state->h0, state->r1) + mul32x32_64(state->h1, state->r0) + + mul32x32_64(state->h2, state->s4) + mul32x32_64(state->h3, state->s3) + + mul32x32_64(state->h4, state->s2); + t[2] = mul32x32_64(state->h0, state->r2) + mul32x32_64(state->h1, state->r1) + + mul32x32_64(state->h2, state->r0) + mul32x32_64(state->h3, state->s4) + + mul32x32_64(state->h4, state->s3); + t[3] = mul32x32_64(state->h0, state->r3) + mul32x32_64(state->h1, state->r2) + + mul32x32_64(state->h2, state->r1) + mul32x32_64(state->h3, state->r0) + + mul32x32_64(state->h4, state->s4); + t[4] = mul32x32_64(state->h0, state->r4) + mul32x32_64(state->h1, state->r3) + + mul32x32_64(state->h2, state->r2) + mul32x32_64(state->h3, state->r1) + + mul32x32_64(state->h4, state->r0); + + state->h0 = (uint32_t)t[0] & 0x3ffffff; + c = (t[0] >> 26); + t[1] += c; + state->h1 = (uint32_t)t[1] & 0x3ffffff; + b = (uint32_t)(t[1] >> 26); + t[2] += b; + state->h2 = (uint32_t)t[2] & 0x3ffffff; + b = (uint32_t)(t[2] >> 26); + t[3] += b; + state->h3 = (uint32_t)t[3] & 0x3ffffff; + b = (uint32_t)(t[3] >> 26); + t[4] += b; + state->h4 = (uint32_t)t[4] & 0x3ffffff; + b = (uint32_t)(t[4] >> 26); + state->h0 += b * 5; + + if (len >= 16) + goto poly1305_donna_16bytes; + +/* final bytes */ +poly1305_donna_atmost15bytes: + if (!len) + return; + + for (j = 0; j < len; j++) + mp[j] = in[j]; + mp[j++] = 1; + for (; j < 16; j++) + mp[j] = 0; + len = 0; + + t0 = U8TO32_LE(mp + 0); + t1 = U8TO32_LE(mp + 4); + t2 = U8TO32_LE(mp + 8); + t3 = U8TO32_LE(mp + 12); + + state->h0 += t0 & 0x3ffffff; + state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; + state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; + state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; + state->h4 += (t3 >> 8); + + goto poly1305_donna_mul; +} + +void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + uint32_t t0, t1, t2, t3; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_init_neon(statep, key); + return; + } +#endif + + t0 = U8TO32_LE(key + 0); + t1 = U8TO32_LE(key + 4); + t2 = U8TO32_LE(key + 8); + t3 = U8TO32_LE(key + 12); + + /* precompute multipliers */ + state->r0 = t0 & 0x3ffffff; + t0 >>= 26; + t0 |= t1 << 6; + state->r1 = t0 & 0x3ffff03; + t1 >>= 20; + t1 |= t2 << 12; + state->r2 = t1 & 0x3ffc0ff; + t2 >>= 14; + t2 |= t3 << 18; + state->r3 = t2 & 0x3f03fff; + t3 >>= 8; + state->r4 = t3 & 0x00fffff; + + state->s1 = state->r1 * 5; + state->s2 = state->r2 * 5; + state->s3 = state->r3 * 5; + state->s4 = state->r4 * 5; + + /* init state */ + 
state->h0 = 0; + state->h1 = 0; + state->h2 = 0; + state->h3 = 0; + state->h4 = 0; + + state->buf_used = 0; + memcpy(state->key, key + 16, sizeof(state->key)); +} + +void CRYPTO_poly1305_update(poly1305_state *statep, const uint8_t *in, + size_t in_len) { + unsigned int i; + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_update_neon(statep, in, in_len); + return; + } +#endif + + if (state->buf_used) { + unsigned int todo = 16 - state->buf_used; + if (todo > in_len) + todo = in_len; + for (i = 0; i < todo; i++) + state->buf[state->buf_used + i] = in[i]; + state->buf_used += todo; + in_len -= todo; + in += todo; + + if (state->buf_used == 16) { + poly1305_update(state, state->buf, 16); + state->buf_used = 0; + } + } + + if (in_len >= 16) { + size_t todo = in_len & ~0xf; + poly1305_update(state, in, todo); + in += todo; + in_len &= 0xf; + } + + if (in_len) { + for (i = 0; i < in_len; i++) + state->buf[i] = in[i]; + state->buf_used = in_len; + } +} + +void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + uint64_t f0, f1, f2, f3; + uint32_t g0, g1, g2, g3, g4; + uint32_t b, nb; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_finish_neon(statep, mac); + return; + } +#endif + + if (state->buf_used) + poly1305_update(state, state->buf, state->buf_used); + + b = state->h0 >> 26; + state->h0 = state->h0 & 0x3ffffff; + state->h1 += b; + b = state->h1 >> 26; + state->h1 = state->h1 & 0x3ffffff; + state->h2 += b; + b = state->h2 >> 26; + state->h2 = state->h2 & 0x3ffffff; + state->h3 += b; + b = state->h3 >> 26; + state->h3 = state->h3 & 0x3ffffff; + state->h4 += b; + b = state->h4 >> 26; + state->h4 = state->h4 & 0x3ffffff; + state->h0 += b * 5; + + g0 = state->h0 + 5; + b = g0 >> 26; + g0 &= 0x3ffffff; + g1 = state->h1 + b; + b = g1 >> 26; + g1 &= 0x3ffffff; + g2 = state->h2 + b; + b = g2 >> 26; + g2 &= 0x3ffffff; + g3 = state->h3 + b; + b = g3 >> 26; + g3 &= 0x3ffffff; + g4 = state->h4 + b - (1 << 26); + + b = (g4 >> 31) - 1; + nb = ~b; + state->h0 = (state->h0 & nb) | (g0 & b); + state->h1 = (state->h1 & nb) | (g1 & b); + state->h2 = (state->h2 & nb) | (g2 & b); + state->h3 = (state->h3 & nb) | (g3 & b); + state->h4 = (state->h4 & nb) | (g4 & b); + + f0 = ((state->h0) | (state->h1 << 26)) + (uint64_t)U8TO32_LE(&state->key[0]); + f1 = ((state->h1 >> 6) | (state->h2 << 20)) + + (uint64_t)U8TO32_LE(&state->key[4]); + f2 = ((state->h2 >> 12) | (state->h3 << 14)) + + (uint64_t)U8TO32_LE(&state->key[8]); + f3 = ((state->h3 >> 18) | (state->h4 << 8)) + + (uint64_t)U8TO32_LE(&state->key[12]); + + U32TO8_LE(&mac[0], f0); + f1 += (f0 >> 32); + U32TO8_LE(&mac[4], f1); + f2 += (f1 >> 32); + U32TO8_LE(&mac[8], f2); + f3 += (f2 >> 32); + U32TO8_LE(&mac[12], f3); +} + +#endif /* OPENSSL_WINDOWS || !OPENSSL_X86_64 */ diff --git a/crypto/poly1305/poly1305.h b/crypto/poly1305/poly1305.h new file mode 100644 index 00000000..a15bf1a8 --- /dev/null +++ b/crypto/poly1305/poly1305.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_POLY1305_H +#define OPENSSL_HEADER_POLY1305_H + +#include <openssl/base.h> + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef unsigned char poly1305_state[512]; + +/* poly1305_init sets up |state| so that it can be used to calculate an + * authentication tag with the one-time key |key|. Note that |key| is a + * one-time key and therefore there is no `reset' method because that would + * enable several messages to be authenticated with the same key. */ +extern void CRYPTO_poly1305_init(poly1305_state* state, const uint8_t key[32]); + +/* poly1305_update processes |in_len| bytes from |in|. It can be called zero or + * more times after poly1305_init. */ +extern void CRYPTO_poly1305_update(poly1305_state* state, const uint8_t* in, + size_t in_len); + +/* poly1305_finish completes the poly1305 calculation and writes a 16 byte + * authentication tag to |mac|. */ +extern void CRYPTO_poly1305_finish(poly1305_state* state, uint8_t mac[16]); + + +#if defined(__cplusplus) +} /* extern C */ +#endif + +#endif /* OPENSSL_HEADER_POLY1305_H */ diff --git a/crypto/poly1305/poly1305_arm.c b/crypto/poly1305/poly1305_arm.c new file mode 100644 index 00000000..9d5e2769 --- /dev/null +++ b/crypto/poly1305/poly1305_arm.c @@ -0,0 +1,288 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* This implementation was taken from the public domain, neon2 version in + * SUPERCOP by D. J. Bernstein and Peter Schwabe. 
*/ + +#include <openssl/poly1305.h> + + +#if defined(OPENSSL_ARM) + +typedef struct { + uint32_t v[12]; /* for alignment; only using 10 */ +} fe1305x2; + +#define addmulmod openssl_poly1305_neon2_addmulmod +#define blocks openssl_poly1305_neon2_blocks + +extern void addmulmod(fe1305x2 *r, const fe1305x2 *x, const fe1305x2 *y, + const fe1305x2 *c); + +extern int blocks(fe1305x2 *h, const fe1305x2 *precomp, const uint8_t *in, + unsigned int inlen); + +static void freeze(fe1305x2 *r) { + int i; + + uint32_t x0 = r->v[0]; + uint32_t x1 = r->v[2]; + uint32_t x2 = r->v[4]; + uint32_t x3 = r->v[6]; + uint32_t x4 = r->v[8]; + uint32_t y0; + uint32_t y1; + uint32_t y2; + uint32_t y3; + uint32_t y4; + uint32_t swap; + + for (i = 0; i < 3; ++i) { + x1 += x0 >> 26; + x0 &= 0x3ffffff; + x2 += x1 >> 26; + x1 &= 0x3ffffff; + x3 += x2 >> 26; + x2 &= 0x3ffffff; + x4 += x3 >> 26; + x3 &= 0x3ffffff; + x0 += 5 * (x4 >> 26); + x4 &= 0x3ffffff; + } + + y0 = x0 + 5; + y1 = x1 + (y0 >> 26); + y0 &= 0x3ffffff; + y2 = x2 + (y1 >> 26); + y1 &= 0x3ffffff; + y3 = x3 + (y2 >> 26); + y2 &= 0x3ffffff; + y4 = x4 + (y3 >> 26); + y3 &= 0x3ffffff; + swap = -(y4 >> 26); + y4 &= 0x3ffffff; + + y0 ^= x0; + y1 ^= x1; + y2 ^= x2; + y3 ^= x3; + y4 ^= x4; + + y0 &= swap; + y1 &= swap; + y2 &= swap; + y3 &= swap; + y4 &= swap; + + y0 ^= x0; + y1 ^= x1; + y2 ^= x2; + y3 ^= x3; + y4 ^= x4; + + r->v[0] = y0; + r->v[2] = y1; + r->v[4] = y2; + r->v[6] = y3; + r->v[8] = y4; +} + +static void fe1305x2_tobytearray(uint8_t *r, fe1305x2 *x) { + uint32_t x0 = x->v[0]; + uint32_t x1 = x->v[2]; + uint32_t x2 = x->v[4]; + uint32_t x3 = x->v[6]; + uint32_t x4 = x->v[8]; + + x1 += x0 >> 26; + x0 &= 0x3ffffff; + x2 += x1 >> 26; + x1 &= 0x3ffffff; + x3 += x2 >> 26; + x2 &= 0x3ffffff; + x4 += x3 >> 26; + x3 &= 0x3ffffff; + + *(uint32_t *)r = x0 + (x1 << 26); + *(uint32_t *)(r + 4) = (x1 >> 6) + (x2 << 20); + *(uint32_t *)(r + 8) = (x2 >> 12) + (x3 << 14); + *(uint32_t *)(r + 12) = (x3 >> 18) + (x4 << 8); +} + +/* load32 exists to avoid breaking strict aliasing rules in + * fe1305x2_frombytearray. 
*/ +static uint32_t load32(uint8_t *t) { + uint32_t tmp; + memcpy(&tmp, t, sizeof(tmp)); + return tmp; +} + +static void fe1305x2_frombytearray(fe1305x2 *r, const uint8_t *x, + unsigned long long xlen) { + int i; + uint8_t t[17]; + + for (i = 0; (i < 16) && (i < xlen); i++) + t[i] = x[i]; + xlen -= i; + x += i; + t[i++] = 1; + for (; i < 17; i++) + t[i] = 0; + + r->v[0] = 0x3ffffff & load32(t); + r->v[2] = 0x3ffffff & (load32(t + 3) >> 2); + r->v[4] = 0x3ffffff & (load32(t + 6) >> 4); + r->v[6] = 0x3ffffff & (load32(t + 9) >> 6); + r->v[8] = load32(t + 13); + + if (xlen) { + for (i = 0; (i < 16) && (i < xlen); i++) + t[i] = x[i]; + t[i++] = 1; + for (; i < 17; i++) + t[i] = 0; + + r->v[1] = 0x3ffffff & load32(t); + r->v[3] = 0x3ffffff & (load32(t + 3) >> 2); + r->v[5] = 0x3ffffff & (load32(t + 6) >> 4); + r->v[7] = 0x3ffffff & (load32(t + 9) >> 6); + r->v[9] = load32(t + 13); + } else + r->v[1] = r->v[3] = r->v[5] = r->v[7] = r->v[9] = 0; +} + +static const fe1305x2 zero __attribute__((aligned(16))); + +struct poly1305_state_st { + uint8_t data[sizeof(fe1305x2[5]) + 128]; + uint8_t buf[32]; + unsigned int buf_used; + uint8_t key[16]; +}; + +void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + unsigned int j; + + r->v[1] = r->v[0] = 0x3ffffff & *(uint32_t *)key; + r->v[3] = r->v[2] = 0x3ffff03 & ((*(uint32_t *)(key + 3)) >> 2); + r->v[5] = r->v[4] = 0x3ffc0ff & ((*(uint32_t *)(key + 6)) >> 4); + r->v[7] = r->v[6] = 0x3f03fff & ((*(uint32_t *)(key + 9)) >> 6); + r->v[9] = r->v[8] = 0x00fffff & ((*(uint32_t *)(key + 12)) >> 8); + + for (j = 0; j < 10; j++) + h->v[j] = 0; /* XXX: should fast-forward a bit */ + + addmulmod(precomp, r, r, &zero); /* precompute r^2 */ + addmulmod(precomp + 1, precomp, precomp, &zero); /* precompute r^4 */ + + memcpy(st->key, key + 16, 16); + st->buf_used = 0; +} + +void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, + size_t in_len) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + unsigned int i; + + if (st->buf_used) { + unsigned int todo = 32 - st->buf_used; + if (todo > in_len) + todo = in_len; + for (i = 0; i < todo; i++) + st->buf[st->buf_used + i] = in[i]; + st->buf_used += todo; + in_len -= todo; + in += todo; + + if (st->buf_used == sizeof(st->buf) && in_len) { + addmulmod(h, h, precomp, &zero); + fe1305x2_frombytearray(c, st->buf, sizeof(st->buf)); + for (i = 0; i < 10; i++) + h->v[i] += c->v[i]; + st->buf_used = 0; + } + } + + while (in_len > 32) { + unsigned int tlen = 1048576; + if (in_len < tlen) + tlen = in_len; + tlen -= blocks(h, precomp, in, tlen); + in_len -= tlen; + in += tlen; + } + + if (in_len) { + for (i = 0; i < in_len; i++) + st->buf[i] = in[i]; + st->buf_used = in_len; + } +} + +void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + + addmulmod(h, h, precomp, &zero); + + if (st->buf_used > 16) { + 
fe1305x2_frombytearray(c, st->buf, st->buf_used); + precomp->v[1] = r->v[1]; + precomp->v[3] = r->v[3]; + precomp->v[5] = r->v[5]; + precomp->v[7] = r->v[7]; + precomp->v[9] = r->v[9]; + addmulmod(h, h, precomp, c); + } else if (st->buf_used > 0) { + fe1305x2_frombytearray(c, st->buf, st->buf_used); + r->v[1] = 1; + r->v[3] = 0; + r->v[5] = 0; + r->v[7] = 0; + r->v[9] = 0; + addmulmod(h, h, r, c); + } + + h->v[0] += h->v[1]; + h->v[2] += h->v[3]; + h->v[4] += h->v[5]; + h->v[6] += h->v[7]; + h->v[8] += h->v[9]; + freeze(h); + + fe1305x2_frombytearray(c, st->key, 16); + c->v[8] ^= (1 << 24); + + h->v[0] += c->v[0]; + h->v[2] += c->v[2]; + h->v[4] += c->v[4]; + h->v[6] += c->v[6]; + h->v[8] += c->v[8]; + fe1305x2_tobytearray(mac, h); +} + +#endif /* OPENSSL_ARM */ diff --git a/crypto/poly1305/poly1305_arm_asm.S b/crypto/poly1305/poly1305_arm_asm.S new file mode 100644 index 00000000..e196e57d --- /dev/null +++ b/crypto/poly1305/poly1305_arm_asm.S @@ -0,0 +1,2013 @@ +#if defined(__arm__) + +# This implementation was taken from the public domain, neon2 version in +# SUPERCOP by D. J. Bernstein and Peter Schwabe. + +# qhasm: int32 input_0 + +# qhasm: int32 input_1 + +# qhasm: int32 input_2 + +# qhasm: int32 input_3 + +# qhasm: stack32 input_4 + +# qhasm: stack32 input_5 + +# qhasm: stack32 input_6 + +# qhasm: stack32 input_7 + +# qhasm: int32 caller_r4 + +# qhasm: int32 caller_r5 + +# qhasm: int32 caller_r6 + +# qhasm: int32 caller_r7 + +# qhasm: int32 caller_r8 + +# qhasm: int32 caller_r9 + +# qhasm: int32 caller_r10 + +# qhasm: int32 caller_r11 + +# qhasm: int32 caller_r12 + +# qhasm: int32 caller_r14 + +# qhasm: reg128 caller_q4 + +# qhasm: reg128 caller_q5 + +# qhasm: reg128 caller_q6 + +# qhasm: reg128 caller_q7 + +# qhasm: startcode +.fpu neon +.text + +# qhasm: reg128 r0 + +# qhasm: reg128 r1 + +# qhasm: reg128 r2 + +# qhasm: reg128 r3 + +# qhasm: reg128 r4 + +# qhasm: reg128 x01 + +# qhasm: reg128 x23 + +# qhasm: reg128 x4 + +# qhasm: reg128 y0 + +# qhasm: reg128 y12 + +# qhasm: reg128 y34 + +# qhasm: reg128 5y12 + +# qhasm: reg128 5y34 + +# qhasm: stack128 y0_stack + +# qhasm: stack128 y12_stack + +# qhasm: stack128 y34_stack + +# qhasm: stack128 5y12_stack + +# qhasm: stack128 5y34_stack + +# qhasm: reg128 z0 + +# qhasm: reg128 z12 + +# qhasm: reg128 z34 + +# qhasm: reg128 5z12 + +# qhasm: reg128 5z34 + +# qhasm: stack128 z0_stack + +# qhasm: stack128 z12_stack + +# qhasm: stack128 z34_stack + +# qhasm: stack128 5z12_stack + +# qhasm: stack128 5z34_stack + +# qhasm: stack128 two24 + +# qhasm: int32 ptr + +# qhasm: reg128 c01 + +# qhasm: reg128 c23 + +# qhasm: reg128 d01 + +# qhasm: reg128 d23 + +# qhasm: reg128 t0 + +# qhasm: reg128 t1 + +# qhasm: reg128 t2 + +# qhasm: reg128 t3 + +# qhasm: reg128 t4 + +# qhasm: reg128 mask + +# qhasm: reg128 u0 + +# qhasm: reg128 u1 + +# qhasm: reg128 u2 + +# qhasm: reg128 u3 + +# qhasm: reg128 u4 + +# qhasm: reg128 v01 + +# qhasm: reg128 mid + +# qhasm: reg128 v23 + +# qhasm: reg128 v4 + +# qhasm: int32 len + +# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks +.align 4 +.global openssl_poly1305_neon2_blocks +.type openssl_poly1305_neon2_blocks STT_FUNC +openssl_poly1305_neon2_blocks: +vpush {q4,q5,q6,q7} +mov r12,sp +sub sp,sp,#192 +and sp,sp,#0xffffffe0 + +# qhasm: len = input_3 +# asm 1: mov >len=int32#4,<input_3=int32#4 +# asm 2: mov >len=r3,<input_3=r3 +mov r3,r3 + +# qhasm: new y0 + +# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8 +# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]! +# asm 2: vld1.8 {<y0=d0},[<input_1=r1]! 
+vld1.8 {d0},[r1]! + +# qhasm: y12 = mem128[input_1]; input_1 += 16 +# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]! +# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]! +vld1.8 {d2-d3},[r1]! + +# qhasm: y34 = mem128[input_1]; input_1 += 16 +# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]! +# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]! +vld1.8 {d4-d5},[r1]! + +# qhasm: input_1 += 8 +# asm 1: add >input_1=int32#2,<input_1=int32#2,#8 +# asm 2: add >input_1=r1,<input_1=r1,#8 +add r1,r1,#8 + +# qhasm: new z0 + +# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8 +# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]! +# asm 2: vld1.8 {<z0=d6},[<input_1=r1]! +vld1.8 {d6},[r1]! + +# qhasm: z12 = mem128[input_1]; input_1 += 16 +# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]! +# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]! +vld1.8 {d8-d9},[r1]! + +# qhasm: z34 = mem128[input_1]; input_1 += 16 +# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]! +# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]! +vld1.8 {d10-d11},[r1]! + +# qhasm: 2x mask = 0xffffffff +# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff +# asm 2: vmov.i64 >mask=q6,#0xffffffff +vmov.i64 q6,#0xffffffff + +# qhasm: 2x u4 = 0xff +# asm 1: vmov.i64 >u4=reg128#8,#0xff +# asm 2: vmov.i64 >u4=q7,#0xff +vmov.i64 q7,#0xff + +# qhasm: x01 aligned= mem128[input_0];input_0+=16 +# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]! +# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]! +vld1.8 {d16-d17},[r0,: 128]! + +# qhasm: x23 aligned= mem128[input_0];input_0+=16 +# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]! +# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]! +vld1.8 {d18-d19},[r0,: 128]! 
+ +# qhasm: x4 aligned= mem64[input_0]x4[1] +# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64] +# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64] +vld1.8 {d20},[r0,: 64] + +# qhasm: input_0 -= 32 +# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32 +# asm 2: sub >input_0=r0,<input_0=r0,#32 +sub r0,r0,#32 + +# qhasm: 2x mask unsigned>>=6 +# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6 +# asm 2: vshr.u64 >mask=q6,<mask=q6,#6 +vshr.u64 q6,q6,#6 + +# qhasm: 2x u4 unsigned>>= 7 +# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7 +# asm 2: vshr.u64 >u4=q7,<u4=q7,#7 +vshr.u64 q7,q7,#7 + +# qhasm: 4x 5y12 = y12 << 2 +# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2 +# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2 +vshl.i32 q11,q1,#2 + +# qhasm: 4x 5y34 = y34 << 2 +# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2 +# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2 +vshl.i32 q12,q2,#2 + +# qhasm: 4x 5y12 += y12 +# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2 +# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1 +vadd.i32 q11,q11,q1 + +# qhasm: 4x 5y34 += y34 +# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3 +# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2 +vadd.i32 q12,q12,q2 + +# qhasm: 2x u4 <<= 24 +# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24 +# asm 2: vshl.i64 >u4=q7,<u4=q7,#24 +vshl.i64 q7,q7,#24 + +# qhasm: 4x 5z12 = z12 << 2 +# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2 +# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2 +vshl.i32 q13,q4,#2 + +# qhasm: 4x 5z34 = z34 << 2 +# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2 +# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2 +vshl.i32 q14,q5,#2 + +# qhasm: 4x 5z12 += z12 +# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5 +# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4 +vadd.i32 q13,q13,q4 + +# qhasm: 4x 5z34 += z34 +# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6 +# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5 +vadd.i32 q14,q14,q5 + +# qhasm: new two24 + +# qhasm: new y0_stack + +# qhasm: new y12_stack + +# qhasm: new y34_stack + +# qhasm: new 5y12_stack + +# qhasm: new 5y34_stack + +# qhasm: new z0_stack + +# qhasm: new z12_stack + +# qhasm: new z34_stack + +# qhasm: new 5z12_stack + +# qhasm: new 5z34_stack + +# qhasm: ptr = &two24 +# asm 1: lea >ptr=int32#2,<two24=stack128#1 +# asm 2: lea >ptr=r1,<two24=[sp,#0] +add r1,sp,#0 + +# qhasm: mem128[ptr] aligned= u4 +# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128] +vst1.8 {d14-d15},[r1,: 128] + +# qhasm: r4 = u4 +# asm 1: vmov >r4=reg128#16,<u4=reg128#8 +# asm 2: vmov >r4=q15,<u4=q7 +vmov q15,q7 + +# qhasm: r0 = u4 +# asm 1: vmov >r0=reg128#8,<u4=reg128#8 +# asm 2: vmov >r0=q7,<u4=q7 +vmov q7,q7 + +# qhasm: ptr = &y0_stack +# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2 +# asm 2: lea >ptr=r1,<y0_stack=[sp,#16] +add r1,sp,#16 + +# qhasm: mem128[ptr] aligned= y0 +# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128] +vst1.8 {d0-d1},[r1,: 128] + +# qhasm: ptr = &y12_stack +# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3 +# asm 2: lea >ptr=r1,<y12_stack=[sp,#32] +add r1,sp,#32 + +# qhasm: mem128[ptr] aligned= y12 +# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128] +vst1.8 {d2-d3},[r1,: 128] + +# qhasm: ptr = &y34_stack +# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4 +# asm 2: lea >ptr=r1,<y34_stack=[sp,#48] +add r1,sp,#48 + +# qhasm: mem128[ptr] aligned= y34 +# 
asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128] +vst1.8 {d4-d5},[r1,: 128] + +# qhasm: ptr = &z0_stack +# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7 +# asm 2: lea >ptr=r1,<z0_stack=[sp,#96] +add r1,sp,#96 + +# qhasm: mem128[ptr] aligned= z0 +# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128] +vst1.8 {d6-d7},[r1,: 128] + +# qhasm: ptr = &z12_stack +# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8 +# asm 2: lea >ptr=r1,<z12_stack=[sp,#112] +add r1,sp,#112 + +# qhasm: mem128[ptr] aligned= z12 +# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128] +vst1.8 {d8-d9},[r1,: 128] + +# qhasm: ptr = &z34_stack +# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9 +# asm 2: lea >ptr=r1,<z34_stack=[sp,#128] +add r1,sp,#128 + +# qhasm: mem128[ptr] aligned= z34 +# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128] +vst1.8 {d10-d11},[r1,: 128] + +# qhasm: ptr = &5y12_stack +# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5 +# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64] +add r1,sp,#64 + +# qhasm: mem128[ptr] aligned= 5y12 +# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128] +vst1.8 {d22-d23},[r1,: 128] + +# qhasm: ptr = &5y34_stack +# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6 +# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80] +add r1,sp,#80 + +# qhasm: mem128[ptr] aligned= 5y34 +# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128] +vst1.8 {d24-d25},[r1,: 128] + +# qhasm: ptr = &5z12_stack +# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10 +# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144] +add r1,sp,#144 + +# qhasm: mem128[ptr] aligned= 5z12 +# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128] +vst1.8 {d26-d27},[r1,: 128] + +# qhasm: ptr = &5z34_stack +# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11 +# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160] +add r1,sp,#160 + +# qhasm: mem128[ptr] aligned= 5z34 +# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128] +# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128] +vst1.8 {d28-d29},[r1,: 128] + +# qhasm: unsigned>? len - 64 +# asm 1: cmp <len=int32#4,#64 +# asm 2: cmp <len=r3,#64 +cmp r3,#64 + +# qhasm: goto below64bytes if !unsigned> +bls ._below64bytes + +# qhasm: input_2 += 32 +# asm 1: add >input_2=int32#2,<input_2=int32#3,#32 +# asm 2: add >input_2=r1,<input_2=r2,#32 +add r1,r2,#32 + +# qhasm: mainloop2: +._mainloop2: + +# qhasm: c01 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]! +# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]! +vld1.8 {d0-d1},[r1]! + +# qhasm: c23 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]! +# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]! +vld1.8 {d2-d3},[r1]! 
+ +# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3] +# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top +# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11 +vmlal.u32 q15,d16,d11 + +# qhasm: ptr = &z12_stack +# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8 +# asm 2: lea >ptr=r2,<z12_stack=[sp,#112] +add r2,sp,#112 + +# qhasm: z12 aligned= mem128[ptr] +# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128] +vld1.8 {d4-d5},[r2,: 128] + +# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1] +# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot +# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10 +vmlal.u32 q15,d17,d10 + +# qhasm: ptr = &z0_stack +# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7 +# asm 2: lea >ptr=r2,<z0_stack=[sp,#96] +add r2,sp,#96 + +# qhasm: z0 aligned= mem128[ptr] +# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128] +vld1.8 {d6-d7},[r2,: 128] + +# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3] +# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top +# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5 +vmlal.u32 q15,d18,d5 + +# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3] +# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top +# asm 2: vtrn.32 <c01=d1,<c23=d3 +vtrn.32 d1,d3 + +# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1] +# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot +# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4 +vmlal.u32 q15,d19,d4 + +# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1] +# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot +# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6 +vmlal.u32 q15,d20,d6 + +# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 +# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18 +# asm 2: vshll.u32 >r3=q4,<c23=d3,#18 +vshll.u32 q4,d3,#18 + +# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3] +# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot +# asm 2: vtrn.32 <c01=d0,<c23=d2 +vtrn.32 d0,d2 + +# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1] +# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot +# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10 +vmlal.u32 q4,d16,d10 + +# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3] +# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top +# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5 +vmlal.u32 q4,d17,d5 + +# qhasm: r0 = r0[1]c01[0]r0[2,3] +# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1 +# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1 +vext.32 d14,d14,d0,#1 + +# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1] +# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot +# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4 +vmlal.u32 q4,d18,d4 + +# qhasm: input_2 -= 64 +# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64 +# asm 2: sub >input_2=r1,<input_2=r1,#64 +sub r1,r1,#64 + +# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1] +# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot +# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6 +vmlal.u32 q4,d19,d6 + +# qhasm: ptr = &5z34_stack +# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11 +# asm 2: lea 
>ptr=r2,<5z34_stack=[sp,#160] +add r2,sp,#160 + +# qhasm: 5z34 aligned= mem128[ptr] +# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128] +vld1.8 {d10-d11},[r2,: 128] + +# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3] +# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top +# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11 +vmlal.u32 q4,d20,d11 + +# qhasm: r0 = r0[1]r0[0]r0[3]r0[2] +# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8 +# asm 2: vrev64.i32 >r0=q7,<r0=q7 +vrev64.i32 q7,q7 + +# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 +# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12 +# asm 2: vshll.u32 >r2=q13,<c01=d1,#12 +vshll.u32 q13,d1,#12 + +# qhasm: d01 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]! +# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]! +vld1.8 {d22-d23},[r1]! + +# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3] +# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top +# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5 +vmlal.u32 q13,d16,d5 + +# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1] +# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot +# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4 +vmlal.u32 q13,d17,d4 + +# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1] +# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot +# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6 +vmlal.u32 q13,d18,d6 + +# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3] +# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top +# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11 +vmlal.u32 q13,d19,d11 + +# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1] +# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot +# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10 +vmlal.u32 q13,d20,d10 + +# qhasm: r0 = r0[0,1]c01[1]r0[2] +# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1 +# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1 +vext.32 d15,d0,d15,#1 + +# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 +# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6 +# asm 2: vshll.u32 >r1=q14,<c23=d2,#6 +vshll.u32 q14,d2,#6 + +# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1] +# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot +# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4 +vmlal.u32 q14,d16,d4 + +# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1] +# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot +# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6 +vmlal.u32 q14,d17,d6 + +# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3] +# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top +# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11 +vmlal.u32 q14,d18,d11 + +# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1] +# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot +# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10 +vmlal.u32 q14,d19,d10 + +# qhasm: ptr = &5z12_stack +# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10 +# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144] +add r2,sp,#144 + +# qhasm: 5z12 aligned= mem128[ptr] +# asm 1: vld1.8 
{>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128] +vld1.8 {d0-d1},[r2,: 128] + +# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3] +# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top +# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1 +vmlal.u32 q14,d20,d1 + +# qhasm: d23 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]! +# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]! +vld1.8 {d2-d3},[r1]! + +# qhasm: input_2 += 32 +# asm 1: add >input_2=int32#2,<input_2=int32#2,#32 +# asm 2: add >input_2=r1,<input_2=r1,#32 +add r1,r1,#32 + +# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1] +# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot +# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0 +vmlal.u32 q7,d20,d0 + +# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1] +# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot +# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10 +vmlal.u32 q7,d18,d10 + +# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1] +# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top +# asm 2: vswp <d23=d2,<d01=d23 +vswp d2,d23 + +# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3] +# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top +# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1 +vmlal.u32 q7,d19,d1 + +# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1] +# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot +# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6 +vmlal.u32 q7,d16,d6 + +# qhasm: new mid + +# qhasm: 2x v4 = d23 unsigned>> 40 +# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40 +# asm 2: vshr.u64 >v4=q3,<d23=q1,#40 +vshr.u64 q3,q1,#40 + +# qhasm: mid = d01[1]d23[0] mid[2,3] +# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1 +# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1 +vext.32 d0,d22,d2,#1 + +# qhasm: new v23 + +# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14 +# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14 +# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14 +vshrn.u64 d19,q1,#14 + +# qhasm: mid = mid[0,1] d01[3]d23[2] +# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1 +# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1 +vext.32 d1,d23,d3,#1 + +# qhasm: new v01 + +# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26 +# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26 +# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26 +vshrn.u64 d21,q11,#26 + +# qhasm: v01 = d01[1]d01[0] v01[2,3] +# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1 +# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1 +vext.32 d20,d22,d22,#1 + +# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3] +# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top +# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11 +vmlal.u32 q7,d17,d11 + +# qhasm: v01 = v01[1]d01[2] v01[2,3] +# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1 +# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1 +vext.32 d20,d20,d23,#1 + +# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20 +# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20 +# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20 +vshrn.u64 d18,q0,#20 + +# qhasm: v4 = v4[0]v4[2]v4[1]v4[3] +# asm 1: vtrn.32 
<v4=reg128#4%bot,<v4=reg128#4%top +# asm 2: vtrn.32 <v4=d6,<v4=d7 +vtrn.32 d6,d7 + +# qhasm: 4x v01 &= 0x03ffffff +# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff +# asm 2: vand.i32 <v01=q10,#0x03ffffff +vand.i32 q10,#0x03ffffff + +# qhasm: ptr = &y34_stack +# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4 +# asm 2: lea >ptr=r2,<y34_stack=[sp,#48] +add r2,sp,#48 + +# qhasm: y34 aligned= mem128[ptr] +# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128] +vld1.8 {d4-d5},[r2,: 128] + +# qhasm: 4x v23 &= 0x03ffffff +# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff +# asm 2: vand.i32 <v23=q9,#0x03ffffff +vand.i32 q9,#0x03ffffff + +# qhasm: ptr = &y12_stack +# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3 +# asm 2: lea >ptr=r2,<y12_stack=[sp,#32] +add r2,sp,#32 + +# qhasm: y12 aligned= mem128[ptr] +# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128] +vld1.8 {d2-d3},[r2,: 128] + +# qhasm: 4x v4 |= 0x01000000 +# asm 1: vorr.i32 <v4=reg128#4,#0x01000000 +# asm 2: vorr.i32 <v4=q3,#0x01000000 +vorr.i32 q3,#0x01000000 + +# qhasm: ptr = &y0_stack +# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2 +# asm 2: lea >ptr=r2,<y0_stack=[sp,#16] +add r2,sp,#16 + +# qhasm: y0 aligned= mem128[ptr] +# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128] +vld1.8 {d0-d1},[r2,: 128] + +# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3] +# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top +# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5 +vmlal.u32 q15,d20,d5 + +# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1] +# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot +# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4 +vmlal.u32 q15,d21,d4 + +# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3] +# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top +# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3 +vmlal.u32 q15,d18,d3 + +# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1] +# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2 +vmlal.u32 q15,d19,d2 + +# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1] +# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0 +vmlal.u32 q15,d6,d0 + +# qhasm: ptr = &5y34_stack +# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6 +# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80] +add r2,sp,#80 + +# qhasm: 5y34 aligned= mem128[ptr] +# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128] +vld1.8 {d24-d25},[r2,: 128] + +# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1] +# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot +# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4 +vmlal.u32 q4,d20,d4 + +# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3] +# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top +# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3 +vmlal.u32 q4,d21,d3 + +# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1] +# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2 
+vmlal.u32 q4,d18,d2 + +# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1] +# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0 +vmlal.u32 q4,d19,d0 + +# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25 +vmlal.u32 q4,d6,d25 + +# qhasm: ptr = &5y12_stack +# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5 +# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64] +add r2,sp,#64 + +# qhasm: 5y12 aligned= mem128[ptr] +# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128] +vld1.8 {d22-d23},[r2,: 128] + +# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1] +# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot +# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22 +vmlal.u32 q7,d6,d22 + +# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24 +vmlal.u32 q7,d18,d24 + +# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3] +# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top +# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23 +vmlal.u32 q7,d19,d23 + +# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1] +# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0 +vmlal.u32 q7,d20,d0 + +# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25 +vmlal.u32 q7,d21,d25 + +# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1] +# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2 +vmlal.u32 q14,d20,d2 + +# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1] +# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0 +vmlal.u32 q14,d21,d0 + +# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25 +vmlal.u32 q14,d18,d25 + +# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24 +vmlal.u32 q14,d19,d24 + +# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3] +# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top +# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23 +vmlal.u32 q14,d6,d23 + +# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3] +# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top +# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3 +vmlal.u32 q13,d20,d3 + +# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1] +# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2 +vmlal.u32 q13,d21,d2 + +# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1] +# asm 1: 
vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0 +vmlal.u32 q13,d18,d0 + +# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25 +vmlal.u32 q13,d19,d25 + +# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24 +vmlal.u32 q13,d6,d24 + +# qhasm: ptr = &two24 +# asm 1: lea >ptr=int32#3,<two24=stack128#1 +# asm 2: lea >ptr=r2,<two24=[sp,#0] +add r2,sp,#0 + +# qhasm: 2x t1 = r0 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26 +# asm 2: vshr.u64 >t1=q3,<r0=q7,#26 +vshr.u64 q3,q7,#26 + +# qhasm: len -= 64 +# asm 1: sub >len=int32#4,<len=int32#4,#64 +# asm 2: sub >len=r3,<len=r3,#64 +sub r3,r3,#64 + +# qhasm: r0 &= mask +# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7 +# asm 2: vand >r0=q5,<r0=q7,<mask=q6 +vand q5,q7,q6 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4 +# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3 +vadd.i64 q3,q14,q3 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26 +# asm 2: vshr.u64 >t4=q7,<r3=q4,#26 +vshr.u64 q7,q4,#26 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 +# asm 2: vand >r3=q4,<r3=q4,<mask=q6 +vand q4,q4,q6 + +# qhasm: 2x x4 = r4 + t4 +# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8 +# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7 +vadd.i64 q7,q15,q7 + +# qhasm: r4 aligned= mem128[ptr] +# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128] +vld1.8 {d30-d31},[r2,: 128] + +# qhasm: 2x t2 = r1 unsigned>> 26 +# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26 +# asm 2: vshr.u64 >t2=q8,<r1=q3,#26 +vshr.u64 q8,q3,#26 + +# qhasm: r1 &= mask +# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7 +# asm 2: vand >r1=q3,<r1=q3,<mask=q6 +vand q3,q3,q6 + +# qhasm: 2x t0 = x4 unsigned>> 26 +# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26 +# asm 2: vshr.u64 >t0=q9,<x4=q7,#26 +vshr.u64 q9,q7,#26 + +# qhasm: 2x r2 += t2 +# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9 +# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8 +vadd.i64 q8,q13,q8 + +# qhasm: x4 &= mask +# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7 +# asm 2: vand >x4=q10,<x4=q7,<mask=q6 +vand q10,q7,q6 + +# qhasm: 2x x01 = r0 + t0 +# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10 +# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9 +vadd.i64 q5,q5,q9 + +# qhasm: r0 aligned= mem128[ptr] +# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128] +# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128] +vld1.8 {d14-d15},[r2,: 128] + +# qhasm: ptr = &z34_stack +# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9 +# asm 2: lea >ptr=r2,<z34_stack=[sp,#128] +add r2,sp,#128 + +# qhasm: 2x t0 <<= 2 +# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2 +# asm 2: vshl.i64 >t0=q9,<t0=q9,#2 +vshl.i64 q9,q9,#2 + +# qhasm: 2x t3 = r2 unsigned>> 26 +# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26 +# asm 2: vshr.u64 >t3=q13,<r2=q8,#26 +vshr.u64 q13,q8,#26 + +# qhasm: 2x x01 += t0 +# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10 +# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9 +vadd.i64 q14,q5,q9 + +# qhasm: z34 aligned= mem128[ptr] +# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 
128] +# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128] +vld1.8 {d10-d11},[r2,: 128] + +# qhasm: x23 = r2 & mask +# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7 +# asm 2: vand >x23=q9,<r2=q8,<mask=q6 +vand q9,q8,q6 + +# qhasm: 2x r3 += t3 +# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14 +# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13 +vadd.i64 q4,q4,q13 + +# qhasm: input_2 += 32 +# asm 1: add >input_2=int32#2,<input_2=int32#2,#32 +# asm 2: add >input_2=r1,<input_2=r1,#32 +add r1,r1,#32 + +# qhasm: 2x t1 = x01 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26 +# asm 2: vshr.u64 >t1=q13,<x01=q14,#26 +vshr.u64 q13,q14,#26 + +# qhasm: x23 = x23[0,2,1,3] +# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top +# asm 2: vtrn.32 <x23=d18,<x23=d19 +vtrn.32 d18,d19 + +# qhasm: x01 = x01 & mask +# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7 +# asm 2: vand >x01=q8,<x01=q14,<mask=q6 +vand q8,q14,q6 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14 +# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13 +vadd.i64 q3,q3,q13 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26 +# asm 2: vshr.u64 >t4=q13,<r3=q4,#26 +vshr.u64 q13,q4,#26 + +# qhasm: x01 = x01[0,2,1,3] +# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top +# asm 2: vtrn.32 <x01=d16,<x01=d17 +vtrn.32 d16,d17 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 +# asm 2: vand >r3=q4,<r3=q4,<mask=q6 +vand q4,q4,q6 + +# qhasm: r1 = r1[0,2,1,3] +# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top +# asm 2: vtrn.32 <r1=d6,<r1=d7 +vtrn.32 d6,d7 + +# qhasm: 2x x4 += t4 +# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14 +# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13 +vadd.i64 q10,q10,q13 + +# qhasm: r3 = r3[0,2,1,3] +# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top +# asm 2: vtrn.32 <r3=d8,<r3=d9 +vtrn.32 d8,d9 + +# qhasm: x01 = x01[0,1] r1[0,1] +# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 +# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 +vext.32 d17,d6,d6,#0 + +# qhasm: x23 = x23[0,1] r3[0,1] +# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0 +# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0 +vext.32 d19,d8,d8,#0 + +# qhasm: x4 = x4[0,2,1,3] +# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top +# asm 2: vtrn.32 <x4=d20,<x4=d21 +vtrn.32 d20,d21 + +# qhasm: unsigned>? len - 64 +# asm 1: cmp <len=int32#4,#64 +# asm 2: cmp <len=r3,#64 +cmp r3,#64 + +# qhasm: goto mainloop2 if unsigned> +bhi ._mainloop2 + +# qhasm: input_2 -= 32 +# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32 +# asm 2: sub >input_2=r2,<input_2=r1,#32 +sub r2,r1,#32 + +# qhasm: below64bytes: +._below64bytes: + +# qhasm: unsigned>? len - 32 +# asm 1: cmp <len=int32#4,#32 +# asm 2: cmp <len=r3,#32 +cmp r3,#32 + +# qhasm: goto end if !unsigned> +bls ._end + +# qhasm: mainloop: +._mainloop: + +# qhasm: new r0 + +# qhasm: ptr = &two24 +# asm 1: lea >ptr=int32#2,<two24=stack128#1 +# asm 2: lea >ptr=r1,<two24=[sp,#0] +add r1,sp,#0 + +# qhasm: r4 aligned= mem128[ptr] +# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128] +# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128] +vld1.8 {d8-d9},[r1,: 128] + +# qhasm: u4 aligned= mem128[ptr] +# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128] +# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128] +vld1.8 {d10-d11},[r1,: 128] + +# qhasm: c01 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]! 
+# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]! +vld1.8 {d14-d15},[r2]! + +# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3] +# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top +# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5 +vmlal.u32 q4,d16,d5 + +# qhasm: c23 = mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]! +# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]! +vld1.8 {d26-d27},[r2]! + +# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1] +# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot +# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4 +vmlal.u32 q4,d17,d4 + +# qhasm: r0 = u4[1]c01[0]r0[2,3] +# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1 +# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1 +vext.32 d6,d10,d14,#1 + +# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3] +# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top +# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3 +vmlal.u32 q4,d18,d3 + +# qhasm: r0 = r0[0,1]u4[1]c23[0] +# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1 +# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1 +vext.32 d7,d10,d26,#1 + +# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1] +# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2 +vmlal.u32 q4,d19,d2 + +# qhasm: r0 = r0[1]r0[0]r0[3]r0[2] +# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4 +# asm 2: vrev64.i32 >r0=q3,<r0=q3 +vrev64.i32 q3,q3 + +# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1] +# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0 +vmlal.u32 q4,d20,d0 + +# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1] +# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot +# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22 +vmlal.u32 q3,d20,d22 + +# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24 +vmlal.u32 q3,d18,d24 + +# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3] +# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top +# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23 +vmlal.u32 q3,d19,d23 + +# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3] +# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14 +# asm 2: vtrn.32 <c01=q7,<c23=q13 +vtrn.32 q7,q13 + +# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1] +# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0 +vmlal.u32 q3,d16,d0 + +# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 +# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18 +# asm 2: vshll.u32 >r3=q5,<c23=d27,#18 +vshll.u32 q5,d27,#18 + +# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25 +vmlal.u32 q3,d17,d25 + +# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1] +# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot +# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4 +vmlal.u32 q5,d16,d4 + +# qhasm: r3[0,1] += x01[2] unsigned* 
y12[2]; r3[2,3] += x01[3] unsigned* y12[3] +# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top +# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3 +vmlal.u32 q5,d17,d3 + +# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1] +# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2 +vmlal.u32 q5,d18,d2 + +# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1] +# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0 +vmlal.u32 q5,d19,d0 + +# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 +# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6 +# asm 2: vshll.u32 >r1=q13,<c23=d26,#6 +vshll.u32 q13,d26,#6 + +# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25 +vmlal.u32 q5,d20,d25 + +# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1] +# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2 +vmlal.u32 q13,d16,d2 + +# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1] +# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0 +vmlal.u32 q13,d17,d0 + +# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25 +vmlal.u32 q13,d18,d25 + +# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24 +vmlal.u32 q13,d19,d24 + +# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 +# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12 +# asm 2: vshll.u32 >r2=q7,<c01=d15,#12 +vshll.u32 q7,d15,#12 + +# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3] +# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top +# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23 +vmlal.u32 q13,d20,d23 + +# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3] +# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top +# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3 +vmlal.u32 q7,d16,d3 + +# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1] +# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot +# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2 +vmlal.u32 q7,d17,d2 + +# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1] +# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot +# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0 +vmlal.u32 q7,d18,d0 + +# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3] +# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top +# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25 +vmlal.u32 q7,d19,d25 + +# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1] +# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot +# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24 +vmlal.u32 q7,d20,d24 + +# qhasm: 2x t1 = r0 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26 +# asm 2: vshr.u64 >t1=q8,<r0=q3,#26 +vshr.u64 q8,q3,#26 + +# qhasm: r0 &= mask +# 
asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7 +# asm 2: vand >r0=q3,<r0=q3,<mask=q6 +vand q3,q3,q6 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9 +# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8 +vadd.i64 q8,q13,q8 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26 +# asm 2: vshr.u64 >t4=q9,<r3=q5,#26 +vshr.u64 q9,q5,#26 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 +# asm 2: vand >r3=q5,<r3=q5,<mask=q6 +vand q5,q5,q6 + +# qhasm: 2x r4 += t4 +# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10 +# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9 +vadd.i64 q4,q4,q9 + +# qhasm: 2x t2 = r1 unsigned>> 26 +# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26 +# asm 2: vshr.u64 >t2=q9,<r1=q8,#26 +vshr.u64 q9,q8,#26 + +# qhasm: r1 &= mask +# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7 +# asm 2: vand >r1=q10,<r1=q8,<mask=q6 +vand q10,q8,q6 + +# qhasm: 2x t0 = r4 unsigned>> 26 +# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26 +# asm 2: vshr.u64 >t0=q8,<r4=q4,#26 +vshr.u64 q8,q4,#26 + +# qhasm: 2x r2 += t2 +# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10 +# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9 +vadd.i64 q7,q7,q9 + +# qhasm: r4 &= mask +# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7 +# asm 2: vand >r4=q4,<r4=q4,<mask=q6 +vand q4,q4,q6 + +# qhasm: 2x r0 += t0 +# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 +# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 +vadd.i64 q3,q3,q8 + +# qhasm: 2x t0 <<= 2 +# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2 +# asm 2: vshl.i64 >t0=q8,<t0=q8,#2 +vshl.i64 q8,q8,#2 + +# qhasm: 2x t3 = r2 unsigned>> 26 +# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26 +# asm 2: vshr.u64 >t3=q13,<r2=q7,#26 +vshr.u64 q13,q7,#26 + +# qhasm: 2x r0 += t0 +# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 +# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 +vadd.i64 q3,q3,q8 + +# qhasm: x23 = r2 & mask +# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7 +# asm 2: vand >x23=q9,<r2=q7,<mask=q6 +vand q9,q7,q6 + +# qhasm: 2x r3 += t3 +# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14 +# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13 +vadd.i64 q5,q5,q13 + +# qhasm: 2x t1 = r0 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26 +# asm 2: vshr.u64 >t1=q7,<r0=q3,#26 +vshr.u64 q7,q3,#26 + +# qhasm: x01 = r0 & mask +# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7 +# asm 2: vand >x01=q8,<r0=q3,<mask=q6 +vand q8,q3,q6 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8 +# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7 +vadd.i64 q3,q10,q7 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26 +# asm 2: vshr.u64 >t4=q7,<r3=q5,#26 +vshr.u64 q7,q5,#26 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 +# asm 2: vand >r3=q5,<r3=q5,<mask=q6 +vand q5,q5,q6 + +# qhasm: 2x x4 = r4 + t4 +# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8 +# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7 +vadd.i64 q10,q4,q7 + +# qhasm: len -= 32 +# asm 1: sub >len=int32#4,<len=int32#4,#32 +# asm 2: sub >len=r3,<len=r3,#32 +sub r3,r3,#32 + +# qhasm: x01 = x01[0,2,1,3] +# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top +# asm 2: vtrn.32 <x01=d16,<x01=d17 +vtrn.32 d16,d17 + +# qhasm: x23 = x23[0,2,1,3] +# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top +# asm 2: vtrn.32 <x23=d18,<x23=d19 +vtrn.32 d18,d19 + +# qhasm: r1 = r1[0,2,1,3] +# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top +# asm 2: vtrn.32 
<r1=d6,<r1=d7 +vtrn.32 d6,d7 + +# qhasm: r3 = r3[0,2,1,3] +# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top +# asm 2: vtrn.32 <r3=d10,<r3=d11 +vtrn.32 d10,d11 + +# qhasm: x4 = x4[0,2,1,3] +# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top +# asm 2: vtrn.32 <x4=d20,<x4=d21 +vtrn.32 d20,d21 + +# qhasm: x01 = x01[0,1] r1[0,1] +# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 +# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 +vext.32 d17,d6,d6,#0 + +# qhasm: x23 = x23[0,1] r3[0,1] +# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0 +# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0 +vext.32 d19,d10,d10,#0 + +# qhasm: unsigned>? len - 32 +# asm 1: cmp <len=int32#4,#32 +# asm 2: cmp <len=r3,#32 +cmp r3,#32 + +# qhasm: goto mainloop if unsigned> +bhi ._mainloop + +# qhasm: end: +._end: + +# qhasm: mem128[input_0] = x01;input_0+=16 +# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]! +# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]! +vst1.8 {d16-d17},[r0]! + +# qhasm: mem128[input_0] = x23;input_0+=16 +# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]! +# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]! +vst1.8 {d18-d19},[r0]! + +# qhasm: mem64[input_0] = x4[0] +# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1] +# asm 2: vst1.8 <x4=d20,[<input_0=r0] +vst1.8 d20,[r0] + +# qhasm: len = len +# asm 1: mov >len=int32#1,<len=int32#4 +# asm 2: mov >len=r0,<len=r3 +mov r0,r3 + +# qhasm: qpopreturn len +mov sp,r12 +vpop {q4,q5,q6,q7} +bx lr + +# qhasm: int32 input_0 + +# qhasm: int32 input_1 + +# qhasm: int32 input_2 + +# qhasm: int32 input_3 + +# qhasm: stack32 input_4 + +# qhasm: stack32 input_5 + +# qhasm: stack32 input_6 + +# qhasm: stack32 input_7 + +# qhasm: int32 caller_r4 + +# qhasm: int32 caller_r5 + +# qhasm: int32 caller_r6 + +# qhasm: int32 caller_r7 + +# qhasm: int32 caller_r8 + +# qhasm: int32 caller_r9 + +# qhasm: int32 caller_r10 + +# qhasm: int32 caller_r11 + +# qhasm: int32 caller_r12 + +# qhasm: int32 caller_r14 + +# qhasm: reg128 caller_q4 + +# qhasm: reg128 caller_q5 + +# qhasm: reg128 caller_q6 + +# qhasm: reg128 caller_q7 + +# qhasm: reg128 r0 + +# qhasm: reg128 r1 + +# qhasm: reg128 r2 + +# qhasm: reg128 r3 + +# qhasm: reg128 r4 + +# qhasm: reg128 x01 + +# qhasm: reg128 x23 + +# qhasm: reg128 x4 + +# qhasm: reg128 y01 + +# qhasm: reg128 y23 + +# qhasm: reg128 y4 + +# qhasm: reg128 _5y01 + +# qhasm: reg128 _5y23 + +# qhasm: reg128 _5y4 + +# qhasm: reg128 c01 + +# qhasm: reg128 c23 + +# qhasm: reg128 c4 + +# qhasm: reg128 t0 + +# qhasm: reg128 t1 + +# qhasm: reg128 t2 + +# qhasm: reg128 t3 + +# qhasm: reg128 t4 + +# qhasm: reg128 mask + +# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod +.align 2 +.global openssl_poly1305_neon2_addmulmod +.type openssl_poly1305_neon2_addmulmod STT_FUNC +openssl_poly1305_neon2_addmulmod: +sub sp,sp,#0 + +# qhasm: 2x mask = 0xffffffff +# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff +# asm 2: vmov.i64 >mask=q0,#0xffffffff +vmov.i64 q0,#0xffffffff + +# qhasm: y01 aligned= mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]! +# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]! +vld1.8 {d2-d3},[r2,: 128]! + +# qhasm: 4x _5y01 = y01 << 2 +# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2 +# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2 +vshl.i32 q2,q1,#2 + +# qhasm: y23 aligned= mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]! 
+# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]! +vld1.8 {d6-d7},[r2,: 128]! + +# qhasm: 4x _5y23 = y23 << 2 +# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2 +# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2 +vshl.i32 q8,q3,#2 + +# qhasm: y4 aligned= mem64[input_2]y4[1] +# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64] +# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64] +vld1.8 {d18},[r2,: 64] + +# qhasm: 4x _5y4 = y4 << 2 +# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2 +# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2 +vshl.i32 q10,q9,#2 + +# qhasm: x01 aligned= mem128[input_1];input_1+=16 +# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]! +# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]! +vld1.8 {d22-d23},[r1,: 128]! + +# qhasm: 4x _5y01 += y01 +# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2 +# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1 +vadd.i32 q2,q2,q1 + +# qhasm: x23 aligned= mem128[input_1];input_1+=16 +# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]! +# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]! +vld1.8 {d24-d25},[r1,: 128]! + +# qhasm: 4x _5y23 += y23 +# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4 +# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3 +vadd.i32 q8,q8,q3 + +# qhasm: 4x _5y4 += y4 +# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10 +# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9 +vadd.i32 q10,q10,q9 + +# qhasm: c01 aligned= mem128[input_3];input_3+=16 +# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]! +# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]! +vld1.8 {d26-d27},[r3,: 128]! + +# qhasm: 4x x01 += c01 +# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14 +# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13 +vadd.i32 q11,q11,q13 + +# qhasm: c23 aligned= mem128[input_3];input_3+=16 +# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]! +# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]! +vld1.8 {d26-d27},[r3,: 128]! 
+ +# qhasm: 4x x23 += c23 +# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14 +# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13 +vadd.i32 q12,q12,q13 + +# qhasm: x4 aligned= mem64[input_1]x4[1] +# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64] +# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64] +vld1.8 {d26},[r1,: 64] + +# qhasm: 2x mask unsigned>>=6 +# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6 +# asm 2: vshr.u64 >mask=q0,<mask=q0,#6 +vshr.u64 q0,q0,#6 + +# qhasm: c4 aligned= mem64[input_3]c4[1] +# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64] +# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64] +vld1.8 {d28},[r3,: 64] + +# qhasm: 4x x4 += c4 +# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15 +# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14 +vadd.i32 q13,q13,q14 + +# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1] +# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot +# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2 +vmull.u32 q14,d22,d2 + +# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1] +# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot +# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20 +vmlal.u32 q14,d23,d20 + +# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3] +# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top +# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17 +vmlal.u32 q14,d24,d17 + +# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1] +# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot +# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16 +vmlal.u32 q14,d25,d16 + +# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3] +# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top +# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5 +vmlal.u32 q14,d26,d5 + +# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3] +# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top +# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3 +vmull.u32 q2,d22,d3 + +# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1] +# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot +# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2 +vmlal.u32 q2,d23,d2 + +# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1] +# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot +# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20 +vmlal.u32 q2,d24,d20 + +# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3] +# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top +# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17 +vmlal.u32 q2,d25,d17 + +# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1] +# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot +# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16 +vmlal.u32 q2,d26,d16 + +# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1] +# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot +# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6 +vmull.u32 q15,d22,d6 + +# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3] +# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top +# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3 +vmlal.u32 q15,d23,d3 + +# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] 
unsigned* y01[1] +# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot +# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2 +vmlal.u32 q15,d24,d2 + +# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1] +# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot +# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20 +vmlal.u32 q15,d25,d20 + +# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3] +# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top +# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17 +vmlal.u32 q15,d26,d17 + +# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3] +# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top +# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7 +vmull.u32 q8,d22,d7 + +# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1] +# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot +# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6 +vmlal.u32 q8,d23,d6 + +# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3] +# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top +# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3 +vmlal.u32 q8,d24,d3 + +# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1] +# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot +# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2 +vmlal.u32 q8,d25,d2 + +# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1] +# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot +# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20 +vmlal.u32 q8,d26,d20 + +# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1] +# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot +# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18 +vmull.u32 q9,d22,d18 + +# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3] +# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top +# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7 +vmlal.u32 q9,d23,d7 + +# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1] +# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot +# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6 +vmlal.u32 q9,d24,d6 + +# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3] +# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top +# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3 +vmlal.u32 q9,d25,d3 + +# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1] +# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot +# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2 +vmlal.u32 q9,d26,d2 + +# qhasm: 2x t1 = r0 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26 +# asm 2: vshr.u64 >t1=q1,<r0=q14,#26 +vshr.u64 q1,q14,#26 + +# qhasm: r0 &= mask +# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1 +# asm 2: vand >r0=q3,<r0=q14,<mask=q0 +vand q3,q14,q0 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2 +# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1 +vadd.i64 q1,q2,q1 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26 +# asm 2: vshr.u64 >t4=q2,<r3=q8,#26 +vshr.u64 q2,q8,#26 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1 +# asm 2: vand >r3=q8,<r3=q8,<mask=q0 +vand q8,q8,q0 + +# qhasm: 2x r4 += t4 +# asm 1: vadd.i64 
>r4=reg128#3,<r4=reg128#10,<t4=reg128#3 +# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2 +vadd.i64 q2,q9,q2 + +# qhasm: 2x t2 = r1 unsigned>> 26 +# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26 +# asm 2: vshr.u64 >t2=q9,<r1=q1,#26 +vshr.u64 q9,q1,#26 + +# qhasm: r1 &= mask +# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1 +# asm 2: vand >r1=q1,<r1=q1,<mask=q0 +vand q1,q1,q0 + +# qhasm: 2x t0 = r4 unsigned>> 26 +# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26 +# asm 2: vshr.u64 >t0=q10,<r4=q2,#26 +vshr.u64 q10,q2,#26 + +# qhasm: 2x r2 += t2 +# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10 +# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9 +vadd.i64 q9,q15,q9 + +# qhasm: r4 &= mask +# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1 +# asm 2: vand >r4=q2,<r4=q2,<mask=q0 +vand q2,q2,q0 + +# qhasm: 2x r0 += t0 +# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 +# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 +vadd.i64 q3,q3,q10 + +# qhasm: 2x t0 <<= 2 +# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2 +# asm 2: vshl.i64 >t0=q10,<t0=q10,#2 +vshl.i64 q10,q10,#2 + +# qhasm: 2x t3 = r2 unsigned>> 26 +# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26 +# asm 2: vshr.u64 >t3=q11,<r2=q9,#26 +vshr.u64 q11,q9,#26 + +# qhasm: 2x r0 += t0 +# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 +# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 +vadd.i64 q3,q3,q10 + +# qhasm: x23 = r2 & mask +# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1 +# asm 2: vand >x23=q9,<r2=q9,<mask=q0 +vand q9,q9,q0 + +# qhasm: 2x r3 += t3 +# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12 +# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11 +vadd.i64 q8,q8,q11 + +# qhasm: 2x t1 = r0 unsigned>> 26 +# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26 +# asm 2: vshr.u64 >t1=q10,<r0=q3,#26 +vshr.u64 q10,q3,#26 + +# qhasm: x23 = x23[0,2,1,3] +# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top +# asm 2: vtrn.32 <x23=d18,<x23=d19 +vtrn.32 d18,d19 + +# qhasm: x01 = r0 & mask +# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1 +# asm 2: vand >x01=q3,<r0=q3,<mask=q0 +vand q3,q3,q0 + +# qhasm: 2x r1 += t1 +# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11 +# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10 +vadd.i64 q1,q1,q10 + +# qhasm: 2x t4 = r3 unsigned>> 26 +# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26 +# asm 2: vshr.u64 >t4=q10,<r3=q8,#26 +vshr.u64 q10,q8,#26 + +# qhasm: x01 = x01[0,2,1,3] +# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top +# asm 2: vtrn.32 <x01=d6,<x01=d7 +vtrn.32 d6,d7 + +# qhasm: r3 &= mask +# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1 +# asm 2: vand >r3=q0,<r3=q8,<mask=q0 +vand q0,q8,q0 + +# qhasm: r1 = r1[0,2,1,3] +# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top +# asm 2: vtrn.32 <r1=d2,<r1=d3 +vtrn.32 d2,d3 + +# qhasm: 2x x4 = r4 + t4 +# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11 +# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10 +vadd.i64 q2,q2,q10 + +# qhasm: r3 = r3[0,2,1,3] +# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top +# asm 2: vtrn.32 <r3=d0,<r3=d1 +vtrn.32 d0,d1 + +# qhasm: x01 = x01[0,1] r1[0,1] +# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0 +# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0 +vext.32 d7,d2,d2,#0 + +# qhasm: x23 = x23[0,1] r3[0,1] +# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0 +# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0 +vext.32 d19,d0,d0,#0 + +# qhasm: x4 = x4[0,2,1,3] +# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top +# asm 2: vtrn.32 <x4=d4,<x4=d5 +vtrn.32 d4,d5 + +# qhasm: mem128[input_0] aligned= 
x01;input_0+=16 +# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]! +# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]! +vst1.8 {d6-d7},[r0,: 128]! + +# qhasm: mem128[input_0] aligned= x23;input_0+=16 +# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]! +# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]! +vst1.8 {d18-d19},[r0,: 128]! + +# qhasm: mem64[input_0] aligned= x4[0] +# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64] +# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64] +vst1.8 d4,[r0,: 64] + +# qhasm: return +add sp,sp,#0 +bx lr + +#endif diff --git a/crypto/poly1305/poly1305_vec.c b/crypto/poly1305/poly1305_vec.c new file mode 100644 index 00000000..89fcacbe --- /dev/null +++ b/crypto/poly1305/poly1305_vec.c @@ -0,0 +1,887 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* This implementation of poly1305 is by Andrew Moon + * (https://github.com/floodyberry/poly1305-donna) and released as public + * domain. It implements SIMD vectorization based on the algorithm described in + * http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte + * block size */ + +#include <openssl/poly1305.h> + + +#if !defined(OPENSSL_WINDOWS) && defined(OPENSSL_X86_64) + +#include <emmintrin.h> + +#define ALIGN(x) __attribute__((aligned(x))) +/* inline is not a keyword in C89. 
*/ +#define INLINE +#define U8TO64_LE(m) (*(uint64_t *)(m)) +#define U8TO32_LE(m) (*(uint32_t *)(m)) +#define U64TO8_LE(m, v) (*(uint64_t *)(m)) = v + +typedef __m128i xmmi; +typedef unsigned __int128 uint128_t; + +static const uint32_t ALIGN(16) poly1305_x64_sse2_message_mask[4] = { + (1 << 26) - 1, 0, (1 << 26) - 1, 0}; +static const uint32_t ALIGN(16) poly1305_x64_sse2_5[4] = {5, 0, 5, 0}; +static const uint32_t ALIGN(16) poly1305_x64_sse2_1shl128[4] = {(1 << 24), 0, + (1 << 24), 0}; + +static uint128_t INLINE add128(uint128_t a, uint128_t b) { return a + b; } + +static uint128_t INLINE add128_64(uint128_t a, uint64_t b) { return a + b; } + +static uint128_t INLINE mul64x64_128(uint64_t a, uint64_t b) { + return (uint128_t)a * b; +} + +static uint64_t INLINE lo128(uint128_t a) { return (uint64_t)a; } + +static uint64_t INLINE shr128(uint128_t v, const int shift) { + return (uint64_t)(v >> shift); +} + +static uint64_t INLINE shr128_pair(uint64_t hi, uint64_t lo, const int shift) { + return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift); +} + +typedef struct poly1305_power_t { + union { + xmmi v; + uint64_t u[2]; + uint32_t d[4]; + } R20, R21, R22, R23, R24, S21, S22, S23, S24; +} poly1305_power; + +typedef struct poly1305_state_internal_t { + poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 + bytes of free storage */ + union { + xmmi H[5]; /* 80 bytes */ + uint64_t HH[10]; + }; + /* uint64_t r0,r1,r2; [24 bytes] */ + /* uint64_t pad0,pad1; [16 bytes] */ + uint64_t started; /* 8 bytes */ + uint64_t leftover; /* 8 bytes */ + uint8_t buffer[64]; /* 64 bytes */ +} poly1305_state_internal; /* 448 bytes total + 63 bytes for + alignment = 511 bytes raw */ + +static poly1305_state_internal INLINE *poly1305_aligned_state( + poly1305_state *state) { + return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63); +} + +/* copy 0-63 bytes */ +static void INLINE +poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) { + size_t offset = src - dst; + if (bytes & 32) { + _mm_storeu_si128((xmmi *)(dst + 0), + _mm_loadu_si128((xmmi *)(dst + offset + 0))); + _mm_storeu_si128((xmmi *)(dst + 16), + _mm_loadu_si128((xmmi *)(dst + offset + 16))); + dst += 32; + } + if (bytes & 16) { + _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset))); + dst += 16; + } + if (bytes & 8) { + *(uint64_t *)dst = *(uint64_t *)(dst + offset); + dst += 8; + } + if (bytes & 4) { + *(uint32_t *)dst = *(uint32_t *)(dst + offset); + dst += 4; + } + if (bytes & 2) { + *(uint16_t *)dst = *(uint16_t *)(dst + offset); + dst += 2; + } + if (bytes & 1) { + *(uint8_t *)dst = *(uint8_t *)(dst + offset); + } +} + +/* zero 0-15 bytes */ +static void INLINE poly1305_block_zero(uint8_t *dst, size_t bytes) { + if (bytes & 8) { + *(uint64_t *)dst = 0; + dst += 8; + } + if (bytes & 4) { + *(uint32_t *)dst = 0; + dst += 4; + } + if (bytes & 2) { + *(uint16_t *)dst = 0; + dst += 2; + } + if (bytes & 1) { + *(uint8_t *)dst = 0; + } +} + +static size_t INLINE poly1305_min(size_t a, size_t b) { + return (a < b) ? 
a : b; +} + +void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { + poly1305_state_internal *st = poly1305_aligned_state(state); + poly1305_power *p; + uint64_t r0, r1, r2; + uint64_t t0, t1; + + /* clamp key */ + t0 = U8TO64_LE(key + 0); + t1 = U8TO64_LE(key + 8); + r0 = t0 & 0xffc0fffffff; + t0 >>= 44; + t0 |= t1 << 20; + r1 = t0 & 0xfffffc0ffff; + t1 >>= 24; + r2 = t1 & 0x00ffffffc0f; + + /* store r in un-used space of st->P[1] */ + p = &st->P[1]; + p->R20.d[1] = (uint32_t)(r0); + p->R20.d[3] = (uint32_t)(r0 >> 32); + p->R21.d[1] = (uint32_t)(r1); + p->R21.d[3] = (uint32_t)(r1 >> 32); + p->R22.d[1] = (uint32_t)(r2); + p->R22.d[3] = (uint32_t)(r2 >> 32); + + /* store pad */ + p->R23.d[1] = U8TO32_LE(key + 16); + p->R23.d[3] = U8TO32_LE(key + 20); + p->R24.d[1] = U8TO32_LE(key + 24); + p->R24.d[3] = U8TO32_LE(key + 28); + + /* H = 0 */ + st->H[0] = _mm_setzero_si128(); + st->H[1] = _mm_setzero_si128(); + st->H[2] = _mm_setzero_si128(); + st->H[3] = _mm_setzero_si128(); + st->H[4] = _mm_setzero_si128(); + + st->started = 0; + st->leftover = 0; +} + +static void poly1305_first_block(poly1305_state_internal *st, + const uint8_t *m) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + xmmi T5, T6; + poly1305_power *p; + uint128_t d[3]; + uint64_t r0, r1, r2; + uint64_t r20, r21, r22, s22; + uint64_t pad0, pad1; + uint64_t c; + uint64_t i; + + /* pull out stored info */ + p = &st->P[1]; + + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; + pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; + + /* compute powers r^2,r^4 */ + r20 = r0; + r21 = r1; + r22 = r2; + for (i = 0; i < 2; i++) { + s22 = r22 * (5 << 2); + + d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22)); + d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21)); + d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20)); + + r20 = lo128(d[0]) & 0xfffffffffff; + c = shr128(d[0], 44); + d[1] = add128_64(d[1], c); + r21 = lo128(d[1]) & 0xfffffffffff; + c = shr128(d[1], 44); + d[2] = add128_64(d[2], c); + r22 = lo128(d[2]) & 0x3ffffffffff; + c = shr128(d[2], 42); + r20 += c * 5; + c = (r20 >> 44); + r20 = r20 & 0xfffffffffff; + r21 += c; + + p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)(r20)&0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R21.v = _mm_shuffle_epi32( + _mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R22.v = + _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R23.v = _mm_shuffle_epi32( + _mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16))), + _MM_SHUFFLE(1, 0, 1, 0)); + p->S21.v = _mm_mul_epu32(p->R21.v, FIVE); + p->S22.v = _mm_mul_epu32(p->R22.v, FIVE); + p->S23.v = _mm_mul_epu32(p->R23.v, FIVE); + p->S24.v = _mm_mul_epu32(p->R24.v, FIVE); + p--; + } + + /* put saved info back */ + p = &st->P[1]; + p->R20.d[1] = (uint32_t)(r0); + p->R20.d[3] = (uint32_t)(r0 >> 32); + p->R21.d[1] = (uint32_t)(r1); + p->R21.d[3] = (uint32_t)(r1 >> 32); + p->R22.d[1] = (uint32_t)(r2); + 
p->R22.d[3] = (uint32_t)(r2 >> 32); + p->R23.d[1] = (uint32_t)(pad0); + p->R23.d[3] = (uint32_t)(pad0 >> 32); + p->R24.d[1] = (uint32_t)(pad1); + p->R24.d[3] = (uint32_t)(pad1 >> 32); + + /* H = [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + st->H[0] = _mm_and_si128(MMASK, T5); + st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + st->H[2] = _mm_and_si128(MMASK, T5); + st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); +} + +static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, + size_t bytes) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + + poly1305_power *p; + xmmi H0, H1, H2, H3, H4; + xmmi T0, T1, T2, T3, T4, T5, T6; + xmmi M0, M1, M2, M3, M4; + xmmi C1, C2; + + H0 = st->H[0]; + H1 = st->H[1]; + H2 = st->H[2]; + H3 = st->H[3]; + H4 = st->H[4]; + + while (bytes >= 64) { + /* H *= [r^4,r^4] */ + p = &st->P[0]; + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My]*[r^2,r^2] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + p = &st->P[1]; + T5 = _mm_mul_epu32(M0, p->R20.v); + T6 = _mm_mul_epu32(M0, p->R21.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M1, p->S24.v); + T6 = _mm_mul_epu32(M1, 
p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M2, p->S23.v); + T6 = _mm_mul_epu32(M2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M3, p->S22.v); + T6 = _mm_mul_epu32(M3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M4, p->S21.v); + T6 = _mm_mul_epu32(M4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M0, p->R22.v); + T6 = _mm_mul_epu32(M0, p->R23.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M1, p->R21.v); + T6 = _mm_mul_epu32(M1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M2, p->R20.v); + T6 = _mm_mul_epu32(M2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M3, p->S24.v); + T6 = _mm_mul_epu32(M3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M4, p->S23.v); + T6 = _mm_mul_epu32(M4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M0, p->R24.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)), + _mm_loadl_epi64((xmmi *)(m + 48))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)), + _mm_loadl_epi64((xmmi *)(m + 56))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + T0 = _mm_add_epi64(T0, M0); + T1 = _mm_add_epi64(T1, M1); + T2 = _mm_add_epi64(T2, M2); + T3 = _mm_add_epi64(T3, M3); + T4 = _mm_add_epi64(T4, M4); + + /* reduce */ + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */ + H0 = T0; + H1 = T1; + H2 = T2; + H3 = T3; + H4 = T4; + + m += 64; + bytes -= 64; + } + + st->H[0] = H0; + st->H[1] = H1; + st->H[2] = H2; + st->H[3] = H3; + st->H[4] = H4; +} + +static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, + size_t bytes) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + + poly1305_power *p; + xmmi H0, H1, H2, H3, H4; + xmmi M0, M1, M2, M3, M4; + xmmi T0, T1, T2, T3, T4, T5, T6; + xmmi C1, C2; + + uint64_t r0, r1, r2; + uint64_t t0, t1, t2, t3, t4; + 
uint64_t c; + size_t consumed = 0; + + H0 = st->H[0]; + H1 = st->H[1]; + H2 = st->H[2]; + H3 = st->H[3]; + H4 = st->H[4]; + + /* p = [r^2,r^2] */ + p = &st->P[1]; + + if (bytes >= 32) { + /* H *= [r^2,r^2] */ + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + T0 = _mm_add_epi64(T0, M0); + T1 = _mm_add_epi64(T1, M1); + T2 = _mm_add_epi64(T2, M2); + T3 = _mm_add_epi64(T3, M3); + T4 = _mm_add_epi64(T4, M4); + + /* reduce */ + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = (H*[r^2,r^2] + [Mx,My]) */ + H0 = T0; + H1 = T1; + H2 = T2; + H3 = T3; + H4 = T4; + + consumed = 32; + } + + /* finalize, H *= [r^2,r] */ + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + + p->R20.d[2] = (uint32_t)(r0)&0x3ffffff; + p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff; + p->R22.d[2] = (uint32_t)((r1 >> 8)) & 
0x3ffffff; + p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff; + p->R24.d[2] = (uint32_t)((r2 >> 16)); + p->S21.d[2] = p->R21.d[2] * 5; + p->S22.d[2] = p->R22.d[2] * 5; + p->S23.d[2] = p->R23.d[2] * 5; + p->S24.d[2] = p->R24.d[2] * 5; + + /* H *= [r^2,r] */ + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = H[0]+H[1] */ + H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8)); + H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8)); + H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8)); + H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8)); + H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8)); + + t0 = _mm_cvtsi128_si32(H0); + c = (t0 >> 26); + t0 &= 0x3ffffff; + t1 = _mm_cvtsi128_si32(H1) + c; + c = (t1 >> 26); + t1 &= 0x3ffffff; + t2 = _mm_cvtsi128_si32(H2) + c; + c = (t2 >> 26); + t2 &= 0x3ffffff; + t3 = _mm_cvtsi128_si32(H3) + c; + c = (t3 >> 26); + t3 &= 0x3ffffff; + t4 = _mm_cvtsi128_si32(H4) + c; + c = (t4 >> 26); + t4 &= 0x3ffffff; + t0 = t0 + (c * 5); + c = (t0 >> 26); + t0 &= 0x3ffffff; + t1 = t1 + c; + + st->HH[0] = ((t0) | (t1 << 26)) & 0xfffffffffffull; + st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & 0xfffffffffffull; + st->HH[2] = ((t3 >> 10) | (t4 << 16)) & 0x3ffffffffffull; + + return consumed; +} + +void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, + size_t bytes) { + poly1305_state_internal *st = poly1305_aligned_state(state); + size_t want; + + /* need at least 32 
initial bytes to start the accelerated branch */ + if (!st->started) { + if ((st->leftover == 0) && (bytes > 32)) { + poly1305_first_block(st, m); + m += 32; + bytes -= 32; + } else { + want = poly1305_min(32 - st->leftover, bytes); + poly1305_block_copy(st->buffer + st->leftover, m, want); + bytes -= want; + m += want; + st->leftover += want; + if ((st->leftover < 32) || (bytes == 0)) + return; + poly1305_first_block(st, st->buffer); + st->leftover = 0; + } + st->started = 1; + } + + /* handle leftover */ + if (st->leftover) { + want = poly1305_min(64 - st->leftover, bytes); + poly1305_block_copy(st->buffer + st->leftover, m, want); + bytes -= want; + m += want; + st->leftover += want; + if (st->leftover < 64) + return; + poly1305_blocks(st, st->buffer, 64); + st->leftover = 0; + } + + /* process 64 byte blocks */ + if (bytes >= 64) { + want = (bytes & ~63); + poly1305_blocks(st, m, want); + m += want; + bytes -= want; + } + + if (bytes) { + poly1305_block_copy(st->buffer + st->leftover, m, bytes); + st->leftover += bytes; + } +} + +void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { + poly1305_state_internal *st = poly1305_aligned_state(state); + size_t leftover = st->leftover; + uint8_t *m = st->buffer; + uint128_t d[3]; + uint64_t h0, h1, h2; + uint64_t t0, t1; + uint64_t g0, g1, g2, c, nc; + uint64_t r0, r1, r2, s1, s2; + poly1305_power *p; + + if (st->started) { + size_t consumed = poly1305_combine(st, m, leftover); + leftover -= consumed; + m += consumed; + } + + /* st->HH will either be 0 or have the combined result */ + h0 = st->HH[0]; + h1 = st->HH[1]; + h2 = st->HH[2]; + + p = &st->P[1]; + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + s1 = r1 * (5 << 2); + s2 = r2 * (5 << 2); + + if (leftover < 16) + goto poly1305_donna_atmost15bytes; + +poly1305_donna_atleast16bytes: + t0 = U8TO64_LE(m + 0); + t1 = U8TO64_LE(m + 8); + h0 += t0 & 0xfffffffffff; + t0 = shr128_pair(t1, t0, 44); + h1 += t0 & 0xfffffffffff; + h2 += (t1 >> 24) | ((uint64_t)1 << 40); + +poly1305_donna_mul: + d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)), + mul64x64_128(h2, s1)); + d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)), + mul64x64_128(h2, s2)); + d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)), + mul64x64_128(h2, r0)); + h0 = lo128(d[0]) & 0xfffffffffff; + c = shr128(d[0], 44); + d[1] = add128_64(d[1], c); + h1 = lo128(d[1]) & 0xfffffffffff; + c = shr128(d[1], 44); + d[2] = add128_64(d[2], c); + h2 = lo128(d[2]) & 0x3ffffffffff; + c = shr128(d[2], 42); + h0 += c * 5; + + m += 16; + leftover -= 16; + if (leftover >= 16) + goto poly1305_donna_atleast16bytes; + +/* final bytes */ +poly1305_donna_atmost15bytes: + if (!leftover) + goto poly1305_donna_finish; + + m[leftover++] = 1; + poly1305_block_zero(m + leftover, 16 - leftover); + leftover = 16; + + t0 = U8TO64_LE(m + 0); + t1 = U8TO64_LE(m + 8); + h0 += t0 & 0xfffffffffff; + t0 = shr128_pair(t1, t0, 44); + h1 += t0 & 0xfffffffffff; + h2 += (t1 >> 24); + + goto poly1305_donna_mul; + +poly1305_donna_finish: + c = (h0 >> 44); + h0 &= 0xfffffffffff; + h1 += c; + c = (h1 >> 44); + h1 &= 0xfffffffffff; + h2 += c; + c = (h2 >> 42); + h2 &= 0x3ffffffffff; + h0 += c * 5; + + g0 = h0 + 5; + c = (g0 >> 44); + g0 &= 0xfffffffffff; + g1 = h1 + c; + c = (g1 >> 44); + g1 &= 0xfffffffffff; + g2 = h2 + c - ((uint64_t)1 << 42); + + c = (g2 >> 63) - 1; + nc = ~c; + h0 = 
(h0 & nc) | (g0 & c);
+ h1 = (h1 & nc) | (g1 & c);
+ h2 = (h2 & nc) | (g2 & c);
+
+ /* pad */
+ t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
+ t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
+ h0 += (t0 & 0xfffffffffff);
+ c = (h0 >> 44);
+ h0 &= 0xfffffffffff;
+ t0 = shr128_pair(t1, t0, 44);
+ h1 += (t0 & 0xfffffffffff) + c;
+ c = (h1 >> 44);
+ h1 &= 0xfffffffffff;
+ t1 = (t1 >> 24);
+ h2 += (t1)+c;
+
+ U64TO8_LE(mac + 0, ((h0) | (h1 << 44)));
+ U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24)));
+}
+
+#endif /* !OPENSSL_WINDOWS && OPENSSL_X86_64 */