Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/mono/boringssl.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrian Smith <brian@briansmith.org>2015-11-01 23:13:24 +0300
committerAdam Langley <agl@google.com>2015-11-03 05:04:38 +0300
commit00461cf201b66205653fd6159ae260f453738641 (patch)
tree656151ba243139bf71ee1cfa1712c7fb75ee4c60 /crypto/sha
parentecc2591b6cbb376dfa0ea1dfce35f1f172ffd0d5 (diff)
Improve crypto/digest/md32_common.h mechanism.
The documentation in md32_common.h is now (more) correct with respect to the most important details of the layout of |HASH_CTX|. The documentation explaining why sha512.c doesn't use md32_common.h is now more accurate as well. Before, the C implementations of HASH_BLOCK_DATA_ORDER took a pointer to the |HASH_CTX| and the assembly language implementations took a pointer to the hash state |h| member of |HASH_CTX|. (This worked because |h| is always the first member of |HASH_CTX|.) Now, the C implementations take a pointer directly to |h| too. The definitions of |MD4_CTX|, |MD5_CTX|, and |SHA1_CTX| were changed to be consistent with |SHA256_CTX| and |SHA512_CTX| in storing the hash state in an array. This will break source compatibility with any external code that accesses the hash state directly, but will not affect binary compatibility. The second parameter of |HASH_BLOCK_DATA_ORDER| is now of type |const uint8_t *|; previously it was |void *| and all implementations had a |uint8_t *data| variable to access it as an array of bytes. This change paves the way for future refactorings such as automatically generating the |*_Init| functions and/or sharing one I-U-F implementation across all digest algorithms. Change-Id: I30513bb40b5f1d2c8932551d54073c35484b3f8b Reviewed-on: https://boringssl-review.googlesource.com/6401 Reviewed-by: Adam Langley <agl@google.com>
Diffstat (limited to 'crypto/sha')
-rw-r--r--crypto/sha/sha1.c59
-rw-r--r--crypto/sha/sha256.c50
-rw-r--r--crypto/sha/sha512.c108
3 files changed, 100 insertions, 117 deletions
diff --git a/crypto/sha/sha1.c b/crypto/sha/sha1.c
index c03e6081..b3318c57 100644
--- a/crypto/sha/sha1.c
+++ b/crypto/sha/sha1.c
@@ -69,11 +69,11 @@
int SHA1_Init(SHA_CTX *sha) {
memset(sha, 0, sizeof(SHA_CTX));
- sha->h0 = 0x67452301UL;
- sha->h1 = 0xefcdab89UL;
- sha->h2 = 0x98badcfeUL;
- sha->h3 = 0x10325476UL;
- sha->h4 = 0xc3d2e1f0UL;
+ sha->h[0] = 0x67452301UL;
+ sha->h[1] = 0xefcdab89UL;
+ sha->h[2] = 0x98badcfeUL;
+ sha->h[3] = 0x10325476UL;
+ sha->h[4] = 0xc3d2e1f0UL;
return 1;
}
@@ -96,21 +96,20 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
#define DATA_ORDER_IS_BIG_ENDIAN
-#define HASH_LONG uint32_t
#define HASH_CTX SHA_CTX
#define HASH_CBLOCK 64
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
- ll = (c)->h0; \
+ ll = (c)->h[0]; \
(void) HOST_l2c(ll, (s)); \
- ll = (c)->h1; \
+ ll = (c)->h[1]; \
(void) HOST_l2c(ll, (s)); \
- ll = (c)->h2; \
+ ll = (c)->h[2]; \
(void) HOST_l2c(ll, (s)); \
- ll = (c)->h3; \
+ ll = (c)->h[3]; \
(void) HOST_l2c(ll, (s)); \
- ll = (c)->h4; \
+ ll = (c)->h[4]; \
(void) HOST_l2c(ll, (s)); \
} while (0)
@@ -124,7 +123,7 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
#ifndef SHA1_ASM
static
#endif
-void sha1_block_data_order(SHA_CTX *c, const void *p, size_t num);
+void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
#include "../digest/md32_common.h"
@@ -186,17 +185,17 @@ void sha1_block_data_order(SHA_CTX *c, const void *p, size_t num);
#define X(i) XX##i
#if !defined(SHA1_ASM)
-static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) {
- const uint8_t *data = p;
+static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
+ size_t num) {
register uint32_t A, B, C, D, E, T, l;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
XX11, XX12, XX13, XX14, XX15;
- A = c->h0;
- B = c->h1;
- C = c->h2;
- D = c->h3;
- E = c->h4;
+ A = state[0];
+ B = state[1];
+ C = state[2];
+ D = state[3];
+ E = state[4];
for (;;) {
const union {
@@ -204,7 +203,7 @@ static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) {
char little;
} is_endian = {1};
- if (!is_endian.little && ((size_t)p % 4) == 0) {
+ if (!is_endian.little && ((uintptr_t)data % 4) == 0) {
const uint32_t *W = (const uint32_t *)data;
X(0) = W[0];
@@ -361,21 +360,21 @@ static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) {
BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));
- c->h0 = (c->h0 + E) & 0xffffffffL;
- c->h1 = (c->h1 + T) & 0xffffffffL;
- c->h2 = (c->h2 + A) & 0xffffffffL;
- c->h3 = (c->h3 + B) & 0xffffffffL;
- c->h4 = (c->h4 + C) & 0xffffffffL;
+ state[0] = (state[0] + E) & 0xffffffffL;
+ state[1] = (state[1] + T) & 0xffffffffL;
+ state[2] = (state[2] + A) & 0xffffffffL;
+ state[3] = (state[3] + B) & 0xffffffffL;
+ state[4] = (state[4] + C) & 0xffffffffL;
if (--num == 0) {
break;
}
- A = c->h0;
- B = c->h1;
- C = c->h2;
- D = c->h3;
- E = c->h4;
+ A = state[0];
+ B = state[1];
+ C = state[2];
+ D = state[3];
+ E = state[4];
}
}
#endif
diff --git a/crypto/sha/sha256.c b/crypto/sha/sha256.c
index 8276bbb5..53480dd0 100644
--- a/crypto/sha/sha256.c
+++ b/crypto/sha/sha256.c
@@ -135,7 +135,6 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
#define DATA_ORDER_IS_BIG_ENDIAN
-#define HASH_LONG uint32_t
#define HASH_CTX SHA256_CTX
#define HASH_CBLOCK 64
@@ -185,12 +184,12 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
#ifndef SHA256_ASM
static
#endif
-void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num);
+void sha256_block_data_order(uint32_t *state, const uint8_t *in, size_t num);
#include "../digest/md32_common.h"
#ifndef SHA256_ASM
-static const HASH_LONG K256[64] = {
+static const uint32_t K256[64] = {
0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
@@ -234,29 +233,28 @@ static const HASH_LONG K256[64] = {
ROUND_00_15(i, a, b, c, d, e, f, g, h); \
} while (0)
-static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
+static void sha256_block_data_order(uint32_t *state, const uint8_t *data,
size_t num) {
uint32_t a, b, c, d, e, f, g, h, s0, s1, T1;
- HASH_LONG X[16];
+ uint32_t X[16];
int i;
- const uint8_t *data = in;
const union {
long one;
char little;
} is_endian = {1};
while (num--) {
- a = ctx->h[0];
- b = ctx->h[1];
- c = ctx->h[2];
- d = ctx->h[3];
- e = ctx->h[4];
- f = ctx->h[5];
- g = ctx->h[6];
- h = ctx->h[7];
-
- if (!is_endian.little && sizeof(HASH_LONG) == 4 && ((size_t)in % 4) == 0) {
- const HASH_LONG *W = (const HASH_LONG *)data;
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
+
+ if (!is_endian.little && ((uintptr_t)data % 4) == 0) {
+ const uint32_t *W = (const uint32_t *)data;
T1 = X[0] = W[0];
ROUND_00_15(0, a, b, c, d, e, f, g, h);
@@ -293,7 +291,7 @@ static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
data += HASH_CBLOCK;
} else {
- HASH_LONG l;
+ uint32_t l;
HOST_c2l(data, l);
T1 = X[0] = l;
@@ -356,14 +354,14 @@ static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
}
- ctx->h[0] += a;
- ctx->h[1] += b;
- ctx->h[2] += c;
- ctx->h[3] += d;
- ctx->h[4] += e;
- ctx->h[5] += f;
- ctx->h[6] += g;
- ctx->h[7] += h;
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
}
}
diff --git a/crypto/sha/sha512.c b/crypto/sha/sha512.c
index 57c96ab9..5e77a1ec 100644
--- a/crypto/sha/sha512.c
+++ b/crypto/sha/sha512.c
@@ -65,27 +65,15 @@
/* IMPLEMENTATION NOTES.
*
- * As you might have noticed 32-bit hash algorithms:
- *
- * - permit SHA_LONG to be wider than 32-bit (case on CRAY);
- * - optimized versions implement two transform functions: one operating
- * on [aligned] data in host byte order and one - on data in input
- * stream byte order;
- * - share common byte-order neutral collector and padding function
- * implementations, ../md32_common.h;
- *
- * Neither of the above applies to this SHA-512 implementations. Reasons
+ * The 32-bit hash algorithms share a common byte-order neutral collector and
+ * padding function implementations that operate on unaligned data,
+ * ../md32_common.h. This SHA-512 implementation does not. Reasons
* [in reverse order] are:
*
- * - it's the only 64-bit hash algorithm for the moment of this writing,
+ * - It's the only 64-bit hash algorithm for the moment of this writing,
* there is no need for common collector/padding implementation [yet];
- * - by supporting only one transform function [which operates on
- * *aligned* data in input stream byte order, big-endian in this case]
- * we minimize burden of maintenance in two ways: a) collector/padding
- * function is simpler; b) only one transform function to stare at;
- * - SHA_LONG64 is required to be exactly 64-bit in order to be able to
- * apply a number of optimizations to mitigate potential performance
- * penalties caused by previous design decision; */
+ * - By supporting only a transform function that operates on *aligned* data
+ * the collector/padding function is simpler and easier to optimize. */
#if !defined(OPENSSL_NO_ASM) && \
(defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
@@ -163,7 +151,7 @@ uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out) {
#if !defined(SHA512_ASM)
static
#endif
-void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
+void sha512_block_data_order(uint64_t *state, const uint64_t *W, size_t num);
int SHA384_Final(uint8_t *md, SHA512_CTX *sha) {
@@ -181,7 +169,7 @@ void SHA512_Transform(SHA512_CTX *c, const uint8_t *data) {
data = c->u.p;
}
#endif
- sha512_block_data_order(c, data, 1);
+ sha512_block_data_order(c->h, (uint64_t *)data, 1);
}
int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
@@ -213,7 +201,7 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
memcpy(p + c->num, data, n), c->num = 0;
len -= n;
data += n;
- sha512_block_data_order(c, p, 1);
+ sha512_block_data_order(c->h, (uint64_t *)p, 1);
}
}
@@ -222,14 +210,14 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
if ((size_t)data % sizeof(c->u.d[0]) != 0) {
while (len >= sizeof(c->u)) {
memcpy(p, data, sizeof(c->u));
- sha512_block_data_order(c, p, 1);
+ sha512_block_data_order(c->h, (uint64_t *)p, 1);
len -= sizeof(c->u);
data += sizeof(c->u);
}
} else
#endif
{
- sha512_block_data_order(c, data, len / sizeof(c->u));
+ sha512_block_data_order(c->h, (uint64_t *)data, len / sizeof(c->u));
data += len;
len %= sizeof(c->u);
data -= len;
@@ -253,7 +241,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
if (n > (sizeof(sha->u) - 16)) {
memset(p + n, 0, sizeof(sha->u) - n);
n = 0;
- sha512_block_data_order(sha, p, 1);
+ sha512_block_data_order(sha->h, (uint64_t *)p, 1);
}
memset(p + n, 0, sizeof(sha->u) - 16 - n);
@@ -274,7 +262,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
p[sizeof(sha->u) - 15] = (uint8_t)(sha->Nh >> 48);
p[sizeof(sha->u) - 16] = (uint8_t)(sha->Nh >> 56);
- sha512_block_data_order(sha, p, 1);
+ sha512_block_data_order(sha->h, (uint64_t *)p, 1);
if (md == NULL) {
/* TODO(davidben): This NULL check is absent in other low-level hash 'final'
@@ -443,23 +431,22 @@ static uint64_t __fastcall __pull64be(const void *x) {
* This code should give better results on 32-bit CPU with less than
* ~24 registers, both size and performance wise...
*/
-static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
size_t num) {
- const uint64_t *W = in;
uint64_t A, E, T;
uint64_t X[9 + 80], *F;
int i;
while (num--) {
F = X + 80;
- A = ctx->h[0];
- F[1] = ctx->h[1];
- F[2] = ctx->h[2];
- F[3] = ctx->h[3];
- E = ctx->h[4];
- F[5] = ctx->h[5];
- F[6] = ctx->h[6];
- F[7] = ctx->h[7];
+ A = state[0];
+ F[1] = state[1];
+ F[2] = state[2];
+ F[3] = state[3];
+ E = state[4];
+ F[5] = state[5];
+ F[6] = state[6];
+ F[7] = state[7];
for (i = 0; i < 16; i++, F--) {
T = PULL64(W[i]);
@@ -484,14 +471,14 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
A = T + Sigma0(A) + Maj(A, F[1], F[2]);
}
- ctx->h[0] += A;
- ctx->h[1] += F[1];
- ctx->h[2] += F[2];
- ctx->h[3] += F[3];
- ctx->h[4] += E;
- ctx->h[5] += F[5];
- ctx->h[6] += F[6];
- ctx->h[7] += F[7];
+ state[0] += A;
+ state[1] += F[1];
+ state[2] += F[2];
+ state[3] += F[3];
+ state[4] += E;
+ state[5] += F[5];
+ state[6] += F[6];
+ state[7] += F[7];
W += 16;
}
@@ -517,23 +504,22 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
ROUND_00_15(i + j, a, b, c, d, e, f, g, h); \
} while (0)
-static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
size_t num) {
- const uint64_t *W = in;
uint64_t a, b, c, d, e, f, g, h, s0, s1, T1;
uint64_t X[16];
int i;
while (num--) {
- a = ctx->h[0];
- b = ctx->h[1];
- c = ctx->h[2];
- d = ctx->h[3];
- e = ctx->h[4];
- f = ctx->h[5];
- g = ctx->h[6];
- h = ctx->h[7];
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
T1 = X[0] = PULL64(W[0]);
ROUND_00_15(0, a, b, c, d, e, f, g, h);
@@ -587,14 +573,14 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X);
}
- ctx->h[0] += a;
- ctx->h[1] += b;
- ctx->h[2] += c;
- ctx->h[3] += d;
- ctx->h[4] += e;
- ctx->h[5] += f;
- ctx->h[6] += g;
- ctx->h[7] += h;
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
W += 16;
}