github.com/mono/boringssl.git
Diffstat (limited to 'crypto/modes')
 crypto/modes/asm/aesni-gcm-x86_64.pl | 46
 crypto/modes/asm/ghash-x86_64.pl     |  5
 crypto/modes/cfb.c                   | 11
 crypto/modes/ctr.c                   | 28
 crypto/modes/gcm.c                   | 88
 crypto/modes/gcm_test.c              |  6
 crypto/modes/internal.h              | 42
 crypto/modes/ofb.c                   |  6
 8 files changed, 135 insertions(+), 97 deletions(-)
diff --git a/crypto/modes/asm/aesni-gcm-x86_64.pl b/crypto/modes/asm/aesni-gcm-x86_64.pl
index 26135e6c..5d58bbbb 100644
--- a/crypto/modes/asm/aesni-gcm-x86_64.pl
+++ b/crypto/modes/asm/aesni-gcm-x86_64.pl
@@ -41,11 +41,12 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
+# This must be kept in sync with |$avx| in ghash-x86_64.pl; otherwise tags will
+# be computed incorrectly.
+#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
-#
-# TODO(davidben): Enable this after testing. $avx goes up to 2.
$avx = 0;
open OUT,"| \"$^X\" $xlate $flavour $output";
@@ -96,6 +97,23 @@ _aesni_ctr32_ghash_6x:
vpxor $rndkey,$inout3,$inout3
vmovups 0x10-0x80($key),$T2 # borrow $T2 for $rndkey
vpclmulqdq \$0x01,$Hkey,$Z3,$Z2
+
+ # At this point, the current block of 96 (0x60) bytes has already been
+ # loaded into registers. Concurrently with processing it, we want to
+ # load the next 96 bytes of input for the next round. Obviously, we can
+ # only do this if there are at least 96 more bytes of input beyond the
+ # input we're currently processing, or else we'd read past the end of
+ # the input buffer. Here, we set |%r12| to 96 if there are at least 96
+ # bytes of input beyond the 96 bytes we're already processing, and we
+ # set |%r12| to 0 otherwise. In the case where we set |%r12| to 96,
+ # we'll read in the next block so that it is in registers for the next
+ # loop iteration. In the case where we set |%r12| to 0, we'll re-read
+ # the current block and then ignore what we re-read.
+ #
+ # At this point, |$in0| points to the current (already read into
+ # registers) block, and |$end0| points to 2*96 bytes before the end of
+ # the input. Thus, |$in0| > |$end0| means that we do not have the next
+ # 96-byte block to read in, and |$in0| <= |$end0| means we do.
xor %r12,%r12
cmp $in0,$end0
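The |%r12| selection described in the new comment reduces to a single pointer comparison. A minimal C sketch of the idea (names mirror the register roles and are illustrative only; the assembly materializes 0 or 0x60 from the comparison rather than calling a function):

#include <stddef.h>
#include <stdint.h>

static size_t next_block_stride(const uint8_t *in0, const uint8_t *end0) {
  /* end0 points 2*96 bytes before the end of the input, so in0 <= end0
   * means a full 96-byte block exists beyond the one in flight. */
  return in0 <= end0 ? 96 : 0;
}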
@@ -388,6 +406,9 @@ $code.=<<___;
.align 32
aesni_gcm_decrypt:
xor $ret,$ret
+
+ # We call |_aesni_ctr32_ghash_6x|, which requires at least 96 (0x60)
+ # bytes of input.
cmp \$0x60,$len # minimal accepted length
jb .Lgcm_dec_abort
@@ -442,7 +463,15 @@ $code.=<<___;
vmovdqu 0x50($inp),$Z3 # I[5]
lea ($inp),$in0
vmovdqu 0x40($inp),$Z0
+
+ # |_aesni_ctr32_ghash_6x| requires |$end0| to point to 2*96 (0xc0)
+ # bytes before the end of the input. Note, in particular, that this is
+ # correct even if |$len| is not an even multiple of 96 or 16. XXX: This
+ # seems to require that |$inp| + |$len| >= 2*96 (0xc0); i.e. |$inp| must
+ # not be near the very beginning of the address space when |$len| < 2*96
+ # (0xc0).
lea -0xc0($inp,$len),$end0
+
vmovdqu 0x30($inp),$Z1
shr \$4,$len
xor $ret,$ret
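The XXX caveat above concerns pointer wraparound. A hedged illustration (not from the source) of how |$end0| is derived and where the underflow can occur:

#include <stddef.h>
#include <stdint.h>

/* Mirrors lea -0xc0($inp,$len),$end0. When 96 <= len < 192 and inp lies
 * within 0xc0 bytes of address zero, inp + len - 0xc0 wraps around, so
 * the later in0 <= end0 test could wrongly claim another 96-byte block
 * is available. In practice no allocation sits that low; the comment
 * just flags the assumption. */
static const uint8_t *compute_end0(const uint8_t *inp, size_t len) {
  return inp + len - 0xc0;
}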
@@ -598,6 +627,10 @@ _aesni_ctr32_6x:
.align 32
aesni_gcm_encrypt:
xor $ret,$ret
+
+ # We call |_aesni_ctr32_6x| twice, each call consuming 96 bytes of
+ # input. Then we call |_aesni_ctr32_ghash_6x|, which requires at
+ # least 96 more bytes of input.
cmp \$0x60*3,$len # minimal accepted length
jb .Lgcm_enc_abort
@@ -647,7 +680,16 @@ $code.=<<___;
.Lenc_no_key_aliasing:
lea ($out),$in0
+
+ # |_aesni_ctr32_ghash_6x| requires |$end0| to point to 2*96 (0xc0)
+ # bytes before the end of the input. Note, in particular, that this is
+ # correct even if |$len| is not an even multiple of 96 or 16. Unlike in
+ # the decryption case, there's no caveat that |$out| must not be near
+ # the very beginning of the address space, because we know that
+ # |$len| >= 3*96 from the check above, and so we know
+ # |$out| + |$len| >= 2*96 (0xc0).
lea -0xc0($out,$len),$end0
+
shr \$4,$len
call _aesni_ctr32_6x
diff --git a/crypto/modes/asm/ghash-x86_64.pl b/crypto/modes/asm/ghash-x86_64.pl
index e42ca321..5a11fb94 100644
--- a/crypto/modes/asm/ghash-x86_64.pl
+++ b/crypto/modes/asm/ghash-x86_64.pl
@@ -90,11 +90,12 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
+# This must be kept in sync with |$avx| in aesni-gcm-x86_64.pl; otherwise tags
+# will be computed incorrectly.
+#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
-#
-# TODO(davidben): Enable this after testing. $avx goes up to 2.
$avx = 0;
open OUT,"| \"$^X\" $xlate $flavour $output";
diff --git a/crypto/modes/cfb.c b/crypto/modes/cfb.c
index c58614ba..51b883e8 100644
--- a/crypto/modes/cfb.c
+++ b/crypto/modes/cfb.c
@@ -57,14 +57,13 @@
OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size);
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
- const void *key, uint8_t ivec[16], int *num, int enc,
- block128_f block) {
- unsigned int n;
+ const void *key, uint8_t ivec[16], unsigned *num,
+ int enc, block128_f block) {
size_t l = 0;
assert(in && out && key && ivec && num);
- n = *num;
+ unsigned n = *num;
if (enc) {
while (n && len) {
@@ -199,7 +198,7 @@ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits,
/* N.B. This expects the input to be packed, MS bit first */
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
- const void *key, uint8_t ivec[16], int *num,
+ const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block) {
size_t n;
uint8_t c[1], d[1];
@@ -217,7 +216,7 @@ void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
size_t length, const void *key,
- unsigned char ivec[16], int *num, int enc,
+ unsigned char ivec[16], unsigned *num, int enc,
block128_f block) {
size_t n;
diff --git a/crypto/modes/ctr.c b/crypto/modes/ctr.c
index 0baed5d4..b84e72c5 100644
--- a/crypto/modes/ctr.c
+++ b/crypto/modes/ctr.c
@@ -59,17 +59,13 @@
/* increment counter (128-bit int) by 1 */
static void ctr128_inc(uint8_t *counter) {
- uint32_t n = 16;
- uint8_t c;
+ uint32_t n = 16, c = 1;
do {
--n;
- c = counter[n];
- ++c;
- counter[n] = c;
- if (c) {
- return;
- }
+ c += counter[n];
+ counter[n] = (uint8_t) c;
+ c >>= 8;
} while (n);
}
@@ -104,7 +100,7 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
}
#if STRICT_ALIGNMENT
- if (((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
+ if (((size_t)in | (size_t)out | (size_t)ecount_buf) % sizeof(size_t) != 0) {
size_t l = 0;
while (l < len) {
if (n == 0) {
@@ -124,7 +120,7 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(ivec, ecount_buf, key);
ctr128_inc(ivec);
- for (; n < 16; n += sizeof(size_t)) {
+ for (n = 0; n < 16; n += sizeof(size_t)) {
*(size_t *)(out + n) = *(const size_t *)(in + n) ^
*(const size_t *)(ecount_buf + n);
}
@@ -146,17 +142,13 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
/* increment upper 96 bits of 128-bit counter by 1 */
static void ctr96_inc(uint8_t *counter) {
- uint32_t n = 12;
- uint8_t c;
+ uint32_t n = 12, c = 1;
do {
--n;
- c = counter[n];
- ++c;
- counter[n] = c;
- if (c) {
- return;
- }
+ c += counter[n];
+ counter[n] = (uint8_t) c;
+ c >>= 8;
} while (n);
}
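The rewritten increments always touch every byte of the counter, replacing the early-return carry chain with arithmetic carry propagation, so the work done no longer depends on the counter's value. A standalone demo of the new loop (the ctr128_inc body is verbatim from the diff; the test harness is ours):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ctr128_inc(uint8_t *counter) {
  uint32_t n = 16, c = 1;
  do {
    --n;
    c += counter[n];         /* add incoming carry to this byte */
    counter[n] = (uint8_t) c;
    c >>= 8;                 /* carry out is 0 or 1 */
  } while (n);
}

int main(void) {
  uint8_t ctr[16];
  memset(ctr, 0xff, sizeof(ctr)); /* all-ones: every byte carries */
  ctr128_inc(ctr);
  for (int i = 0; i < 16; i++) {
    printf("%02x", ctr[i]);       /* prints 32 zeros */
  }
  printf("\n");
  return 0;
}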
diff --git a/crypto/modes/gcm.c b/crypto/modes/gcm.c
index 8cc138dc..b8571313 100644
--- a/crypto/modes/gcm.c
+++ b/crypto/modes/gcm.c
@@ -55,6 +55,7 @@
#include <openssl/cpu.h>
#include "internal.h"
+#include "../internal.h"
#if !defined(OPENSSL_NO_ASM) && \
@@ -337,7 +338,18 @@ void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
#else
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
-void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len);
+void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
+ size_t len);
+#define AESNI_GCM
+static int aesni_gcm_enabled(GCM128_CONTEXT *ctx, ctr128_f stream) {
+ return stream == aesni_ctr32_encrypt_blocks &&
+ ctx->ghash == gcm_ghash_avx;
+}
+
+size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
+ const void *key, uint8_t ivec[16], uint64_t *Xi);
+size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
+ const void *key, uint8_t ivec[16], uint64_t *Xi);
#endif
#if defined(OPENSSL_X86)
@@ -380,14 +392,14 @@ void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
static int neon_capable(void) {
return 0;
}
-void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
+static void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
abort();
}
-void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
+static void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
abort();
}
-void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) {
+static void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
+ const uint8_t *inp, size_t len) {
abort();
}
#endif
@@ -405,17 +417,6 @@ void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
#endif
#endif
-GCM128_CONTEXT *CRYPTO_gcm128_new(const void *key, block128_f block) {
- GCM128_CONTEXT *ret;
-
- ret = OPENSSL_malloc(sizeof(GCM128_CONTEXT));
- if (ret != NULL) {
- CRYPTO_gcm128_init(ret, key, block);
- }
-
- return ret;
-}
-
void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key,
block128_f block) {
const union {
@@ -991,12 +992,6 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
ctx->ares = 0;
}
- if (is_endian.little) {
- ctr = GETU32(ctx->Yi.c + 12);
- } else {
- ctr = ctx->Yi.d[3];
- }
-
n = ctx->mres;
if (n) {
while (n && len) {
@@ -1011,6 +1006,24 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
return 1;
}
}
+
+#if defined(AESNI_GCM)
+ if (aesni_gcm_enabled(ctx, stream)) {
+ /* |aesni_gcm_encrypt| may not process all the input given to it. It may
+ * not process *any* of its input if it is deemed too small. */
+ size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
+ in += bulk;
+ out += bulk;
+ len -= bulk;
+ }
+#endif
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
#if defined(GHASH)
while (len >= GHASH_CHUNK) {
(*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
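Note why the GETU32 load of |ctr| moved below the new block: |aesni_gcm_encrypt| advances the counter in |ctx->Yi| itself, so reading |ctr| before the bulk pass would hand the software fallback stale counter blocks for whatever tail remains. The required ordering, distilled as a sketch:

  /* 1. Let the fused AES-NI/AVX code consume as large a prefix as it
   *    wants (possibly none); it updates ctx->Yi and ctx->Xi in place. */
  size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
  in += bulk;
  out += bulk;
  len -= bulk;
  /* 2. Only now snapshot the 32-bit counter for the generic path. */
  ctr = GETU32(ctx->Yi.c + 12);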
@@ -1100,12 +1113,6 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
ctx->ares = 0;
}
- if (is_endian.little) {
- ctr = GETU32(ctx->Yi.c + 12);
- } else {
- ctr = ctx->Yi.d[3];
- }
-
n = ctx->mres;
if (n) {
while (n && len) {
@@ -1122,6 +1129,24 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
return 1;
}
}
+
+#if defined(AESNI_GCM)
+ if (aesni_gcm_enabled(ctx, stream)) {
+ /* |aesni_gcm_decrypt| may not process all the input given to it. It may
+ * not process *any* of its input if it is deemed too small. */
+ size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
+ in += bulk;
+ out += bulk;
+ len -= bulk;
+ }
+#endif
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
#if defined(GHASH)
while (len >= GHASH_CHUNK) {
GHASH(ctx, in, GHASH_CHUNK);
@@ -1235,13 +1260,6 @@ void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) {
memcpy(tag, ctx->Xi.c, len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}
-void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx) {
- if (ctx) {
- OPENSSL_cleanse(ctx, sizeof(*ctx));
- OPENSSL_free(ctx);
- }
-}
-
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#ifdef GHASH_ASM
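With |CRYPTO_gcm128_new| and |CRYPTO_gcm128_release| removed, callers own the context's storage. A hedged sketch of the migration (key setup elided; |block| is any block128_f):

  GCM128_CONTEXT ctx;                  /* was: ctx = CRYPTO_gcm128_new(key, block); */
  CRYPTO_gcm128_init(&ctx, key, block);
  /* ... encrypt / decrypt / tag ... */
  OPENSSL_cleanse(&ctx, sizeof(ctx));  /* was: CRYPTO_gcm128_release(ctx); */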
diff --git a/crypto/modes/gcm_test.c b/crypto/modes/gcm_test.c
index 9414ac6e..19c295b5 100644
--- a/crypto/modes/gcm_test.c
+++ b/crypto/modes/gcm_test.c
@@ -282,8 +282,8 @@ static int decode_hex(uint8_t **out, size_t *out_len, const char *in,
uint8_t v, v2;
if (!from_hex(&v, in[i]) ||
!from_hex(&v2, in[i+1])) {
- fprintf(stderr, "%u: invalid hex digit in %s around offset %u.\n",
- test_num, description, (unsigned)i);
+ fprintf(stderr, "%u: invalid hex digit in %s around offset %zu.\n",
+ test_num, description, i);
goto err;
}
buf[i/2] = (v << 4) | v2;
@@ -336,7 +336,7 @@ static int run_test_case(unsigned test_num, const struct test_case *test) {
}
out = OPENSSL_malloc(plaintext_len);
- if (out == NULL) {
+ if (plaintext_len != 0 && out == NULL) {
goto out;
}
if (AES_set_encrypt_key(key, key_len*8, &aes_key)) {
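The new |plaintext_len != 0| guard exists because C allows malloc(0) to return NULL on success, so a zero-length test vector could otherwise be misreported as an allocation failure. The guarded pattern in isolation (illustrative):

  uint8_t *out = OPENSSL_malloc(plaintext_len);
  if (plaintext_len != 0 && out == NULL) {
    goto out; /* only a genuine allocation failure reaches here */
  }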
diff --git a/crypto/modes/internal.h b/crypto/modes/internal.h
index 7255a7ca..430d0401 100644
--- a/crypto/modes/internal.h
+++ b/crypto/modes/internal.h
@@ -121,9 +121,9 @@ extern "C" {
#endif
#elif defined(_MSC_VER)
#if _MSC_VER >= 1300
-#pragma warning(push, 3)
+OPENSSL_MSVC_PRAGMA(warning(push, 3))
#include <intrin.h>
-#pragma warning(pop)
+OPENSSL_MSVC_PRAGMA(warning(pop))
#pragma intrinsic(_byteswap_uint64, _byteswap_ulong)
#define BSWAP8(x) _byteswap_uint64((uint64_t)(x))
#define BSWAP4(x) _byteswap_ulong((uint32_t)(x))
@@ -179,16 +179,6 @@ struct gcm128_context {
block128_f block;
};
-struct ccm128_context {
- union {
- uint64_t u[2];
- uint8_t c[16];
- } nonce, cmac;
- uint64_t blocks;
- block128_f block;
- void *key;
-};
-
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
/* crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is
* used. */
@@ -210,7 +200,7 @@ typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks,
* incremented by this function. */
void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16],
- uint8_t ecount_buf[16], unsigned int *num,
+ uint8_t ecount_buf[16], unsigned *num,
block128_f block);
/* CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes
@@ -219,7 +209,7 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
* function. */
void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16],
- uint8_t ecount_buf[16], unsigned int *num,
+ uint8_t ecount_buf[16], unsigned *num,
ctr128_f ctr);
@@ -232,11 +222,6 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
typedef struct gcm128_context GCM128_CONTEXT;
-/* CRYPTO_gcm128_new allocates a fresh |GCM128_CONTEXT| and calls
- * |CRYPTO_gcm128_init|. It returns the new context, or NULL on error. */
-OPENSSL_EXPORT GCM128_CONTEXT *CRYPTO_gcm128_new(const void *key,
- block128_f block);
-
/* CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with
* the given key. */
OPENSSL_EXPORT void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key,
@@ -297,9 +282,6 @@ OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag,
OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag,
size_t len);
-/* CRYPTO_gcm128_release clears and frees |ctx|. */
-OPENSSL_EXPORT void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx);
-
/* CBC. */
@@ -331,7 +313,7 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
* call. */
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out,
size_t len, const void *key, uint8_t ivec[16],
- int *num, block128_f block);
+ unsigned *num, block128_f block);
/* CFB. */
@@ -341,21 +323,21 @@ void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out,
* |len| be a multiple of any value and any partial blocks are stored in |ivec|
* and |*num|, the latter must be zero before the initial call. */
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
- const void *key, uint8_t ivec[16], int *num, int enc,
- block128_f block);
+ const void *key, uint8_t ivec[16], unsigned *num,
+ int enc, block128_f block);
/* CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
* from |in| to |out| using |block| in CFB-8 mode. Prior to the first call
* |num| should be set to zero. */
void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len,
- const void *key, uint8_t ivec[16], int *num,
+ const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block);
/* CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
* from |in| to |out| using |block| in CFB-1 mode. Prior to the first call
* |num| should be set to zero. */
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
- const void *key, uint8_t ivec[16], int *num,
+ const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block);
size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len,
@@ -363,6 +345,12 @@ size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len,
block128_f block);
+#if !defined(OPENSSL_NO_ASM) && \
+ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64))
+void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
+ const void *key, const uint8_t *ivec);
+#endif
+
#if defined(__cplusplus)
} /* extern C */
#endif
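Every CFB/OFB/CTR entry point now takes |unsigned *num| instead of |int *num| for the carried partial-block offset. A hedged usage sketch (assumes the declarations above are in scope and an AES_KEY prepared elsewhere; casting AES_encrypt to block128_f follows the usual OpenSSL convention):

  void cfb_example(const AES_KEY *aes_key, uint8_t iv[16], const uint8_t *in,
                   uint8_t *out, size_t len) {
    unsigned num = 0; /* previously an int; must be zero before the first call */
    CRYPTO_cfb128_encrypt(in, out, len, aes_key, iv, &num, /*enc=*/1,
                          (block128_f)AES_encrypt);
  }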
diff --git a/crypto/modes/ofb.c b/crypto/modes/ofb.c
index 63c3165a..2c5bdc9a 100644
--- a/crypto/modes/ofb.c
+++ b/crypto/modes/ofb.c
@@ -56,13 +56,11 @@
OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size);
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
- const void *key, uint8_t ivec[16], int *num,
+ const void *key, uint8_t ivec[16], unsigned *num,
block128_f block) {
- unsigned int n;
-
assert(in && out && key && ivec && num);
- n = *num;
+ unsigned n = *num;
while (n && len) {
*(out++) = *(in++) ^ ivec[n];