boringssl/crypto/fipsmodule/modes/gcm.c

/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ==================================================================== */

#include <openssl/base.h>

#include <assert.h>
#include <string.h>

#include <openssl/mem.h>
#include <openssl/cpu.h>

#include "internal.h"
#include "../../internal.h"

#define PACK(s) ((size_t)(s) << (sizeof(size_t) * 8 - 16))
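
// REDUCE1BIT multiplies the field element V by x in GF(2^128), in the
// bit-reflected representation GHASH works in: shift V right one bit and, if
// a bit falls off the low end, fold it back in with the reflected reduction
// constant 0xe1... for the polynomial x^128 + x^7 + x^2 + x + 1.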
#define REDUCE1BIT(V)                                                 \
  do {                                                                \
    if (sizeof(size_t) == 8) {                                        \
      uint64_t T = UINT64_C(0xe100000000000000) & (0 - ((V).lo & 1)); \
      (V).lo = ((V).hi << 63) | ((V).lo >> 1);                        \
      (V).hi = ((V).hi >> 1) ^ T;                                     \
    } else {                                                          \
      uint32_t T = 0xe1000000U & (0 - (uint32_t)((V).lo & 1));        \
      (V).lo = ((V).hi << 63) | ((V).lo >> 1);                        \
      (V).hi = ((V).hi >> 1) ^ ((uint64_t)T << 32);                   \
    }                                                                 \
  } while (0)
// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
// bits of a |size_t|.
static const size_t kSizeTWithoutLower4Bits = (size_t) -16;
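
// gcm_init_4bit fills |Htable| so that Htable[j] is the product of H with the
// 4-bit value j, bits read in GCM's reflected order. Entries for single-bit j
// come from repeated REDUCE1BIT; the remaining entries are built by XOR.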
void gcm_init_4bit(u128 Htable[16], const uint64_t H[2]) {
  u128 V;

  Htable[0].hi = 0;
  Htable[0].lo = 0;
  V.hi = H[0];
  V.lo = H[1];

  Htable[8] = V;
  REDUCE1BIT(V);
  Htable[4] = V;
  REDUCE1BIT(V);
  Htable[2] = V;
  REDUCE1BIT(V);
  Htable[1] = V;
  Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
  V = Htable[4];
  Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
  Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
  Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
  V = Htable[8];
  Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
  Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
  Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
  Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
  Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
  Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
  Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;
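
  // The 32-bit ARM assembly expects the two 64-bit halves of each table
  // entry in the opposite order, so swap them here.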
#if defined(GHASH_ASM) && defined(OPENSSL_ARM)
  for (int j = 0; j < 16; ++j) {
    V = Htable[j];
    Htable[j].hi = V.lo;
    Htable[j].lo = V.hi;
  }
#endif
}
#if !defined(GHASH_ASM) || defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE)
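// rem_4bit[i] is the reduction, by the field polynomial, of the four bits i
// shifted off the low end of Z; PACK stores each 16-bit value in the top
// bits of a size_t.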
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)};
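
// gcm_gmult_4bit computes Xi = Xi * H, one nibble of Xi at a time from the
// last byte to the first, accumulating Htable entries and folding the bits
// shifted out of Z back in via rem_4bit.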
void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {
  u128 Z;
  int cnt = 15;
  size_t rem, nlo, nhi;

  nlo = ((const uint8_t *)Xi)[15];
  nhi = nlo >> 4;
  nlo &= 0xf;

  Z.hi = Htable[nlo].hi;
  Z.lo = Htable[nlo].lo;

  while (1) {
    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nhi].hi;
    Z.lo ^= Htable[nhi].lo;

    if (--cnt < 0) {
      break;
    }

    nlo = ((const uint8_t *)Xi)[cnt];
    nhi = nlo >> 4;
    nlo &= 0xf;

    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nlo].hi;
    Z.lo ^= Htable[nlo].lo;
  }

  Xi[0] = CRYPTO_bswap8(Z.hi);
  Xi[1] = CRYPTO_bswap8(Z.lo);
}

// Streamed variant of gcm_gmult_4bit; see CRYPTO_gcm128_[en|de]crypt for
// details. Compiler-generated code doesn't seem to give any performance
// improvement, at least not on x86[_64]. It's here mostly as a reference
// and a placeholder for possible future non-trivial optimization[s]...
void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len) {
  u128 Z;
  int cnt;
  size_t rem, nlo, nhi;

  do {
    cnt = 15;
    nlo = ((const uint8_t *)Xi)[15];
    nlo ^= inp[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nhi].hi;
      Z.lo ^= Htable[nhi].lo;

      if (--cnt < 0) {
        break;
      }

      nlo = ((const uint8_t *)Xi)[cnt];
      nlo ^= inp[cnt];
      nhi = nlo >> 4;
      nlo &= 0xf;

      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nlo].hi;
      Z.lo ^= Htable[nlo].lo;
    }

    Xi[0] = CRYPTO_bswap8(Z.hi);
    Xi[1] = CRYPTO_bswap8(Z.lo);
  } while (inp += 16, len -= 16);
}
#endif // !GHASH_ASM || AARCH64 || PPC64LE
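
// GCM_MUL and GHASH route through the 4-bit implementation. When
// GCM_FUNCREF_4BIT is defined, they are redefined below to call through
// function pointers selected at runtime.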
#define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->gcm_key.Htable)
#define GHASH(ctx, in, len) \
  gcm_ghash_4bit((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
// GHASH_CHUNK is a "stride parameter" meant to mitigate cache-thrashing: the
// idea is to hash data while it is still in L1 cache after the encryption
// pass...
#define GHASH_CHUNK (3 * 1024)
#if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86)
void gcm_init_ssse3(u128 Htable[16], const uint64_t Xi[2]) {
  // Run the existing 4-bit version.
  gcm_init_4bit(Htable, Xi);

  // First, swap hi and lo. The "4bit" version places hi first. It treats the
  // two fields separately, so the order does not matter, but ghash-ssse3
  // reads the entire state into one 128-bit register.
  for (int i = 0; i < 16; i++) {
    uint64_t tmp = Htable[i].hi;
    Htable[i].hi = Htable[i].lo;
    Htable[i].lo = tmp;
  }

  // Treat |Htable| as a 16x16 byte table and transpose it. Thus, Htable[i]
  // contains the i'th byte of j*H for all j.
  uint8_t *Hbytes = (uint8_t *)Htable;
  for (int i = 0; i < 16; i++) {
    for (int j = 0; j < i; j++) {
      uint8_t tmp = Hbytes[16 * i + j];
      Hbytes[16 * i + j] = Hbytes[16 * j + i];
      Hbytes[16 * j + i] = tmp;
    }
  }
}
#endif // GHASH_ASM_X86_64 || GHASH_ASM_X86
#ifdef GCM_FUNCREF_4BIT
#undef GCM_MUL
#define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable)
#undef GHASH
#define GHASH(ctx, in, len) \
  (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
#endif  // GCM_FUNCREF_4BIT
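
// CRYPTO_ghash_init converts the serialized hash key to host byte order,
// stores it in |out_key|, and selects the fastest GHASH implementation
// available on this CPU, falling back to the generic 4-bit table code.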
void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 *out_key, u128 out_table[16], int *out_is_avx,
                       const uint8_t gcm_key[16]) {
  *out_is_avx = 0;

  union {
    uint64_t u[2];
    uint8_t c[16];
  } H;

  OPENSSL_memcpy(H.c, gcm_key, 16);

  // H is stored in host byte order.
  H.u[0] = CRYPTO_bswap8(H.u[0]);
  H.u[1] = CRYPTO_bswap8(H.u[1]);

  OPENSSL_memcpy(out_key, H.c, 16);

#if defined(GHASH_ASM_X86_64)
  if (crypto_gcm_clmul_enabled()) {
    if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) {  // AVX+MOVBE
      gcm_init_avx(out_table, H.u);
      *out_mult = gcm_gmult_avx;
      *out_hash = gcm_ghash_avx;
      *out_is_avx = 1;
      return;
    }
    gcm_init_clmul(out_table, H.u);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (gcm_ssse3_capable()) {
    gcm_init_ssse3(out_table, H.u);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_X86)
  if (crypto_gcm_clmul_enabled()) {
    gcm_init_clmul(out_table, H.u);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (gcm_ssse3_capable()) {
    gcm_init_ssse3(out_table, H.u);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_ARM)
  if (gcm_pmull_capable()) {
    gcm_init_v8(out_table, H.u);
    *out_mult = gcm_gmult_v8;
    *out_hash = gcm_ghash_v8;
    return;
  }

  if (gcm_neon_capable()) {
    gcm_init_neon(out_table, H.u);
    *out_mult = gcm_gmult_neon;
    *out_hash = gcm_ghash_neon;
    return;
  }
#elif defined(GHASH_ASM_PPC64LE)
  if (CRYPTO_is_PPC64LE_vcrypto_capable()) {
    gcm_init_p8(out_table, H.u);
    *out_mult = gcm_gmult_p8;
    *out_hash = gcm_ghash_p8;
    return;
  }
#endif

  gcm_init_4bit(out_table, H.u);
#if defined(GHASH_ASM_X86)
  *out_mult = gcm_gmult_4bit_mmx;
  *out_hash = gcm_ghash_4bit_mmx;
#else
  *out_mult = gcm_gmult_4bit;
  *out_hash = gcm_ghash_4bit;
#endif
}
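
// CRYPTO_gcm128_init_key derives the hash key H by encrypting the all-zero
// block with |aes_key| and picks the GHASH implementation to use.
// |use_aesni_gcm_crypt| is set only when the block function is hardware AES
// and the AVX GHASH path is available.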
void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, const AES_KEY *aes_key,
                            block128_f block, int block_is_hwaes) {
  OPENSSL_memset(gcm_key, 0, sizeof(*gcm_key));
  gcm_key->block = block;

  uint8_t ghash_key[16];
  OPENSSL_memset(ghash_key, 0, sizeof(ghash_key));
  (*block)(ghash_key, ghash_key, aes_key);

  int is_avx;
  CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, &gcm_key->H,
                    gcm_key->Htable, &is_avx, ghash_key);

  gcm_key->use_aesni_gcm_crypt = (is_avx && block_is_hwaes) ? 1 : 0;
}
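
// CRYPTO_gcm128_setiv computes the pre-counter block Y0. A 96-bit IV is used
// directly as IV || 0^31 || 1; any other length is absorbed with GHASH,
// followed by the 64-bit IV length in bits. EK0 = E_K(Y0) is saved to mask
// the final tag.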
void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const AES_KEY *key,
                         const uint8_t *iv, size_t len) {
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
      ctx->gcm_key.gmult;
#endif

  ctx->Yi.u[0] = 0;
  ctx->Yi.u[1] = 0;
  ctx->Xi.u[0] = 0;
  ctx->Xi.u[1] = 0;
  ctx->len.u[0] = 0;  // AAD length
  ctx->len.u[1] = 0;  // message length
  ctx->ares = 0;
  ctx->mres = 0;
  uint32_t ctr;
  if (len == 12) {
    OPENSSL_memcpy(ctx->Yi.c, iv, 12);
    ctx->Yi.c[15] = 1;
    ctr = 1;
  } else {
    uint64_t len0 = len;

    while (len >= 16) {
      for (size_t i = 0; i < 16; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
      iv += 16;
      len -= 16;
    }
    if (len) {
      for (size_t i = 0; i < len; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
    }
    len0 <<= 3;
    ctx->Yi.u[1] ^= CRYPTO_bswap8(len0);

    GCM_MUL(ctx, Yi);
    ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
  }

  (*ctx->gcm_key.block)(ctx->Yi.c, ctx->EK0.c, key);
  ++ctr;
  ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
}
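
// CRYPTO_gcm128_aad absorbs additional authenticated data. It must be called
// before any data is encrypted or decrypted; |ares| carries the byte offset
// of a partially filled block between calls. The AAD may not exceed 2^64
// bits (2^61 bytes).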
int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) {
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
      ctx->gcm_key.gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16],
                      const uint8_t *inp, size_t len) = ctx->gcm_key.ghash;
#endif
#endif

  if (ctx->len.u[1]) {
    return 0;
  }
  uint64_t alen = ctx->len.u[0] + len;
  if (alen > (UINT64_C(1) << 61) || (sizeof(len) == 8 && alen < len)) {
    return 0;
  }
  ctx->len.u[0] = alen;
  unsigned n = ctx->ares;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(aad++);
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

  // Process a whole number of blocks.
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(ctx, aad, len_blocks);
    aad += len_blocks;
    len -= len_blocks;
  }

  // Process the remainder.
  if (len != 0) {
    n = (unsigned int)len;
    for (size_t i = 0; i < len; ++i) {
      ctx->Xi.c[i] ^= aad[i];
    }
  }

  ctx->ares = n;
  return 1;
}
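
// CRYPTO_gcm128_encrypt CTR-encrypts |len| bytes and hashes the resulting
// ciphertext. Whole GHASH_CHUNK strides are encrypted first and then hashed
// while still in L1 cache; |mres| carries leftover keystream bytes between
// calls. The total message is limited to 2^36 - 32 bytes per NIST SP
// 800-38D.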
int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
                          const uint8_t *in, uint8_t *out, size_t len) {
  block128_f block = ctx->gcm_key.block;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
      ctx->gcm_key.gmult;
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16],
                      const uint8_t *inp, size_t len) = ctx->gcm_key.ghash;
#endif
  uint64_t mlen = ctx->len.u[1] + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to encrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }
  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  uint32_t ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;
    while (j) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
    len -= GHASH_CHUNK;
  }
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    while (len >= 16) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      len -= 16;
    }
    GHASH(ctx, out - len_blocks, len_blocks);
  }
  if (len) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    while (len--) {
      ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}
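
// CRYPTO_gcm128_decrypt mirrors CRYPTO_gcm128_encrypt, but GHASH runs over
// the ciphertext, so each chunk is hashed before it is decrypted.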
int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
                          const unsigned char *in, unsigned char *out,
                          size_t len) {
  block128_f block = ctx->gcm_key.block;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
      ctx->gcm_key.gmult;
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16],
                      const uint8_t *inp, size_t len) = ctx->gcm_key.ghash;
#endif
  uint64_t mlen = ctx->len.u[1] + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to decrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }
  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  uint32_t ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;
    GHASH(ctx, in, GHASH_CHUNK);
    while (j) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    len -= GHASH_CHUNK;
  }
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(ctx, in, len_blocks);
    while (len >= 16) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(size_t)) {
store_word_le(out + i,
load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
}
out += 16;
in += 16;
len -= 16;
}
}
if (len) {
(*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
while (len--) {
uint8_t c = in[n];
ctx->Xi.c[n] ^= c;
out[n] = c ^ ctx->EKi.c[n];
++n;
}
}
ctx->mres = n;
return 1;
}
int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key,
const uint8_t *in, uint8_t *out, size_t len,
ctr128_f stream) {
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
ctx->gcm_key.gmult;
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len) = ctx->gcm_key.ghash;
#endif
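
  // NIST SP 800-38D caps a single GCM message at 2^39 - 256 bits, i.e.
  // 2^36 - 32 bytes of plaintext. The second clause rejects a 64-bit
  // overflow of the running length.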
uint64_t mlen = ctx->len.u[1] + len;
if (mlen > ((UINT64_C(1) << 36) - 32) ||
(sizeof(len) == 8 && mlen < len)) {
return 0;
}
ctx->len.u[1] = mlen;
if (ctx->ares) {
// First call to encrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
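
  // |mres| bytes of the previous keystream block are still unused. Consume
  // them first, folding the block into GHASH once it completes.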
unsigned n = ctx->mres;
if (n) {
while (n && len) {
ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
--len;
n = (n + 1) % 16;
}
if (n == 0) {
GCM_MUL(ctx, Xi);
} else {
ctx->mres = n;
return 1;
}
}
#if defined(AESNI_GCM)
if (ctx->gcm_key.use_aesni_gcm_crypt) {
    // |aesni_gcm_encrypt| may not process all of the input, and it processes
    // none of it when the input is shorter than the assembly path's minimum.
size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
in += bulk;
out += bulk;
len -= bulk;
}
#endif
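
  // |ctr| shadows the 32-bit big-endian counter in the last word of |Yi|.
  // Full chunks are encrypted with the ctr32 |stream| first and hashed
  // second, since GHASH covers the ciphertext.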
uint32_t ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
while (len >= GHASH_CHUNK) {
(*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
GHASH(ctx, out, GHASH_CHUNK);
out += GHASH_CHUNK;
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
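
  // Masking off the low four bits of |len| leaves the largest multiple of
  // the 16-byte block size.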
size_t len_blocks = len & kSizeTWithoutLower4Bits;
if (len_blocks != 0) {
size_t j = len_blocks / 16;
(*stream)(in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
in += len_blocks;
len -= len_blocks;
GHASH(ctx, out, len_blocks);
out += len_blocks;
}
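
  // A trailing partial block consumes one more keystream block; the number
  // of bytes used is carried to the next call in |mres|.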
if (len) {
(*ctx->gcm_key.block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
while (len--) {
ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
++n;
}
}
ctx->mres = n;
return 1;
}
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const AES_KEY *key,
const uint8_t *in, uint8_t *out, size_t len,
ctr128_f stream) {
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
ctx->gcm_key.gmult;
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len) = ctx->gcm_key.ghash;
#endif
uint64_t mlen = ctx->len.u[1] + len;
if (mlen > ((UINT64_C(1) << 36) - 32) ||
(sizeof(len) == 8 && mlen < len)) {
return 0;
}
ctx->len.u[1] = mlen;
if (ctx->ares) {
// First call to decrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
unsigned n = ctx->mres;
if (n) {
while (n && len) {
uint8_t c = *(in++);
*(out++) = c ^ ctx->EKi.c[n];
ctx->Xi.c[n] ^= c;
--len;
n = (n + 1) % 16;
}
if (n == 0) {
GCM_MUL(ctx, Xi);
} else {
ctx->mres = n;
return 1;
}
}
#if defined(AESNI_GCM)
if (ctx->gcm_key.use_aesni_gcm_crypt) {
    // |aesni_gcm_decrypt| may not process all of the input, and it processes
    // none of it when the input is shorter than the assembly path's minimum.
size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
in += bulk;
out += bulk;
len -= bulk;
}
#endif
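
  // This mirrors the encrypt path, except each chunk is hashed *before* the
  // |stream| call: GHASH covers the ciphertext, which here is the input.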
uint32_t ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
while (len >= GHASH_CHUNK) {
GHASH(ctx, in, GHASH_CHUNK);
(*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
out += GHASH_CHUNK;
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
size_t len_blocks = len & kSizeTWithoutLower4Bits;
if (len_blocks != 0) {
size_t j = len_blocks / 16;
GHASH(ctx, in, len_blocks);
(*stream)(in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
out += len_blocks;
in += len_blocks;
len -= len_blocks;
}
if (len) {
(*ctx->gcm_key.block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
while (len--) {
uint8_t c = in[n];
ctx->Xi.c[n] ^= c;
out[n] = c ^ ctx->EKi.c[n];
++n;
}
}
ctx->mres = n;
return 1;
}
int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len) {
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
ctx->gcm_key.gmult;
#endif
if (ctx->mres || ctx->ares) {
GCM_MUL(ctx, Xi);
}
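
  // Fold in the final length block, len(AAD) || len(ciphertext) as 64-bit
  // bit counts (hence the shifts by three), then XOR with |EK0|, the
  // encrypted initial counter block, to form the tag.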
ctx->Xi.u[0] ^= CRYPTO_bswap8(ctx->len.u[0] << 3);
ctx->Xi.u[1] ^= CRYPTO_bswap8(ctx->len.u[1] << 3);
GCM_MUL(ctx, Xi);
ctx->Xi.u[0] ^= ctx->EK0.u[0];
ctx->Xi.u[1] ^= ctx->EK0.u[1];
if (tag && len <= sizeof(ctx->Xi)) {
return CRYPTO_memcmp(ctx->Xi.c, tag, len) == 0;
} else {
return 0;
}
}
void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) {
CRYPTO_gcm128_finish(ctx, NULL, 0);
OPENSSL_memcpy(tag, ctx->Xi.c,
len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}
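
// A sketch of the intended call sequence, for orientation only. The setup
// functions are assumed from this file's companion header rather than shown
// here, and the AEAD layer normally drives them:
//
//   GCM128_CONTEXT ctx;
//   CRYPTO_gcm128_setiv(&ctx, &aes_key, iv, iv_len);    // after key setup
//   CRYPTO_gcm128_aad(&ctx, aad, aad_len);
//   CRYPTO_gcm128_encrypt_ctr32(&ctx, &aes_key, pt, ct, pt_len, stream);
//   CRYPTO_gcm128_tag(&ctx, tag, 16);                   // seal: emit the tag
//   // ...or, when opening, verify instead:
//   if (!CRYPTO_gcm128_finish(&ctx, tag, 16)) { /* reject */ }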
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#ifdef GHASH_ASM
const uint32_t *ia32cap = OPENSSL_ia32cap_get();
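  // Word 0 is CPUID leaf 1 EDX (bit 24 = FXSR) and word 1 is leaf 1 ECX
  // (bit 1 = PCLMULQDQ); the carry-less-multiply GHASH path needs both.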
return (ia32cap[0] & (1 << 24)) && // check FXSR bit
(ia32cap[1] & (1 << 1)); // check PCLMULQDQ bit
#else
return 0;
#endif
}
#endif