diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 8fb78f1a..d25581e0 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -74,6 +74,8 @@ add_subdirectory(aes) add_subdirectory(des) add_subdirectory(rc4) add_subdirectory(conf) +add_subdirectory(chacha) +add_subdirectory(poly1305) # Level 1, depends only on 0.* add_subdirectory(digest) @@ -134,6 +136,8 @@ add_library( $ $ $ + $ + $ $ $ $ diff --git a/crypto/chacha/CMakeLists.txt b/crypto/chacha/CMakeLists.txt new file mode 100644 index 00000000..d23ecb1d --- /dev/null +++ b/crypto/chacha/CMakeLists.txt @@ -0,0 +1,20 @@ +include_directories(. .. ../../include) + +if (${ARCH} STREQUAL "arm") + set( + CHACHA_ARCH_SOURCES + + chacha_vec_arm.S + ) +endif() + +add_library( + chacha + + OBJECT + + chacha_generic.c + chacha_vec.c + + ${CHACHA_ARCH_SOURCES} +) diff --git a/crypto/chacha/chacha.h b/crypto/chacha/chacha.h new file mode 100644 index 00000000..ce53d49f --- /dev/null +++ b/crypto/chacha/chacha.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_CHACHA_H +#define OPENSSL_HEADER_CHACHA_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/* CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and + * nonce and writes the result to |out|, which may be equal to |in|. The + * initial block counter is specified by |counter|. */ +void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, + size_t in_len, const uint8_t key[32], + const uint8_t nonce[8], size_t counter); + + +#if defined(__cplusplus) +} /* extern C */ +#endif + +#endif /* OPENSSL_HEADER_CHACHA_H */ diff --git a/crypto/chacha/chacha_generic.c b/crypto/chacha/chacha_generic.c new file mode 100644 index 00000000..1e5b70d3 --- /dev/null +++ b/crypto/chacha/chacha_generic.c @@ -0,0 +1,141 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* Adapted from the public domain, estream code by D. Bernstein. 
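/* A minimal caller sketch for CRYPTO_chacha_20 as declared in chacha.h above.
 * The key and nonce below are placeholders rather than test vectors, and the
 * include path is an assumption about this tree's layout. */

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include "chacha.h"  /* header added by this change; path assumed */

static void chacha20_roundtrip_sketch(void) {
  static const uint8_t key[32] = {0};   /* placeholder key */
  static const uint8_t nonce[8] = {0};  /* placeholder nonce: never reuse with a key */
  uint8_t plaintext[64] = "attack at dawn";
  uint8_t ciphertext[sizeof(plaintext)];
  uint8_t recovered[sizeof(plaintext)];

  /* Encrypt, with the block counter starting at zero. */
  CRYPTO_chacha_20(ciphertext, plaintext, sizeof(plaintext), key, nonce, 0);
  /* ChaCha20 is a stream cipher, so applying the same keystream (same key,
   * nonce and counter) to the ciphertext recovers the plaintext. */
  CRYPTO_chacha_20(recovered, ciphertext, sizeof(ciphertext), key, nonce, 0);
  assert(memcmp(recovered, plaintext, sizeof(plaintext)) == 0);
}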
*/ + +#include + +#include + +#if defined(OPENSSL_WINDOWS) || !defined(OPENSSL_X86_64) && !defined(OPENSSL_X86) + +/* sigma contains the ChaCha constants, which happen to be an ASCII string. */ +static const char sigma[16] = "expand 32-byte k"; + +#define ROTATE(v, n) (((v) << (n)) | ((v) >> (32 - (n)))) +#define XOR(v, w) ((v) ^ (w)) +#define PLUS(x, y) ((x) + (y)) +#define PLUSONE(v) (PLUS((v), 1)) + +#define U32TO8_LITTLE(p, v) \ + { \ + (p)[0] = (v >> 0) & 0xff; \ + (p)[1] = (v >> 8) & 0xff; \ + (p)[2] = (v >> 16) & 0xff; \ + (p)[3] = (v >> 24) & 0xff; \ + } + +#define U8TO32_LITTLE(p) \ + (((uint32_t)((p)[0])) | ((uint32_t)((p)[1]) << 8) | \ + ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24)) + +/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. */ +#define QUARTERROUND(a,b,c,d) \ + x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \ + x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \ + x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \ + x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7); + +#if defined(OPENSSL_ARM) +/* Defined in chacha_vec.c */ +void CRYPTO_chacha_20_neon(uint8_t *out, const uint8_t *in, size_t in_len, + const uint8_t key[32], const uint8_t nonce[8], + size_t counter); +#endif + +/* chacha_core performs |num_rounds| rounds of ChaCha20 on the input words in + * |input| and writes the 64 output bytes to |output|. */ +static void chacha_core(uint8_t output[64], const uint32_t input[16], + int num_rounds) { + uint32_t x[16]; + int i; + + memcpy(x, input, sizeof(uint32_t) * 16); + for (i = 20; i > 0; i -= 2) { + QUARTERROUND(0, 4, 8, 12) + QUARTERROUND(1, 5, 9, 13) + QUARTERROUND(2, 6, 10, 14) + QUARTERROUND(3, 7, 11, 15) + QUARTERROUND(0, 5, 10, 15) + QUARTERROUND(1, 6, 11, 12) + QUARTERROUND(2, 7, 8, 13) + QUARTERROUND(3, 4, 9, 14) + } + + for (i = 0; i < 16; ++i) { + x[i] = PLUS(x[i], input[i]); + } + for (i = 0; i < 16; ++i) { + U32TO8_LITTLE(output + 4 * i, x[i]); + } +} + +void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, + const uint8_t key[32], const uint8_t nonce[8], + size_t counter) { + uint32_t input[16]; + uint8_t buf[64]; + size_t todo, i; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable() && ((intptr_t)in & 15) == 0 && + ((intptr_t)out & 15) == 0) { + CRYPTO_chacha_20_neon(out, in, in_len, key, nonce, counter); + return; + } +#endif + + input[0] = U8TO32_LITTLE(sigma + 0); + input[1] = U8TO32_LITTLE(sigma + 4); + input[2] = U8TO32_LITTLE(sigma + 8); + input[3] = U8TO32_LITTLE(sigma + 12); + + input[4] = U8TO32_LITTLE(key + 0); + input[5] = U8TO32_LITTLE(key + 4); + input[6] = U8TO32_LITTLE(key + 8); + input[7] = U8TO32_LITTLE(key + 12); + + input[8] = U8TO32_LITTLE(key + 16); + input[9] = U8TO32_LITTLE(key + 20); + input[10] = U8TO32_LITTLE(key + 24); + input[11] = U8TO32_LITTLE(key + 28); + + input[12] = counter; + input[13] = ((uint64_t)counter) >> 32; + input[14] = U8TO32_LITTLE(nonce + 0); + input[15] = U8TO32_LITTLE(nonce + 4); + + while (in_len > 0) { + todo = sizeof(buf); + if (in_len < todo) { + todo = in_len; + } + + chacha_core(buf, input, 20); + for (i = 0; i < todo; i++) { + out[i] = in[i] ^ buf[i]; + } + + out += todo; + in += todo; + in_len -= todo; + + input[12]++; + if (input[12] == 0) { + input[13]++; + } + } +} + +#endif /* OPENSSL_WINDOWS || !OPENSSL_X86_64 && !OPENSSL_X86 && !OPENSSL_ARM */ diff --git a/crypto/chacha/chacha_vec.c b/crypto/chacha/chacha_vec.c new file mode 100644 index 00000000..d06d1dd5 --- /dev/null +++ b/crypto/chacha/chacha_vec.c @@ 
-0,0 +1,329 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* ==================================================================== + * + * When updating this file, also update chacha_vec_arm.S + * + * ==================================================================== */ + + +/* This implementation is by Ted Krovetz and was submitted to SUPERCOP and + * marked as public domain. It was been altered to allow for non-aligned inputs + * and to allow the block counter to be passed in specifically. */ + +#include + +#if !defined(OPENSSL_WINDOWS) && (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) + +#define CHACHA_RNDS 20 /* 8 (high speed), 20 (conservative), 12 (middle) */ + +/* Architecture-neutral way to specify 16-byte vector of ints */ +typedef unsigned vec __attribute__((vector_size(16))); + +/* This implementation is designed for Neon, SSE and AltiVec machines. The + * following specify how to do certain vector operations efficiently on + * each architecture, using intrinsics. + * This implementation supports parallel processing of multiple blocks, + * including potentially using general-purpose registers. 
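/* For reference while reading DQROUND_VECTORS below: each of the vectors a,
 * b, c and d holds one row of the 4x4 ChaCha state, so a single vector
 * quarter-round processes all four columns at once, and the ROTV1/ROTV2/ROTV3
 * lane rotations realign the rows so that the diagonal round can reuse the
 * same column-wise code. A scalar sketch of the equivalent double round,
 * mirroring the QUARTERROUND macro in chacha_generic.c (illustrative only,
 * not part of this change): */

#include <stdint.h>

static void chacha_double_round_sketch(uint32_t x[16]) {
#define QR(a, b, c, d)                                          \
  x[a] += x[b]; x[d] ^= x[a]; x[d] = x[d] << 16 | x[d] >> 16;   \
  x[c] += x[d]; x[b] ^= x[c]; x[b] = x[b] << 12 | x[b] >> 20;   \
  x[a] += x[b]; x[d] ^= x[a]; x[d] = x[d] << 8  | x[d] >> 24;   \
  x[c] += x[d]; x[b] ^= x[c]; x[b] = x[b] << 7  | x[b] >> 25;
  /* Column round: one quarter-round per column of the state matrix. */
  QR(0, 4, 8, 12) QR(1, 5, 9, 13) QR(2, 6, 10, 14) QR(3, 7, 11, 15)
  /* Diagonal round: one quarter-round per diagonal. */
  QR(0, 5, 10, 15) QR(1, 6, 11, 12) QR(2, 7, 8, 13) QR(3, 4, 9, 14)
#undef QR
}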
*/ +#if __ARM_NEON__ +#include +#define GPR_TOO 1 +#define VBPI 2 +#define ONE (vec) vsetq_lane_u32(1, vdupq_n_u32(0), 0) +#define LOAD(m) (vec)(*((vec *)(m))) +#define STORE(m, r) (*((vec *)(m))) = (r) +#define ROTV1(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 1) +#define ROTV2(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 2) +#define ROTV3(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 3) +#define ROTW16(x) (vec) vrev32q_u16((uint16x8_t)x) +#if __clang__ +#define ROTW7(x) (x << ((vec) {7, 7, 7, 7})) ^ (x >> ((vec) {25, 25, 25, 25})) +#define ROTW8(x) (x << ((vec) {8, 8, 8, 8})) ^ (x >> ((vec) {24, 24, 24, 24})) +#define ROTW12(x) \ + (x << ((vec) {12, 12, 12, 12})) ^ (x >> ((vec) {20, 20, 20, 20})) +#else +#define ROTW7(x) \ + (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 7), (uint32x4_t)x, 25) +#define ROTW8(x) \ + (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 8), (uint32x4_t)x, 24) +#define ROTW12(x) \ + (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 12), (uint32x4_t)x, 20) +#endif +#elif __SSE2__ +#include +#define GPR_TOO 0 +#if __clang__ +#define VBPI 4 +#else +#define VBPI 3 +#endif +#define ONE (vec) _mm_set_epi32(0, 0, 0, 1) +#define LOAD(m) (vec) _mm_loadu_si128((__m128i *)(m)) +#define STORE(m, r) _mm_storeu_si128((__m128i *)(m), (__m128i)(r)) +#define ROTV1(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(0, 3, 2, 1)) +#define ROTV2(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(1, 0, 3, 2)) +#define ROTV3(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(2, 1, 0, 3)) +#define ROTW7(x) \ + (vec)(_mm_slli_epi32((__m128i)x, 7) ^ _mm_srli_epi32((__m128i)x, 25)) +#define ROTW12(x) \ + (vec)(_mm_slli_epi32((__m128i)x, 12) ^ _mm_srli_epi32((__m128i)x, 20)) +#if __SSSE3__ +#include +#define ROTW8(x) \ + (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, \ + 11, 6, 5, 4, 7, 2, 1, 0, 3)) +#define ROTW16(x) \ + (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, \ + 10, 5, 4, 7, 6, 1, 0, 3, 2)) +#else +#define ROTW8(x) \ + (vec)(_mm_slli_epi32((__m128i)x, 8) ^ _mm_srli_epi32((__m128i)x, 24)) +#define ROTW16(x) \ + (vec)(_mm_slli_epi32((__m128i)x, 16) ^ _mm_srli_epi32((__m128i)x, 16)) +#endif +#else +#error-- Implementation supports only machines with neon or SSE2 +#endif + +#ifndef REVV_BE +#define REVV_BE(x) (x) +#endif + +#ifndef REVW_BE +#define REVW_BE(x) (x) +#endif + +#define BPI (VBPI + GPR_TOO) /* Blocks computed per loop iteration */ + +#define DQROUND_VECTORS(a,b,c,d) \ + a += b; d ^= a; d = ROTW16(d); \ + c += d; b ^= c; b = ROTW12(b); \ + a += b; d ^= a; d = ROTW8(d); \ + c += d; b ^= c; b = ROTW7(b); \ + b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); \ + a += b; d ^= a; d = ROTW16(d); \ + c += d; b ^= c; b = ROTW12(b); \ + a += b; d ^= a; d = ROTW8(d); \ + c += d; b ^= c; b = ROTW7(b); \ + b = ROTV3(b); c = ROTV2(c); d = ROTV1(d); + +#define QROUND_WORDS(a,b,c,d) \ + a = a+b; d ^= a; d = d<<16 | d>>16; \ + c = c+d; b ^= c; b = b<<12 | b>>20; \ + a = a+b; d ^= a; d = d<< 8 | d>>24; \ + c = c+d; b ^= c; b = b<< 7 | b>>25; + +#define WRITE_XOR(in, op, d, v0, v1, v2, v3) \ + STORE(op + d + 0, LOAD(in + d + 0) ^ REVV_BE(v0)); \ + STORE(op + d + 4, LOAD(in + d + 4) ^ REVV_BE(v1)); \ + STORE(op + d + 8, LOAD(in + d + 8) ^ REVV_BE(v2)); \ + STORE(op + d +12, LOAD(in + d +12) ^ REVV_BE(v3)); + +#if __ARM_NEON__ +/* For ARM, we can't depend on NEON support, so this function is compiled with + * a different name, along with the generic code, and can be enabled at + * run-time. 
*/ +void CRYPTO_chacha_20_neon( +#else +void CRYPTO_chacha_20( +#endif + uint8_t *out, + const uint8_t *in, + size_t inlen, + const uint8_t key[32], + const uint8_t nonce[8], + size_t counter) + { + unsigned iters, i, *op=(unsigned *)out, *ip=(unsigned *)in, *kp; +#if defined(__ARM_NEON__) + unsigned *np; +#endif + vec s0, s1, s2, s3; +#if !defined(__ARM_NEON__) && !defined(__SSE2__) + __attribute__ ((aligned (16))) unsigned key[8], nonce[4]; +#endif + __attribute__ ((aligned (16))) unsigned chacha_const[] = + {0x61707865,0x3320646E,0x79622D32,0x6B206574}; +#if defined(__ARM_NEON__) || defined(__SSE2__) + kp = (unsigned *)key; +#else + ((vec *)key)[0] = REVV_BE(((vec *)key)[0]); + ((vec *)key)[1] = REVV_BE(((vec *)key)[1]); + nonce[0] = REVW_BE(((unsigned *)nonce)[0]); + nonce[1] = REVW_BE(((unsigned *)nonce)[1]); + nonce[2] = REVW_BE(((unsigned *)nonce)[2]); + nonce[3] = REVW_BE(((unsigned *)nonce)[3]); + kp = (unsigned *)key; + np = (unsigned *)nonce; +#endif +#if defined(__ARM_NEON__) + np = (unsigned*) nonce; +#endif + s0 = LOAD(chacha_const); + s1 = LOAD(&((vec*)kp)[0]); + s2 = LOAD(&((vec*)kp)[1]); + s3 = (vec){ + counter & 0xffffffff, +#if __ARM_NEON__ || defined(OPENSSL_X86) + 0, /* can't right-shift 32 bits on a 32-bit system. */ +#else + counter >> 32, +#endif + ((uint32_t*)nonce)[0], + ((uint32_t*)nonce)[1] + }; + + for (iters = 0; iters < inlen/(BPI*64); iters++) + { +#if GPR_TOO + register unsigned x0, x1, x2, x3, x4, x5, x6, x7, x8, + x9, x10, x11, x12, x13, x14, x15; +#endif +#if VBPI > 2 + vec v8,v9,v10,v11; +#endif +#if VBPI > 3 + vec v12,v13,v14,v15; +#endif + + vec v0,v1,v2,v3,v4,v5,v6,v7; + v4 = v0 = s0; v5 = v1 = s1; v6 = v2 = s2; v3 = s3; + v7 = v3 + ONE; +#if VBPI > 2 + v8 = v4; v9 = v5; v10 = v6; + v11 = v7 + ONE; +#endif +#if VBPI > 3 + v12 = v8; v13 = v9; v14 = v10; + v15 = v11 + ONE; +#endif +#if GPR_TOO + x0 = chacha_const[0]; x1 = chacha_const[1]; + x2 = chacha_const[2]; x3 = chacha_const[3]; + x4 = kp[0]; x5 = kp[1]; x6 = kp[2]; x7 = kp[3]; + x8 = kp[4]; x9 = kp[5]; x10 = kp[6]; x11 = kp[7]; + x12 = counter+BPI*iters+(BPI-1); x13 = 0; + x14 = np[0]; x15 = np[1]; +#endif + for (i = CHACHA_RNDS/2; i; i--) + { + DQROUND_VECTORS(v0,v1,v2,v3) + DQROUND_VECTORS(v4,v5,v6,v7) +#if VBPI > 2 + DQROUND_VECTORS(v8,v9,v10,v11) +#endif +#if VBPI > 3 + DQROUND_VECTORS(v12,v13,v14,v15) +#endif +#if GPR_TOO + QROUND_WORDS( x0, x4, x8,x12) + QROUND_WORDS( x1, x5, x9,x13) + QROUND_WORDS( x2, x6,x10,x14) + QROUND_WORDS( x3, x7,x11,x15) + QROUND_WORDS( x0, x5,x10,x15) + QROUND_WORDS( x1, x6,x11,x12) + QROUND_WORDS( x2, x7, x8,x13) + QROUND_WORDS( x3, x4, x9,x14) +#endif + } + + WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3) + s3 += ONE; + WRITE_XOR(ip, op, 16, v4+s0, v5+s1, v6+s2, v7+s3) + s3 += ONE; +#if VBPI > 2 + WRITE_XOR(ip, op, 32, v8+s0, v9+s1, v10+s2, v11+s3) + s3 += ONE; +#endif +#if VBPI > 3 + WRITE_XOR(ip, op, 48, v12+s0, v13+s1, v14+s2, v15+s3) + s3 += ONE; +#endif + ip += VBPI*16; + op += VBPI*16; +#if GPR_TOO + op[0] = REVW_BE(REVW_BE(ip[0]) ^ (x0 + chacha_const[0])); + op[1] = REVW_BE(REVW_BE(ip[1]) ^ (x1 + chacha_const[1])); + op[2] = REVW_BE(REVW_BE(ip[2]) ^ (x2 + chacha_const[2])); + op[3] = REVW_BE(REVW_BE(ip[3]) ^ (x3 + chacha_const[3])); + op[4] = REVW_BE(REVW_BE(ip[4]) ^ (x4 + kp[0])); + op[5] = REVW_BE(REVW_BE(ip[5]) ^ (x5 + kp[1])); + op[6] = REVW_BE(REVW_BE(ip[6]) ^ (x6 + kp[2])); + op[7] = REVW_BE(REVW_BE(ip[7]) ^ (x7 + kp[3])); + op[8] = REVW_BE(REVW_BE(ip[8]) ^ (x8 + kp[4])); + op[9] = REVW_BE(REVW_BE(ip[9]) ^ (x9 + kp[5])); + op[10] = 
REVW_BE(REVW_BE(ip[10]) ^ (x10 + kp[6])); + op[11] = REVW_BE(REVW_BE(ip[11]) ^ (x11 + kp[7])); + op[12] = REVW_BE(REVW_BE(ip[12]) ^ (x12 + counter+BPI*iters+(BPI-1))); + op[13] = REVW_BE(REVW_BE(ip[13]) ^ (x13)); + op[14] = REVW_BE(REVW_BE(ip[14]) ^ (x14 + np[0])); + op[15] = REVW_BE(REVW_BE(ip[15]) ^ (x15 + np[1])); + s3 += ONE; + ip += 16; + op += 16; +#endif + } + + for (iters = inlen%(BPI*64)/64; iters != 0; iters--) + { + vec v0 = s0, v1 = s1, v2 = s2, v3 = s3; + for (i = CHACHA_RNDS/2; i; i--) + { + DQROUND_VECTORS(v0,v1,v2,v3); + } + WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3) + s3 += ONE; + ip += 16; + op += 16; + } + + inlen = inlen % 64; + if (inlen) + { + __attribute__ ((aligned (16))) vec buf[4]; + vec v0,v1,v2,v3; + v0 = s0; v1 = s1; v2 = s2; v3 = s3; + for (i = CHACHA_RNDS/2; i; i--) + { + DQROUND_VECTORS(v0,v1,v2,v3); + } + + if (inlen >= 16) + { + STORE(op + 0, LOAD(ip + 0) ^ REVV_BE(v0 + s0)); + if (inlen >= 32) + { + STORE(op + 4, LOAD(ip + 4) ^ REVV_BE(v1 + s1)); + if (inlen >= 48) + { + STORE(op + 8, LOAD(ip + 8) ^ + REVV_BE(v2 + s2)); + buf[3] = REVV_BE(v3 + s3); + } + else + buf[2] = REVV_BE(v2 + s2); + } + else + buf[1] = REVV_BE(v1 + s1); + } + else + buf[0] = REVV_BE(v0 + s0); + + for (i=inlen & ~15; i + +#include +#include +#include +#include +#include + +#include "internal.h" + + +#define POLY1305_TAG_LEN 16 +#define CHACHA20_NONCE_LEN 8 + +struct aead_chacha20_poly1305_ctx { + unsigned char key[32]; + unsigned char tag_len; +}; + +static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, size_t tag_len) { + struct aead_chacha20_poly1305_ctx *c20_ctx; + + if (tag_len == 0) { + tag_len = POLY1305_TAG_LEN; + } + + if (tag_len > POLY1305_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_init, CIPHER_R_TOO_LARGE); + return 0; + } + + if (key_len != sizeof(c20_ctx->key)) { + return 0; /* internal error - EVP_AEAD_CTX_init should catch this. */ + } + + c20_ctx = OPENSSL_malloc(sizeof(struct aead_chacha20_poly1305_ctx)); + if (c20_ctx == NULL) { + return 0; + } + + memcpy(c20_ctx->key, key, key_len); + c20_ctx->tag_len = tag_len; + ctx->aead_state = c20_ctx; + + return 1; +} + +static void aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx) { + struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; + OPENSSL_cleanse(c20_ctx->key, sizeof(c20_ctx->key)); + OPENSSL_free(c20_ctx); +} + +static void poly1305_update_with_length(poly1305_state *poly1305, + const uint8_t *data, size_t data_len) { + size_t j = data_len; + uint8_t length_bytes[8]; + unsigned i; + + for (i = 0; i < sizeof(length_bytes); i++) { + length_bytes[i] = j; + j >>= 8; + } + + CRYPTO_poly1305_update(poly1305, data, data_len); + CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes)); +} + +#if __arm__ +#define ALIGNED __attribute__((aligned(16))) +#else +#define ALIGNED +#endif + +static int aead_chacha20_poly1305_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, + size_t *out_len, size_t max_out_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *ad, size_t ad_len) { + const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; + uint8_t poly1305_key[32] ALIGNED; + poly1305_state poly1305; + const uint64_t in_len_64 = in_len; + + /* The underlying ChaCha implementation may not overflow the block + * counter into the second counter word. Therefore we disallow + * individual operations that work on more than 256GB at a time. 
+ * |in_len_64| is needed because, on 32-bit platforms, size_t is only + * 32-bits and this produces a warning because it's always false. + * Casting to uint64_t inside the conditional is not sufficient to stop + * the warning. */ + if (in_len_64 >= (1ull << 32) * 64 - 64) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_seal, CIPHER_R_TOO_LARGE); + return 0; + } + + if (in_len + c20_ctx->tag_len < in_len) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_seal, CIPHER_R_TOO_LARGE); + return 0; + } + + if (max_out_len < in_len + c20_ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_seal, + CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + if (nonce_len != CHACHA20_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_seal, CIPHER_R_IV_TOO_LARGE); + return 0; + } + + memset(poly1305_key, 0, sizeof(poly1305_key)); + CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), + c20_ctx->key, nonce, 0); + + CRYPTO_poly1305_init(&poly1305, poly1305_key); + poly1305_update_with_length(&poly1305, ad, ad_len); + CRYPTO_chacha_20(out, in, in_len, c20_ctx->key, nonce, 1); + poly1305_update_with_length(&poly1305, out, in_len); + + if (c20_ctx->tag_len != POLY1305_TAG_LEN) { + uint8_t tag[POLY1305_TAG_LEN]; + CRYPTO_poly1305_finish(&poly1305, tag); + memcpy(out + in_len, tag, c20_ctx->tag_len); + *out_len = in_len + c20_ctx->tag_len; + return 1; + } + + CRYPTO_poly1305_finish(&poly1305, out + in_len); + *out_len = in_len + c20_ctx->tag_len; + return 1; +} + +static int aead_chacha20_poly1305_open(const EVP_AEAD_CTX *ctx, uint8_t *out, + size_t *out_len, size_t max_out_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *ad, size_t ad_len) { + const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; + uint8_t mac[POLY1305_TAG_LEN]; + uint8_t poly1305_key[32] ALIGNED; + size_t plaintext_len; + poly1305_state poly1305; + const uint64_t in_len_64 = in_len; + + if (in_len < c20_ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_open, CIPHER_R_BAD_DECRYPT); + return 0; + } + + /* The underlying ChaCha implementation may not overflow the block + * counter into the second counter word. Therefore we disallow + * individual operations that work on more than 256GB at a time. + * |in_len_64| is needed because, on 32-bit platforms, size_t is only + * 32-bits and this produces a warning because it's always false. + * Casting to uint64_t inside the conditional is not sufficient to stop + * the warning. 
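/* Worked out for reviewers (not part of the original change): one (key,
 * nonce) pair yields at most 2^32 keystream blocks of 64 bytes, and block 0
 * is spent deriving the Poly1305 key, leaving at most
 *   (2^32 - 1) * 64 = 2^38 - 64 = 274,877,906,880 bytes
 * (just under 256GiB) for the message itself; the checks in seal and open
 * conservatively reject any input at or above that bound. Expressed as a
 * constant (hypothetical name, equal to the expression tested below): */

#include <stdint.h>

#define CHACHA20_POLY1305_MAX_IN_LEN_SKETCH ((UINT64_C(1) << 38) - 64)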
*/ + if (in_len_64 >= (1ull << 32) * 64 - 64) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_open, CIPHER_R_TOO_LARGE); + return 0; + } + + if (nonce_len != CHACHA20_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_open, CIPHER_R_IV_TOO_LARGE); + return 0; + } + + plaintext_len = in_len - c20_ctx->tag_len; + + if (max_out_len < plaintext_len) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_open, + CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + memset(poly1305_key, 0, sizeof(poly1305_key)); + CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), + c20_ctx->key, nonce, 0); + + CRYPTO_poly1305_init(&poly1305, poly1305_key); + poly1305_update_with_length(&poly1305, ad, ad_len); + poly1305_update_with_length(&poly1305, in, plaintext_len); + CRYPTO_poly1305_finish(&poly1305, mac); + + if (CRYPTO_memcmp(mac, in + plaintext_len, c20_ctx->tag_len) != 0) { + OPENSSL_PUT_ERROR(CIPHER, aead_chacha20_poly1305_open, CIPHER_R_BAD_DECRYPT); + return 0; + } + + CRYPTO_chacha_20(out, in, plaintext_len, c20_ctx->key, nonce, 1); + *out_len = plaintext_len; + return 1; +} + +static const EVP_AEAD aead_chacha20_poly1305 = { + 32, /* key len */ + CHACHA20_NONCE_LEN, /* nonce len */ + POLY1305_TAG_LEN, /* overhead */ + POLY1305_TAG_LEN, /* max tag length */ + aead_chacha20_poly1305_init, aead_chacha20_poly1305_cleanup, + aead_chacha20_poly1305_seal, aead_chacha20_poly1305_open, +}; + +const EVP_AEAD *EVP_aead_chacha20_poly1305() { return &aead_chacha20_poly1305; } diff --git a/crypto/poly1305/CMakeLists.txt b/crypto/poly1305/CMakeLists.txt new file mode 100644 index 00000000..65d7dbbe --- /dev/null +++ b/crypto/poly1305/CMakeLists.txt @@ -0,0 +1,21 @@ +include_directories(. .. ../../include) + +if (${ARCH} STREQUAL "arm") + set( + POLY1305_ARCH_SOURCES + + poly1305_arm_asm.S + ) +endif() + +add_library( + poly1305 + + OBJECT + + poly1305.c + poly1305_arm.c + poly1305_vec.c + + ${POLY1305_ARCH_SOURCES} +) diff --git a/crypto/poly1305/poly1305.c b/crypto/poly1305/poly1305.c new file mode 100644 index 00000000..256ad69b --- /dev/null +++ b/crypto/poly1305/poly1305.c @@ -0,0 +1,323 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* This implementation of poly1305 is by Andrew Moon + * (https://github.com/floodyberry/poly1305-donna) and released as public + * domain. */ + +#include + +#include + +#include + + +#if defined(OPENSSL_WINDOWS) || !defined(OPENSSL_X86_64) + +#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) +/* We can assume little-endian. 
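/* A caller-level sketch of the AEAD defined in e_chacha20poly1305.c above,
 * going through the EVP_AEAD_CTX entry points; the signatures are assumed to
 * match the existing aead.h, and the key/nonce values are placeholders. */

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <openssl/aead.h>

static void chacha20_poly1305_aead_sketch(void) {
  static const uint8_t key[32] = {0};   /* placeholder 256-bit key */
  static const uint8_t nonce[8] = {0};  /* placeholder 64-bit nonce: never reuse */
  static const uint8_t ad[] = "header";
  static const uint8_t msg[] = "message";
  uint8_t sealed[sizeof(msg) + 16];     /* ciphertext plus 16-byte Poly1305 tag */
  uint8_t opened[sizeof(msg)];
  size_t sealed_len, opened_len;
  EVP_AEAD_CTX ctx;

  assert(EVP_AEAD_CTX_init(&ctx, EVP_aead_chacha20_poly1305(), key, sizeof(key),
                           0 /* 0 = default 16-byte tag, see init above */,
                           NULL));
  assert(EVP_AEAD_CTX_seal(&ctx, sealed, &sealed_len, sizeof(sealed), nonce,
                           sizeof(nonce), msg, sizeof(msg), ad, sizeof(ad)));
  assert(EVP_AEAD_CTX_open(&ctx, opened, &opened_len, sizeof(opened), nonce,
                           sizeof(nonce), sealed, sealed_len, ad, sizeof(ad)));
  assert(opened_len == sizeof(msg) && memcmp(opened, msg, opened_len) == 0);
  EVP_AEAD_CTX_cleanup(&ctx);
}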
*/ +static uint32_t U8TO32_LE(const uint8_t *m) { + uint32_t r; + memcpy(&r, m, sizeof(r)); + return r; +} + +static void U32TO8_LE(uint8_t *m, uint32_t v) { memcpy(m, &v, sizeof(v)); } +#else +static uint32_t U8TO32_LE(const uint8_t *m) { + return (uint32_t)m[0] | (uint32_t)m[1] << 8 | (uint32_t)m[2] << 16 | + (uint32_t)m[3] << 24; +} + +static void U32TO8_LE(uint8_t *m, uint32_t v) { + m[0] = v; + m[1] = v >> 8; + m[2] = v >> 16; + m[3] = v >> 24; +} +#endif + +#if defined(OPENSSL_ARM) +void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]); + +void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, + size_t in_len); + +void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]); +#endif + +static uint64_t mul32x32_64(uint32_t a, uint32_t b) { return (uint64_t)a * b; } + +struct poly1305_state_st { + uint32_t r0, r1, r2, r3, r4; + uint32_t s1, s2, s3, s4; + uint32_t h0, h1, h2, h3, h4; + uint8_t buf[16]; + unsigned int buf_used; + uint8_t key[16]; +}; + +/* poly1305_blocks updates |state| given some amount of input data. This + * function may only be called with a |len| that is not a multiple of 16 at the + * end of the data. Otherwise the input must be buffered into 16 byte blocks. */ +static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, + size_t len) { + uint32_t t0, t1, t2, t3; + uint64_t t[5]; + uint32_t b; + uint64_t c; + size_t j; + uint8_t mp[16]; + + if (len < 16) { + goto poly1305_donna_atmost15bytes; + } + +poly1305_donna_16bytes: + t0 = U8TO32_LE(in); + t1 = U8TO32_LE(in + 4); + t2 = U8TO32_LE(in + 8); + t3 = U8TO32_LE(in + 12); + + in += 16; + len -= 16; + + state->h0 += t0 & 0x3ffffff; + state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; + state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; + state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; + state->h4 += (t3 >> 8) | (1 << 24); + +poly1305_donna_mul: + t[0] = mul32x32_64(state->h0, state->r0) + mul32x32_64(state->h1, state->s4) + + mul32x32_64(state->h2, state->s3) + mul32x32_64(state->h3, state->s2) + + mul32x32_64(state->h4, state->s1); + t[1] = mul32x32_64(state->h0, state->r1) + mul32x32_64(state->h1, state->r0) + + mul32x32_64(state->h2, state->s4) + mul32x32_64(state->h3, state->s3) + + mul32x32_64(state->h4, state->s2); + t[2] = mul32x32_64(state->h0, state->r2) + mul32x32_64(state->h1, state->r1) + + mul32x32_64(state->h2, state->r0) + mul32x32_64(state->h3, state->s4) + + mul32x32_64(state->h4, state->s3); + t[3] = mul32x32_64(state->h0, state->r3) + mul32x32_64(state->h1, state->r2) + + mul32x32_64(state->h2, state->r1) + mul32x32_64(state->h3, state->r0) + + mul32x32_64(state->h4, state->s4); + t[4] = mul32x32_64(state->h0, state->r4) + mul32x32_64(state->h1, state->r3) + + mul32x32_64(state->h2, state->r2) + mul32x32_64(state->h3, state->r1) + + mul32x32_64(state->h4, state->r0); + + state->h0 = (uint32_t)t[0] & 0x3ffffff; + c = (t[0] >> 26); + t[1] += c; + state->h1 = (uint32_t)t[1] & 0x3ffffff; + b = (uint32_t)(t[1] >> 26); + t[2] += b; + state->h2 = (uint32_t)t[2] & 0x3ffffff; + b = (uint32_t)(t[2] >> 26); + t[3] += b; + state->h3 = (uint32_t)t[3] & 0x3ffffff; + b = (uint32_t)(t[3] >> 26); + t[4] += b; + state->h4 = (uint32_t)t[4] & 0x3ffffff; + b = (uint32_t)(t[4] >> 26); + state->h0 += b * 5; + + if (len >= 16) + goto poly1305_donna_16bytes; + +/* final bytes */ +poly1305_donna_atmost15bytes: + if (!len) + return; + + for (j = 0; j < len; j++) + mp[j] = in[j]; + mp[j++] = 1; + for (; j < 16; 
j++) + mp[j] = 0; + len = 0; + + t0 = U8TO32_LE(mp + 0); + t1 = U8TO32_LE(mp + 4); + t2 = U8TO32_LE(mp + 8); + t3 = U8TO32_LE(mp + 12); + + state->h0 += t0 & 0x3ffffff; + state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; + state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; + state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; + state->h4 += (t3 >> 8); + + goto poly1305_donna_mul; +} + +void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + uint32_t t0, t1, t2, t3; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_init_neon(statep, key); + return; + } +#endif + + t0 = U8TO32_LE(key + 0); + t1 = U8TO32_LE(key + 4); + t2 = U8TO32_LE(key + 8); + t3 = U8TO32_LE(key + 12); + + /* precompute multipliers */ + state->r0 = t0 & 0x3ffffff; + t0 >>= 26; + t0 |= t1 << 6; + state->r1 = t0 & 0x3ffff03; + t1 >>= 20; + t1 |= t2 << 12; + state->r2 = t1 & 0x3ffc0ff; + t2 >>= 14; + t2 |= t3 << 18; + state->r3 = t2 & 0x3f03fff; + t3 >>= 8; + state->r4 = t3 & 0x00fffff; + + state->s1 = state->r1 * 5; + state->s2 = state->r2 * 5; + state->s3 = state->r3 * 5; + state->s4 = state->r4 * 5; + + /* init state */ + state->h0 = 0; + state->h1 = 0; + state->h2 = 0; + state->h3 = 0; + state->h4 = 0; + + state->buf_used = 0; + memcpy(state->key, key + 16, sizeof(state->key)); +} + +void CRYPTO_poly1305_update(poly1305_state *statep, const uint8_t *in, + size_t in_len) { + unsigned int i; + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_update_neon(statep, in, in_len); + return; + } +#endif + + if (state->buf_used) { + unsigned int todo = 16 - state->buf_used; + if (todo > in_len) + todo = in_len; + for (i = 0; i < todo; i++) + state->buf[state->buf_used + i] = in[i]; + state->buf_used += todo; + in_len -= todo; + in += todo; + + if (state->buf_used == 16) { + poly1305_update(state, state->buf, 16); + state->buf_used = 0; + } + } + + if (in_len >= 16) { + size_t todo = in_len & ~0xf; + poly1305_update(state, in, todo); + in += todo; + in_len &= 0xf; + } + + if (in_len) { + for (i = 0; i < in_len; i++) + state->buf[i] = in[i]; + state->buf_used = in_len; + } +} + +void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { + struct poly1305_state_st *state = (struct poly1305_state_st *)statep; + uint64_t f0, f1, f2, f3; + uint32_t g0, g1, g2, g3, g4; + uint32_t b, nb; + +#if defined(OPENSSL_ARM) + if (CRYPTO_is_NEON_capable()) { + CRYPTO_poly1305_finish_neon(statep, mac); + return; + } +#endif + + if (state->buf_used) + poly1305_update(state, state->buf, state->buf_used); + + b = state->h0 >> 26; + state->h0 = state->h0 & 0x3ffffff; + state->h1 += b; + b = state->h1 >> 26; + state->h1 = state->h1 & 0x3ffffff; + state->h2 += b; + b = state->h2 >> 26; + state->h2 = state->h2 & 0x3ffffff; + state->h3 += b; + b = state->h3 >> 26; + state->h3 = state->h3 & 0x3ffffff; + state->h4 += b; + b = state->h4 >> 26; + state->h4 = state->h4 & 0x3ffffff; + state->h0 += b * 5; + + g0 = state->h0 + 5; + b = g0 >> 26; + g0 &= 0x3ffffff; + g1 = state->h1 + b; + b = g1 >> 26; + g1 &= 0x3ffffff; + g2 = state->h2 + b; + b = g2 >> 26; + g2 &= 0x3ffffff; + g3 = state->h3 + b; + b = g3 >> 26; + g3 &= 0x3ffffff; + g4 = state->h4 + b - (1 << 26); + + b = (g4 >> 31) - 1; + nb = ~b; + state->h0 = (state->h0 & nb) | (g0 & b); + state->h1 = (state->h1 & nb) | (g1 & b); + state->h2 = 
(state->h2 & nb) | (g2 & b); + state->h3 = (state->h3 & nb) | (g3 & b); + state->h4 = (state->h4 & nb) | (g4 & b); + + f0 = ((state->h0) | (state->h1 << 26)) + (uint64_t)U8TO32_LE(&state->key[0]); + f1 = ((state->h1 >> 6) | (state->h2 << 20)) + + (uint64_t)U8TO32_LE(&state->key[4]); + f2 = ((state->h2 >> 12) | (state->h3 << 14)) + + (uint64_t)U8TO32_LE(&state->key[8]); + f3 = ((state->h3 >> 18) | (state->h4 << 8)) + + (uint64_t)U8TO32_LE(&state->key[12]); + + U32TO8_LE(&mac[0], f0); + f1 += (f0 >> 32); + U32TO8_LE(&mac[4], f1); + f2 += (f1 >> 32); + U32TO8_LE(&mac[8], f2); + f3 += (f2 >> 32); + U32TO8_LE(&mac[12], f3); +} + +#endif /* OPENSSL_WINDOWS || !OPENSSL_X86_64 */ diff --git a/crypto/poly1305/poly1305.h b/crypto/poly1305/poly1305.h new file mode 100644 index 00000000..a15bf1a8 --- /dev/null +++ b/crypto/poly1305/poly1305.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_POLY1305_H +#define OPENSSL_HEADER_POLY1305_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef unsigned char poly1305_state[512]; + +/* poly1305_init sets up |state| so that it can be used to calculate an + * authentication tag with the one-time key |key|. Note that |key| is a + * one-time key and therefore there is no `reset' method because that would + * enable several messages to be authenticated with the same key. */ +extern void CRYPTO_poly1305_init(poly1305_state* state, const uint8_t key[32]); + +/* poly1305_update processes |in_len| bytes from |in|. It can be called zero or + * more times after poly1305_init. */ +extern void CRYPTO_poly1305_update(poly1305_state* state, const uint8_t* in, + size_t in_len); + +/* poly1305_finish completes the poly1305 calculation and writes a 16 byte + * authentication tag to |mac|. */ +extern void CRYPTO_poly1305_finish(poly1305_state* state, uint8_t mac[16]); + + +#if defined(__cplusplus) +} /* extern C */ +#endif + +#endif /* OPENSSL_HEADER_POLY1305_H */ diff --git a/crypto/poly1305/poly1305_arm.c b/crypto/poly1305/poly1305_arm.c new file mode 100644 index 00000000..9d5e2769 --- /dev/null +++ b/crypto/poly1305/poly1305_arm.c @@ -0,0 +1,288 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* This implementation was taken from the public domain, neon2 version in + * SUPERCOP by D. J. Bernstein and Peter Schwabe. */ + +#include + + +#if defined(OPENSSL_ARM) + +typedef struct { + uint32_t v[12]; /* for alignment; only using 10 */ +} fe1305x2; + +#define addmulmod openssl_poly1305_neon2_addmulmod +#define blocks openssl_poly1305_neon2_blocks + +extern void addmulmod(fe1305x2 *r, const fe1305x2 *x, const fe1305x2 *y, + const fe1305x2 *c); + +extern int blocks(fe1305x2 *h, const fe1305x2 *precomp, const uint8_t *in, + unsigned int inlen); + +static void freeze(fe1305x2 *r) { + int i; + + uint32_t x0 = r->v[0]; + uint32_t x1 = r->v[2]; + uint32_t x2 = r->v[4]; + uint32_t x3 = r->v[6]; + uint32_t x4 = r->v[8]; + uint32_t y0; + uint32_t y1; + uint32_t y2; + uint32_t y3; + uint32_t y4; + uint32_t swap; + + for (i = 0; i < 3; ++i) { + x1 += x0 >> 26; + x0 &= 0x3ffffff; + x2 += x1 >> 26; + x1 &= 0x3ffffff; + x3 += x2 >> 26; + x2 &= 0x3ffffff; + x4 += x3 >> 26; + x3 &= 0x3ffffff; + x0 += 5 * (x4 >> 26); + x4 &= 0x3ffffff; + } + + y0 = x0 + 5; + y1 = x1 + (y0 >> 26); + y0 &= 0x3ffffff; + y2 = x2 + (y1 >> 26); + y1 &= 0x3ffffff; + y3 = x3 + (y2 >> 26); + y2 &= 0x3ffffff; + y4 = x4 + (y3 >> 26); + y3 &= 0x3ffffff; + swap = -(y4 >> 26); + y4 &= 0x3ffffff; + + y0 ^= x0; + y1 ^= x1; + y2 ^= x2; + y3 ^= x3; + y4 ^= x4; + + y0 &= swap; + y1 &= swap; + y2 &= swap; + y3 &= swap; + y4 &= swap; + + y0 ^= x0; + y1 ^= x1; + y2 ^= x2; + y3 ^= x3; + y4 ^= x4; + + r->v[0] = y0; + r->v[2] = y1; + r->v[4] = y2; + r->v[6] = y3; + r->v[8] = y4; +} + +static void fe1305x2_tobytearray(uint8_t *r, fe1305x2 *x) { + uint32_t x0 = x->v[0]; + uint32_t x1 = x->v[2]; + uint32_t x2 = x->v[4]; + uint32_t x3 = x->v[6]; + uint32_t x4 = x->v[8]; + + x1 += x0 >> 26; + x0 &= 0x3ffffff; + x2 += x1 >> 26; + x1 &= 0x3ffffff; + x3 += x2 >> 26; + x2 &= 0x3ffffff; + x4 += x3 >> 26; + x3 &= 0x3ffffff; + + *(uint32_t *)r = x0 + (x1 << 26); + *(uint32_t *)(r + 4) = (x1 >> 6) + (x2 << 20); + *(uint32_t *)(r + 8) = (x2 >> 12) + (x3 << 14); + *(uint32_t *)(r + 12) = (x3 >> 18) + (x4 << 8); +} + +/* load32 exists to avoid breaking strict aliasing rules in + * fe1305x2_frombytearray. 
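/* Illustrative restatement (not part of this change) of the idiom the comment
 * above describes: loading through memcpy keeps the access well defined under
 * C's strict-aliasing and alignment rules while still compiling to a single
 * 32-bit load on the targets this file supports. */

#include <stdint.h>
#include <string.h>

static uint32_t load32_sketch(const uint8_t *t) {
  uint32_t v;
  /* Well defined, unlike *(const uint32_t *)t; on these little-endian targets
   * it is equivalent to t[0] | t[1] << 8 | t[2] << 16 | t[3] << 24. */
  memcpy(&v, t, sizeof(v));
  return v;
}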
*/ +static uint32_t load32(uint8_t *t) { + uint32_t tmp; + memcpy(&tmp, t, sizeof(tmp)); + return tmp; +} + +static void fe1305x2_frombytearray(fe1305x2 *r, const uint8_t *x, + unsigned long long xlen) { + int i; + uint8_t t[17]; + + for (i = 0; (i < 16) && (i < xlen); i++) + t[i] = x[i]; + xlen -= i; + x += i; + t[i++] = 1; + for (; i < 17; i++) + t[i] = 0; + + r->v[0] = 0x3ffffff & load32(t); + r->v[2] = 0x3ffffff & (load32(t + 3) >> 2); + r->v[4] = 0x3ffffff & (load32(t + 6) >> 4); + r->v[6] = 0x3ffffff & (load32(t + 9) >> 6); + r->v[8] = load32(t + 13); + + if (xlen) { + for (i = 0; (i < 16) && (i < xlen); i++) + t[i] = x[i]; + t[i++] = 1; + for (; i < 17; i++) + t[i] = 0; + + r->v[1] = 0x3ffffff & load32(t); + r->v[3] = 0x3ffffff & (load32(t + 3) >> 2); + r->v[5] = 0x3ffffff & (load32(t + 6) >> 4); + r->v[7] = 0x3ffffff & (load32(t + 9) >> 6); + r->v[9] = load32(t + 13); + } else + r->v[1] = r->v[3] = r->v[5] = r->v[7] = r->v[9] = 0; +} + +static const fe1305x2 zero __attribute__((aligned(16))); + +struct poly1305_state_st { + uint8_t data[sizeof(fe1305x2[5]) + 128]; + uint8_t buf[32]; + unsigned int buf_used; + uint8_t key[16]; +}; + +void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + unsigned int j; + + r->v[1] = r->v[0] = 0x3ffffff & *(uint32_t *)key; + r->v[3] = r->v[2] = 0x3ffff03 & ((*(uint32_t *)(key + 3)) >> 2); + r->v[5] = r->v[4] = 0x3ffc0ff & ((*(uint32_t *)(key + 6)) >> 4); + r->v[7] = r->v[6] = 0x3f03fff & ((*(uint32_t *)(key + 9)) >> 6); + r->v[9] = r->v[8] = 0x00fffff & ((*(uint32_t *)(key + 12)) >> 8); + + for (j = 0; j < 10; j++) + h->v[j] = 0; /* XXX: should fast-forward a bit */ + + addmulmod(precomp, r, r, &zero); /* precompute r^2 */ + addmulmod(precomp + 1, precomp, precomp, &zero); /* precompute r^4 */ + + memcpy(st->key, key + 16, 16); + st->buf_used = 0; +} + +void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, + size_t in_len) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + unsigned int i; + + if (st->buf_used) { + unsigned int todo = 32 - st->buf_used; + if (todo > in_len) + todo = in_len; + for (i = 0; i < todo; i++) + st->buf[st->buf_used + i] = in[i]; + st->buf_used += todo; + in_len -= todo; + in += todo; + + if (st->buf_used == sizeof(st->buf) && in_len) { + addmulmod(h, h, precomp, &zero); + fe1305x2_frombytearray(c, st->buf, sizeof(st->buf)); + for (i = 0; i < 10; i++) + h->v[i] += c->v[i]; + st->buf_used = 0; + } + } + + while (in_len > 32) { + unsigned int tlen = 1048576; + if (in_len < tlen) + tlen = in_len; + tlen -= blocks(h, precomp, in, tlen); + in_len -= tlen; + in += tlen; + } + + if (in_len) { + for (i = 0; i < in_len; i++) + st->buf[i] = in[i]; + st->buf_used = in_len; + } +} + +void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]) { + struct poly1305_state_st *st = (struct poly1305_state_st *)(state); + fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); + fe1305x2 *const h = r + 1; + fe1305x2 *const c = h + 1; + fe1305x2 *const precomp = c + 1; + + addmulmod(h, h, precomp, &zero); + + if (st->buf_used > 16) { + 
fe1305x2_frombytearray(c, st->buf, st->buf_used); + precomp->v[1] = r->v[1]; + precomp->v[3] = r->v[3]; + precomp->v[5] = r->v[5]; + precomp->v[7] = r->v[7]; + precomp->v[9] = r->v[9]; + addmulmod(h, h, precomp, c); + } else if (st->buf_used > 0) { + fe1305x2_frombytearray(c, st->buf, st->buf_used); + r->v[1] = 1; + r->v[3] = 0; + r->v[5] = 0; + r->v[7] = 0; + r->v[9] = 0; + addmulmod(h, h, r, c); + } + + h->v[0] += h->v[1]; + h->v[2] += h->v[3]; + h->v[4] += h->v[5]; + h->v[6] += h->v[7]; + h->v[8] += h->v[9]; + freeze(h); + + fe1305x2_frombytearray(c, st->key, 16); + c->v[8] ^= (1 << 24); + + h->v[0] += c->v[0]; + h->v[2] += c->v[2]; + h->v[4] += c->v[4]; + h->v[6] += c->v[6]; + h->v[8] += c->v[8]; + fe1305x2_tobytearray(mac, h); +} + +#endif /* OPENSSL_ARM */ diff --git a/crypto/poly1305/poly1305_arm_asm.S b/crypto/poly1305/poly1305_arm_asm.S new file mode 100644 index 00000000..e196e57d --- /dev/null +++ b/crypto/poly1305/poly1305_arm_asm.S @@ -0,0 +1,2013 @@ +#if defined(__arm__) + +# This implementation was taken from the public domain, neon2 version in +# SUPERCOP by D. J. Bernstein and Peter Schwabe. + +# qhasm: int32 input_0 + +# qhasm: int32 input_1 + +# qhasm: int32 input_2 + +# qhasm: int32 input_3 + +# qhasm: stack32 input_4 + +# qhasm: stack32 input_5 + +# qhasm: stack32 input_6 + +# qhasm: stack32 input_7 + +# qhasm: int32 caller_r4 + +# qhasm: int32 caller_r5 + +# qhasm: int32 caller_r6 + +# qhasm: int32 caller_r7 + +# qhasm: int32 caller_r8 + +# qhasm: int32 caller_r9 + +# qhasm: int32 caller_r10 + +# qhasm: int32 caller_r11 + +# qhasm: int32 caller_r12 + +# qhasm: int32 caller_r14 + +# qhasm: reg128 caller_q4 + +# qhasm: reg128 caller_q5 + +# qhasm: reg128 caller_q6 + +# qhasm: reg128 caller_q7 + +# qhasm: startcode +.fpu neon +.text + +# qhasm: reg128 r0 + +# qhasm: reg128 r1 + +# qhasm: reg128 r2 + +# qhasm: reg128 r3 + +# qhasm: reg128 r4 + +# qhasm: reg128 x01 + +# qhasm: reg128 x23 + +# qhasm: reg128 x4 + +# qhasm: reg128 y0 + +# qhasm: reg128 y12 + +# qhasm: reg128 y34 + +# qhasm: reg128 5y12 + +# qhasm: reg128 5y34 + +# qhasm: stack128 y0_stack + +# qhasm: stack128 y12_stack + +# qhasm: stack128 y34_stack + +# qhasm: stack128 5y12_stack + +# qhasm: stack128 5y34_stack + +# qhasm: reg128 z0 + +# qhasm: reg128 z12 + +# qhasm: reg128 z34 + +# qhasm: reg128 5z12 + +# qhasm: reg128 5z34 + +# qhasm: stack128 z0_stack + +# qhasm: stack128 z12_stack + +# qhasm: stack128 z34_stack + +# qhasm: stack128 5z12_stack + +# qhasm: stack128 5z34_stack + +# qhasm: stack128 two24 + +# qhasm: int32 ptr + +# qhasm: reg128 c01 + +# qhasm: reg128 c23 + +# qhasm: reg128 d01 + +# qhasm: reg128 d23 + +# qhasm: reg128 t0 + +# qhasm: reg128 t1 + +# qhasm: reg128 t2 + +# qhasm: reg128 t3 + +# qhasm: reg128 t4 + +# qhasm: reg128 mask + +# qhasm: reg128 u0 + +# qhasm: reg128 u1 + +# qhasm: reg128 u2 + +# qhasm: reg128 u3 + +# qhasm: reg128 u4 + +# qhasm: reg128 v01 + +# qhasm: reg128 mid + +# qhasm: reg128 v23 + +# qhasm: reg128 v4 + +# qhasm: int32 len + +# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks +.align 4 +.global openssl_poly1305_neon2_blocks +.type openssl_poly1305_neon2_blocks STT_FUNC +openssl_poly1305_neon2_blocks: +vpush {q4,q5,q6,q7} +mov r12,sp +sub sp,sp,#192 +and sp,sp,#0xffffffe0 + +# qhasm: len = input_3 +# asm 1: mov 
>len=int32#4,len=r3,y12=reg128#2%bot->y12=reg128#2%top},[y12=d2->y12=d3},[y34=reg128#3%bot->y34=reg128#3%top},[y34=d4->y34=d5},[input_1=int32#2,input_1=r1,z12=reg128#5%bot->z12=reg128#5%top},[z12=d8->z12=d9},[z34=reg128#6%bot->z34=reg128#6%top},[z34=d10->z34=d11},[mask=reg128#7,#0xffffffff +# asm 2: vmov.i64 >mask=q6,#0xffffffff +vmov.i64 q6,#0xffffffff + +# qhasm: 2x u4 = 0xff +# asm 1: vmov.i64 >u4=reg128#8,#0xff +# asm 2: vmov.i64 >u4=q7,#0xff +vmov.i64 q7,#0xff + +# qhasm: x01 aligned= mem128[input_0];input_0+=16 +# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[x01=d16->x01=d17},[x23=reg128#10%bot->x23=reg128#10%top},[x23=d18->x23=d19},[input_0=int32#1,input_0=r0,>=6 +# asm 1: vshr.u64 >mask=reg128#7,mask=q6,>= 7 +# asm 1: vshr.u64 >u4=reg128#8,u4=q7,5y12=reg128#12,5y12=q11,5y34=reg128#13,5y34=q12,5y12=reg128#12,<5y12=reg128#12,5y12=q11,<5y12=q11,5y34=reg128#13,<5y34=reg128#13,5y34=q12,<5y34=q12,u4=reg128#8,u4=q7,5z12=reg128#14,5z12=q13,5z34=reg128#15,5z34=q14,5z12=reg128#14,<5z12=reg128#14,5z12=q13,<5z12=q13,5z34=reg128#15,<5z34=reg128#15,5z34=q14,<5z34=q14,ptr=int32#2,ptr=r1,r4=reg128#16,r4=q15,r0=reg128#8,r0=q7,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,<5y12_stack=stack128#5 +# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64] +add r1,sp,#64 + +# qhasm: mem128[ptr] aligned= 5y12 +# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[ptr=int32#2,<5y34_stack=stack128#6 +# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80] +add r1,sp,#80 + +# qhasm: mem128[ptr] aligned= 5y34 +# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[ptr=int32#2,<5z12_stack=stack128#10 +# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144] +add r1,sp,#144 + +# qhasm: mem128[ptr] aligned= 5z12 +# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[ptr=int32#2,<5z34_stack=stack128#11 +# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160] +add r1,sp,#160 + +# qhasm: mem128[ptr] aligned= 5z34 +# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[? 
len - 64 +# asm 1: cmp +bls ._below64bytes + +# qhasm: input_2 += 32 +# asm 1: add >input_2=int32#2,input_2=r1,c01=reg128#1%bot->c01=reg128#1%top},[c01=d0->c01=d1},[c23=reg128#2%bot->c23=reg128#2%top},[c23=d2->c23=d3},[ptr=int32#3,ptr=r2,z12=reg128#3%bot->z12=reg128#3%top},[z12=d4->z12=d5},[ptr=int32#3,ptr=r2,z0=reg128#4%bot->z0=reg128#4%top},[z0=d6->z0=d7},[r3=reg128#5,r3=q4,input_2=int32#2,input_2=r1,ptr=int32#3,<5z34_stack=stack128#11 +# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160] +add r2,sp,#160 + +# qhasm: 5z34 aligned= mem128[ptr] +# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[5z34=d10->5z34=d11},[r0=reg128#8,r0=q7,r2=reg128#14,r2=q13,d01=reg128#12%bot->d01=reg128#12%top},[d01=d22->d01=d23},[r1=reg128#15,r1=q14,ptr=int32#3,<5z12_stack=stack128#10 +# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144] +add r2,sp,#144 + +# qhasm: 5z12 aligned= mem128[ptr] +# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[5z12=d0->5z12=d1},[d23=reg128#2%bot->d23=reg128#2%top},[d23=d2->d23=d3},[input_2=int32#2,input_2=r1,> 40 +# asm 1: vshr.u64 >v4=reg128#4,v4=q3,> 14; v23[3] = d23[2,3] unsigned>> 14 +# asm 1: vshrn.u64 > 26; v01[3] = d01[2,3] unsigned>> 26 +# asm 1: vshrn.u64 > 20; v23[1] = mid[2,3] unsigned>> 20 +# asm 1: vshrn.u64 ptr=int32#3,ptr=r2,y34=reg128#3%bot->y34=reg128#3%top},[y34=d4->y34=d5},[ptr=int32#3,ptr=r2,y12=reg128#2%bot->y12=reg128#2%top},[y12=d2->y12=d3},[ptr=int32#3,ptr=r2,y0=reg128#1%bot->y0=reg128#1%top},[y0=d0->y0=d1},[ptr=int32#3,<5y34_stack=stack128#6 +# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80] +add r2,sp,#80 + +# qhasm: 5y34 aligned= mem128[ptr] +# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[5y34=d24->5y34=d25},[ptr=int32#3,<5y12_stack=stack128#5 +# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64] +add r2,sp,#64 + +# qhasm: 5y12 aligned= mem128[ptr] +# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[5y12=d22->5y12=d23},[ptr=int32#3,ptr=r2,> 26 +# asm 1: vshr.u64 >t1=reg128#4,t1=q3,len=int32#4,len=r3,r0=reg128#6,r0=q5,r1=reg128#4,r1=q3,> 26 +# asm 1: vshr.u64 >t4=reg128#8,t4=q7,r3=reg128#5,r3=q4,x4=reg128#8,x4=q7,r4=reg128#16%bot->r4=reg128#16%top},[r4=d30->r4=d31},[> 26 +# asm 1: vshr.u64 >t2=reg128#9,t2=q8,r1=reg128#4,r1=q3,> 26 +# asm 1: vshr.u64 >t0=reg128#10,t0=q9,r2=reg128#9,r2=q8,x4=reg128#11,x4=q10,x01=reg128#6,x01=q5,r0=reg128#8%bot->r0=reg128#8%top},[r0=d14->r0=d15},[ptr=int32#3,ptr=r2,t0=reg128#10,t0=q9,> 26 +# asm 1: vshr.u64 >t3=reg128#14,t3=q13,x01=reg128#15,x01=q14,z34=reg128#6%bot->z34=reg128#6%top},[z34=d10->z34=d11},[x23=reg128#10,x23=q9,r3=reg128#5,r3=q4,input_2=int32#2,input_2=r1,> 26 +# asm 1: vshr.u64 >t1=reg128#14,t1=q13,x01=reg128#9,x01=q8,r1=reg128#4,r1=q3,> 26 +# asm 1: vshr.u64 >t4=reg128#14,t4=q13,r3=reg128#5,r3=q4,x4=reg128#11,x4=q10,? len - 64 +# asm 1: cmp +bhi ._mainloop2 + +# qhasm: input_2 -= 32 +# asm 1: sub >input_2=int32#3,input_2=r2,? 
len - 32 +# asm 1: cmp +bls ._end + +# qhasm: mainloop: +._mainloop: + +# qhasm: new r0 + +# qhasm: ptr = &two24 +# asm 1: lea >ptr=int32#2,ptr=r1,r4=reg128#5%bot->r4=reg128#5%top},[r4=d8->r4=d9},[u4=reg128#6%bot->u4=reg128#6%top},[u4=d10->u4=d11},[c01=reg128#8%bot->c01=reg128#8%top},[c01=d14->c01=d15},[c23=reg128#14%bot->c23=reg128#14%top},[c23=d26->c23=d27},[r0=reg128#4,r0=q3,r3=reg128#6,r3=q5,r1=reg128#14,r1=q13,r2=reg128#8,r2=q7,> 26 +# asm 1: vshr.u64 >t1=reg128#9,t1=q8,r0=reg128#4,r0=q3,r1=reg128#9,r1=q8,> 26 +# asm 1: vshr.u64 >t4=reg128#10,t4=q9,r3=reg128#6,r3=q5,r4=reg128#5,r4=q4,> 26 +# asm 1: vshr.u64 >t2=reg128#10,t2=q9,r1=reg128#11,r1=q10,> 26 +# asm 1: vshr.u64 >t0=reg128#9,t0=q8,r2=reg128#8,r2=q7,r4=reg128#5,r4=q4,r0=reg128#4,r0=q3,t0=reg128#9,t0=q8,> 26 +# asm 1: vshr.u64 >t3=reg128#14,t3=q13,r0=reg128#4,r0=q3,x23=reg128#10,x23=q9,r3=reg128#6,r3=q5,> 26 +# asm 1: vshr.u64 >t1=reg128#8,t1=q7,x01=reg128#9,x01=q8,r1=reg128#4,r1=q3,> 26 +# asm 1: vshr.u64 >t4=reg128#8,t4=q7,r3=reg128#6,r3=q5,x4=reg128#11,x4=q10,len=int32#4,len=r3,? len - 32 +# asm 1: cmp +bhi ._mainloop + +# qhasm: end: +._end: + +# qhasm: mem128[input_0] = x01;input_0+=16 +# asm 1: vst1.8 {len=int32#1,len=r0,mask=reg128#1,#0xffffffff +# asm 2: vmov.i64 >mask=q0,#0xffffffff +vmov.i64 q0,#0xffffffff + +# qhasm: y01 aligned= mem128[input_2];input_2+=16 +# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[y01=d2->y01=d3},[_5y01=reg128#3,_5y01=q2,y23=reg128#4%bot->y23=reg128#4%top},[y23=d6->y23=d7},[_5y23=reg128#9,_5y23=q8,_5y4=reg128#11,_5y4=q10,x01=reg128#12%bot->x01=reg128#12%top},[x01=d22->x01=d23},[_5y01=reg128#3,<_5y01=reg128#3,_5y01=q2,<_5y01=q2,x23=reg128#13%bot->x23=reg128#13%top},[x23=d24->x23=d25},[_5y23=reg128#9,<_5y23=reg128#9,_5y23=q8,<_5y23=q8,_5y4=reg128#11,<_5y4=reg128#11,_5y4=q10,<_5y4=q10,c01=reg128#14%bot->c01=reg128#14%top},[c01=d26->c01=d27},[x01=reg128#12,x01=q11,c23=reg128#14%bot->c23=reg128#14%top},[c23=d26->c23=d27},[x23=reg128#13,x23=q12,>=6 +# asm 1: vshr.u64 >mask=reg128#1,mask=q0,x4=reg128#14,x4=q13,r0=reg128#15,r0=q14,r1=reg128#3,r1=q2,r2=reg128#16,r2=q15,r3=reg128#9,r3=q8,r4=reg128#10,r4=q9,> 26 +# asm 1: vshr.u64 >t1=reg128#2,t1=q1,r0=reg128#4,r0=q3,r1=reg128#2,r1=q1,> 26 +# asm 1: vshr.u64 >t4=reg128#3,t4=q2,r3=reg128#9,r3=q8,r4=reg128#3,r4=q2,> 26 +# asm 1: vshr.u64 >t2=reg128#10,t2=q9,r1=reg128#2,r1=q1,> 26 +# asm 1: vshr.u64 >t0=reg128#11,t0=q10,r2=reg128#10,r2=q9,r4=reg128#3,r4=q2,r0=reg128#4,r0=q3,t0=reg128#11,t0=q10,> 26 +# asm 1: vshr.u64 >t3=reg128#12,t3=q11,r0=reg128#4,r0=q3,x23=reg128#10,x23=q9,r3=reg128#9,r3=q8,> 26 +# asm 1: vshr.u64 >t1=reg128#11,t1=q10,x01=reg128#4,x01=q3,r1=reg128#2,r1=q1,> 26 +# asm 1: vshr.u64 >t4=reg128#11,t4=q10,r3=reg128#1,r3=q0,x4=reg128#3,x4=q2, + + +#if !defined(OPENSSL_WINDOWS) && defined(OPENSSL_X86_64) + +#include + +#define ALIGN(x) __attribute__((aligned(x))) +/* inline is not a keyword in C89. 
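/* This file provides the x86-64 vector implementation of the three-call
 * interface declared in poly1305.h above; a minimal caller sketch follows
 * (the include path is assumed, and in the AEAD above the one-time key is the
 * ChaCha20 keystream block generated with counter 0). */

#include <stddef.h>
#include <stdint.h>

#include "poly1305.h"  /* header added by this change; path assumed */

static void poly1305_one_shot_sketch(const uint8_t one_time_key[32],
                                     const uint8_t *msg, size_t msg_len,
                                     uint8_t mac[16]) {
  poly1305_state st;
  /* |one_time_key| must only ever authenticate a single message. */
  CRYPTO_poly1305_init(&st, one_time_key);
  /* Data may be fed in arbitrary-sized pieces; one call is enough here. */
  CRYPTO_poly1305_update(&st, msg, msg_len);
  /* Writes the 16-byte authenticator. */
  CRYPTO_poly1305_finish(&st, mac);
}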
*/ +#define INLINE +#define U8TO64_LE(m) (*(uint64_t *)(m)) +#define U8TO32_LE(m) (*(uint32_t *)(m)) +#define U64TO8_LE(m, v) (*(uint64_t *)(m)) = v + +typedef __m128i xmmi; +typedef unsigned __int128 uint128_t; + +static const uint32_t ALIGN(16) poly1305_x64_sse2_message_mask[4] = { + (1 << 26) - 1, 0, (1 << 26) - 1, 0}; +static const uint32_t ALIGN(16) poly1305_x64_sse2_5[4] = {5, 0, 5, 0}; +static const uint32_t ALIGN(16) poly1305_x64_sse2_1shl128[4] = {(1 << 24), 0, + (1 << 24), 0}; + +static uint128_t INLINE add128(uint128_t a, uint128_t b) { return a + b; } + +static uint128_t INLINE add128_64(uint128_t a, uint64_t b) { return a + b; } + +static uint128_t INLINE mul64x64_128(uint64_t a, uint64_t b) { + return (uint128_t)a * b; +} + +static uint64_t INLINE lo128(uint128_t a) { return (uint64_t)a; } + +static uint64_t INLINE shr128(uint128_t v, const int shift) { + return (uint64_t)(v >> shift); +} + +static uint64_t INLINE shr128_pair(uint64_t hi, uint64_t lo, const int shift) { + return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift); +} + +typedef struct poly1305_power_t { + union { + xmmi v; + uint64_t u[2]; + uint32_t d[4]; + } R20, R21, R22, R23, R24, S21, S22, S23, S24; +} poly1305_power; + +typedef struct poly1305_state_internal_t { + poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 + bytes of free storage */ + union { + xmmi H[5]; /* 80 bytes */ + uint64_t HH[10]; + }; + /* uint64_t r0,r1,r2; [24 bytes] */ + /* uint64_t pad0,pad1; [16 bytes] */ + uint64_t started; /* 8 bytes */ + uint64_t leftover; /* 8 bytes */ + uint8_t buffer[64]; /* 64 bytes */ +} poly1305_state_internal; /* 448 bytes total + 63 bytes for + alignment = 511 bytes raw */ + +static poly1305_state_internal INLINE *poly1305_aligned_state( + poly1305_state *state) { + return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63); +} + +/* copy 0-63 bytes */ +static void INLINE +poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) { + size_t offset = src - dst; + if (bytes & 32) { + _mm_storeu_si128((xmmi *)(dst + 0), + _mm_loadu_si128((xmmi *)(dst + offset + 0))); + _mm_storeu_si128((xmmi *)(dst + 16), + _mm_loadu_si128((xmmi *)(dst + offset + 16))); + dst += 32; + } + if (bytes & 16) { + _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset))); + dst += 16; + } + if (bytes & 8) { + *(uint64_t *)dst = *(uint64_t *)(dst + offset); + dst += 8; + } + if (bytes & 4) { + *(uint32_t *)dst = *(uint32_t *)(dst + offset); + dst += 4; + } + if (bytes & 2) { + *(uint16_t *)dst = *(uint16_t *)(dst + offset); + dst += 2; + } + if (bytes & 1) { + *(uint8_t *)dst = *(uint8_t *)(dst + offset); + } +} + +/* zero 0-15 bytes */ +static void INLINE poly1305_block_zero(uint8_t *dst, size_t bytes) { + if (bytes & 8) { + *(uint64_t *)dst = 0; + dst += 8; + } + if (bytes & 4) { + *(uint32_t *)dst = 0; + dst += 4; + } + if (bytes & 2) { + *(uint16_t *)dst = 0; + dst += 2; + } + if (bytes & 1) { + *(uint8_t *)dst = 0; + } +} + +static size_t INLINE poly1305_min(size_t a, size_t b) { + return (a < b) ? 
a : b; +} + +void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { + poly1305_state_internal *st = poly1305_aligned_state(state); + poly1305_power *p; + uint64_t r0, r1, r2; + uint64_t t0, t1; + + /* clamp key */ + t0 = U8TO64_LE(key + 0); + t1 = U8TO64_LE(key + 8); + r0 = t0 & 0xffc0fffffff; + t0 >>= 44; + t0 |= t1 << 20; + r1 = t0 & 0xfffffc0ffff; + t1 >>= 24; + r2 = t1 & 0x00ffffffc0f; + + /* store r in un-used space of st->P[1] */ + p = &st->P[1]; + p->R20.d[1] = (uint32_t)(r0); + p->R20.d[3] = (uint32_t)(r0 >> 32); + p->R21.d[1] = (uint32_t)(r1); + p->R21.d[3] = (uint32_t)(r1 >> 32); + p->R22.d[1] = (uint32_t)(r2); + p->R22.d[3] = (uint32_t)(r2 >> 32); + + /* store pad */ + p->R23.d[1] = U8TO32_LE(key + 16); + p->R23.d[3] = U8TO32_LE(key + 20); + p->R24.d[1] = U8TO32_LE(key + 24); + p->R24.d[3] = U8TO32_LE(key + 28); + + /* H = 0 */ + st->H[0] = _mm_setzero_si128(); + st->H[1] = _mm_setzero_si128(); + st->H[2] = _mm_setzero_si128(); + st->H[3] = _mm_setzero_si128(); + st->H[4] = _mm_setzero_si128(); + + st->started = 0; + st->leftover = 0; +} + +static void poly1305_first_block(poly1305_state_internal *st, + const uint8_t *m) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + xmmi T5, T6; + poly1305_power *p; + uint128_t d[3]; + uint64_t r0, r1, r2; + uint64_t r20, r21, r22, s22; + uint64_t pad0, pad1; + uint64_t c; + uint64_t i; + + /* pull out stored info */ + p = &st->P[1]; + + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; + pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; + + /* compute powers r^2,r^4 */ + r20 = r0; + r21 = r1; + r22 = r2; + for (i = 0; i < 2; i++) { + s22 = r22 * (5 << 2); + + d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22)); + d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21)); + d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20)); + + r20 = lo128(d[0]) & 0xfffffffffff; + c = shr128(d[0], 44); + d[1] = add128_64(d[1], c); + r21 = lo128(d[1]) & 0xfffffffffff; + c = shr128(d[1], 44); + d[2] = add128_64(d[2], c); + r22 = lo128(d[2]) & 0x3ffffffffff; + c = shr128(d[2], 42); + r20 += c * 5; + c = (r20 >> 44); + r20 = r20 & 0xfffffffffff; + r21 += c; + + p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)(r20)&0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R21.v = _mm_shuffle_epi32( + _mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R22.v = + _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R23.v = _mm_shuffle_epi32( + _mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), + _MM_SHUFFLE(1, 0, 1, 0)); + p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16))), + _MM_SHUFFLE(1, 0, 1, 0)); + p->S21.v = _mm_mul_epu32(p->R21.v, FIVE); + p->S22.v = _mm_mul_epu32(p->R22.v, FIVE); + p->S23.v = _mm_mul_epu32(p->R23.v, FIVE); + p->S24.v = _mm_mul_epu32(p->R24.v, FIVE); + p--; + } + + /* put saved info back */ + p = &st->P[1]; + p->R20.d[1] = (uint32_t)(r0); + p->R20.d[3] = (uint32_t)(r0 >> 32); + p->R21.d[1] = (uint32_t)(r1); + p->R21.d[3] = (uint32_t)(r1 >> 32); + p->R22.d[1] = (uint32_t)(r2); + 
p->R22.d[3] = (uint32_t)(r2 >> 32); + p->R23.d[1] = (uint32_t)(pad0); + p->R23.d[3] = (uint32_t)(pad0 >> 32); + p->R24.d[1] = (uint32_t)(pad1); + p->R24.d[3] = (uint32_t)(pad1 >> 32); + + /* H = [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + st->H[0] = _mm_and_si128(MMASK, T5); + st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + st->H[2] = _mm_and_si128(MMASK, T5); + st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); +} + +static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, + size_t bytes) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + + poly1305_power *p; + xmmi H0, H1, H2, H3, H4; + xmmi T0, T1, T2, T3, T4, T5, T6; + xmmi M0, M1, M2, M3, M4; + xmmi C1, C2; + + H0 = st->H[0]; + H1 = st->H[1]; + H2 = st->H[2]; + H3 = st->H[3]; + H4 = st->H[4]; + + while (bytes >= 64) { + /* H *= [r^4,r^4] */ + p = &st->P[0]; + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My]*[r^2,r^2] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + p = &st->P[1]; + T5 = _mm_mul_epu32(M0, p->R20.v); + T6 = _mm_mul_epu32(M0, p->R21.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M1, p->S24.v); + T6 = _mm_mul_epu32(M1, 
p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M2, p->S23.v); + T6 = _mm_mul_epu32(M2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M3, p->S22.v); + T6 = _mm_mul_epu32(M3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M4, p->S21.v); + T6 = _mm_mul_epu32(M4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(M0, p->R22.v); + T6 = _mm_mul_epu32(M0, p->R23.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M1, p->R21.v); + T6 = _mm_mul_epu32(M1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M2, p->R20.v); + T6 = _mm_mul_epu32(M2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M3, p->S24.v); + T6 = _mm_mul_epu32(M3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M4, p->S23.v); + T6 = _mm_mul_epu32(M4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(M0, p->R24.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(M4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)), + _mm_loadl_epi64((xmmi *)(m + 48))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)), + _mm_loadl_epi64((xmmi *)(m + 56))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + T0 = _mm_add_epi64(T0, M0); + T1 = _mm_add_epi64(T1, M1); + T2 = _mm_add_epi64(T2, M2); + T3 = _mm_add_epi64(T3, M3); + T4 = _mm_add_epi64(T4, M4); + + /* reduce */ + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */ + H0 = T0; + H1 = T1; + H2 = T2; + H3 = T3; + H4 = T4; + + m += 64; + bytes -= 64; + } + + st->H[0] = H0; + st->H[1] = H1; + st->H[2] = H2; + st->H[3] = H3; + st->H[4] = H4; +} + +static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, + size_t bytes) { + const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask); + const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128); + const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5); + + poly1305_power *p; + xmmi H0, H1, H2, H3, H4; + xmmi M0, M1, M2, M3, M4; + xmmi T0, T1, T2, T3, T4, T5, T6; + xmmi C1, C2; + + uint64_t r0, r1, r2; + uint64_t t0, t1, t2, t3, t4; + 
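
Each of the vector "reduce" sequences in poly1305_blocks and poly1305_combine is a lane-wise carry propagation in radix 2^26: every limb is split into its low 26 bits and a carry into the next limb, and the carry out of the top limb is folded back into the bottom limb multiplied by five, using 2^130 ≡ 5 (mod 2^130 - 5). A scalar sketch of one such pass (the SIMD code interleaves two carry chains, starting at T0 and at T3, but performs the same arithmetic per lane; the helper below is illustrative only):

#include <stdint.h>

/* carry_reduce_26 propagates pending carries through five radix-2^26 limbs
 * of a 130-bit accumulator and folds the carry out of the top limb back in
 * as carry*5, since 2^130 = 5 mod (2^130 - 5). */
static void carry_reduce_26(uint64_t h[5]) {
  uint64_t c;
  c = h[0] >> 26; h[0] &= 0x3ffffff; h[1] += c;
  c = h[1] >> 26; h[1] &= 0x3ffffff; h[2] += c;
  c = h[2] >> 26; h[2] &= 0x3ffffff; h[3] += c;
  c = h[3] >> 26; h[3] &= 0x3ffffff; h[4] += c;
  c = h[4] >> 26; h[4] &= 0x3ffffff; h[0] += c * 5;
  c = h[0] >> 26; h[0] &= 0x3ffffff; h[1] += c;
}
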
uint64_t c; + size_t consumed = 0; + + H0 = st->H[0]; + H1 = st->H[1]; + H2 = st->H[2]; + H3 = st->H[3]; + H4 = st->H[4]; + + /* p = [r^2,r^2] */ + p = &st->P[1]; + + if (bytes >= 32) { + /* H *= [r^2,r^2] */ + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + /* H += [Mx,My] */ + T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), + _mm_loadl_epi64((xmmi *)(m + 16))); + T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), + _mm_loadl_epi64((xmmi *)(m + 24))); + M0 = _mm_and_si128(MMASK, T5); + M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); + M2 = _mm_and_si128(MMASK, T5); + M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); + M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT); + + T0 = _mm_add_epi64(T0, M0); + T1 = _mm_add_epi64(T1, M1); + T2 = _mm_add_epi64(T2, M2); + T3 = _mm_add_epi64(T3, M3); + T4 = _mm_add_epi64(T4, M4); + + /* reduce */ + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = (H*[r^2,r^2] + [Mx,My]) */ + H0 = T0; + H1 = T1; + H2 = T2; + H3 = T3; + H4 = T4; + + consumed = 32; + } + + /* finalize, H *= [r^2,r] */ + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + + p->R20.d[2] = (uint32_t)(r0)&0x3ffffff; + p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff; + p->R22.d[2] = (uint32_t)((r1 >> 8)) & 
0x3ffffff; + p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff; + p->R24.d[2] = (uint32_t)((r2 >> 16)); + p->S21.d[2] = p->R21.d[2] * 5; + p->S22.d[2] = p->R22.d[2] * 5; + p->S23.d[2] = p->R23.d[2] * 5; + p->S24.d[2] = p->R24.d[2] * 5; + + /* H *= [r^2,r] */ + T0 = _mm_mul_epu32(H0, p->R20.v); + T1 = _mm_mul_epu32(H0, p->R21.v); + T2 = _mm_mul_epu32(H0, p->R22.v); + T3 = _mm_mul_epu32(H0, p->R23.v); + T4 = _mm_mul_epu32(H0, p->R24.v); + T5 = _mm_mul_epu32(H1, p->S24.v); + T6 = _mm_mul_epu32(H1, p->R20.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H2, p->S23.v); + T6 = _mm_mul_epu32(H2, p->S24.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H3, p->S22.v); + T6 = _mm_mul_epu32(H3, p->S23.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H4, p->S21.v); + T6 = _mm_mul_epu32(H4, p->S22.v); + T0 = _mm_add_epi64(T0, T5); + T1 = _mm_add_epi64(T1, T6); + T5 = _mm_mul_epu32(H1, p->R21.v); + T6 = _mm_mul_epu32(H1, p->R22.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H2, p->R20.v); + T6 = _mm_mul_epu32(H2, p->R21.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H3, p->S24.v); + T6 = _mm_mul_epu32(H3, p->R20.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H4, p->S23.v); + T6 = _mm_mul_epu32(H4, p->S24.v); + T2 = _mm_add_epi64(T2, T5); + T3 = _mm_add_epi64(T3, T6); + T5 = _mm_mul_epu32(H1, p->R23.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H2, p->R22.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H3, p->R21.v); + T4 = _mm_add_epi64(T4, T5); + T5 = _mm_mul_epu32(H4, p->R20.v); + T4 = _mm_add_epi64(T4, T5); + + C1 = _mm_srli_epi64(T0, 26); + C2 = _mm_srli_epi64(T3, 26); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_and_si128(T3, MMASK); + T1 = _mm_add_epi64(T1, C1); + T4 = _mm_add_epi64(T4, C2); + C1 = _mm_srli_epi64(T1, 26); + C2 = _mm_srli_epi64(T4, 26); + T1 = _mm_and_si128(T1, MMASK); + T4 = _mm_and_si128(T4, MMASK); + T2 = _mm_add_epi64(T2, C1); + T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE)); + C1 = _mm_srli_epi64(T2, 26); + C2 = _mm_srli_epi64(T0, 26); + T2 = _mm_and_si128(T2, MMASK); + T0 = _mm_and_si128(T0, MMASK); + T3 = _mm_add_epi64(T3, C1); + T1 = _mm_add_epi64(T1, C2); + C1 = _mm_srli_epi64(T3, 26); + T3 = _mm_and_si128(T3, MMASK); + T4 = _mm_add_epi64(T4, C1); + + /* H = H[0]+H[1] */ + H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8)); + H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8)); + H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8)); + H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8)); + H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8)); + + t0 = _mm_cvtsi128_si32(H0); + c = (t0 >> 26); + t0 &= 0x3ffffff; + t1 = _mm_cvtsi128_si32(H1) + c; + c = (t1 >> 26); + t1 &= 0x3ffffff; + t2 = _mm_cvtsi128_si32(H2) + c; + c = (t2 >> 26); + t2 &= 0x3ffffff; + t3 = _mm_cvtsi128_si32(H3) + c; + c = (t3 >> 26); + t3 &= 0x3ffffff; + t4 = _mm_cvtsi128_si32(H4) + c; + c = (t4 >> 26); + t4 &= 0x3ffffff; + t0 = t0 + (c * 5); + c = (t0 >> 26); + t0 &= 0x3ffffff; + t1 = t1 + c; + + st->HH[0] = ((t0) | (t1 << 26)) & 0xfffffffffffull; + st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & 0xfffffffffffull; + st->HH[2] = ((t3 >> 10) | (t4 << 16)) & 0x3ffffffffffull; + + return consumed; +} + +void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, + size_t bytes) { + poly1305_state_internal *st = poly1305_aligned_state(state); + size_t want; + + /* need at least 32 
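
The bulk SIMD path keeps the accumulator in radix 2^26 (five limbs per 64-bit lane); the HH[] assignments at the end of poly1305_combine above collapse the two lanes and repack the result into the radix-2^44 form (44 + 44 + 42 = 130 bits) that the scalar tail in CRYPTO_poly1305_finish consumes. A standalone sketch of that repacking step, with a made-up helper name:

#include <stdint.h>

/* repack_26_to_44 converts five 26-bit limbs t[0..4] of a 130-bit value into
 * the 44/44/42-bit limbs hh[0..2] used by the scalar finish code. */
static void repack_26_to_44(uint64_t hh[3], const uint64_t t[5]) {
  hh[0] = (t[0] | (t[1] << 26)) & 0xfffffffffffULL;
  hh[1] = ((t[1] >> 18) | (t[2] << 8) | (t[3] << 34)) & 0xfffffffffffULL;
  hh[2] = ((t[3] >> 10) | (t[4] << 16)) & 0x3ffffffffffULL;
}
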
initial bytes to start the accelerated branch */ + if (!st->started) { + if ((st->leftover == 0) && (bytes > 32)) { + poly1305_first_block(st, m); + m += 32; + bytes -= 32; + } else { + want = poly1305_min(32 - st->leftover, bytes); + poly1305_block_copy(st->buffer + st->leftover, m, want); + bytes -= want; + m += want; + st->leftover += want; + if ((st->leftover < 32) || (bytes == 0)) + return; + poly1305_first_block(st, st->buffer); + st->leftover = 0; + } + st->started = 1; + } + + /* handle leftover */ + if (st->leftover) { + want = poly1305_min(64 - st->leftover, bytes); + poly1305_block_copy(st->buffer + st->leftover, m, want); + bytes -= want; + m += want; + st->leftover += want; + if (st->leftover < 64) + return; + poly1305_blocks(st, st->buffer, 64); + st->leftover = 0; + } + + /* process 64 byte blocks */ + if (bytes >= 64) { + want = (bytes & ~63); + poly1305_blocks(st, m, want); + m += want; + bytes -= want; + } + + if (bytes) { + poly1305_block_copy(st->buffer + st->leftover, m, bytes); + st->leftover += bytes; + } +} + +void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { + poly1305_state_internal *st = poly1305_aligned_state(state); + size_t leftover = st->leftover; + uint8_t *m = st->buffer; + uint128_t d[3]; + uint64_t h0, h1, h2; + uint64_t t0, t1; + uint64_t g0, g1, g2, c, nc; + uint64_t r0, r1, r2, s1, s2; + poly1305_power *p; + + if (st->started) { + size_t consumed = poly1305_combine(st, m, leftover); + leftover -= consumed; + m += consumed; + } + + /* st->HH will either be 0 or have the combined result */ + h0 = st->HH[0]; + h1 = st->HH[1]; + h2 = st->HH[2]; + + p = &st->P[1]; + r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; + r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; + r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; + s1 = r1 * (5 << 2); + s2 = r2 * (5 << 2); + + if (leftover < 16) + goto poly1305_donna_atmost15bytes; + +poly1305_donna_atleast16bytes: + t0 = U8TO64_LE(m + 0); + t1 = U8TO64_LE(m + 8); + h0 += t0 & 0xfffffffffff; + t0 = shr128_pair(t1, t0, 44); + h1 += t0 & 0xfffffffffff; + h2 += (t1 >> 24) | ((uint64_t)1 << 40); + +poly1305_donna_mul: + d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)), + mul64x64_128(h2, s1)); + d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)), + mul64x64_128(h2, s2)); + d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)), + mul64x64_128(h2, r0)); + h0 = lo128(d[0]) & 0xfffffffffff; + c = shr128(d[0], 44); + d[1] = add128_64(d[1], c); + h1 = lo128(d[1]) & 0xfffffffffff; + c = shr128(d[1], 44); + d[2] = add128_64(d[2], c); + h2 = lo128(d[2]) & 0x3ffffffffff; + c = shr128(d[2], 42); + h0 += c * 5; + + m += 16; + leftover -= 16; + if (leftover >= 16) + goto poly1305_donna_atleast16bytes; + +/* final bytes */ +poly1305_donna_atmost15bytes: + if (!leftover) + goto poly1305_donna_finish; + + m[leftover++] = 1; + poly1305_block_zero(m + leftover, 16 - leftover); + leftover = 16; + + t0 = U8TO64_LE(m + 0); + t1 = U8TO64_LE(m + 8); + h0 += t0 & 0xfffffffffff; + t0 = shr128_pair(t1, t0, 44); + h1 += t0 & 0xfffffffffff; + h2 += (t1 >> 24); + + goto poly1305_donna_mul; + +poly1305_donna_finish: + c = (h0 >> 44); + h0 &= 0xfffffffffff; + h1 += c; + c = (h1 >> 44); + h1 &= 0xfffffffffff; + h2 += c; + c = (h2 >> 42); + h2 &= 0x3ffffffffff; + h0 += c * 5; + + g0 = h0 + 5; + c = (g0 >> 44); + g0 &= 0xfffffffffff; + g1 = h1 + c; + c = (g1 >> 44); + g1 &= 0xfffffffffff; + g2 = h2 + c - ((uint64_t)1 << 42); + + c = (g2 >> 63) - 1; + nc = ~c; + h0 = 
(h0 & nc) | (g0 & c); + h1 = (h1 & nc) | (g1 & c); + h2 = (h2 & nc) | (g2 & c); + + /* pad */ + t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; + t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; + h0 += (t0 & 0xfffffffffff); + c = (h0 >> 44); + h0 &= 0xfffffffffff; + t0 = shr128_pair(t1, t0, 44); + h1 += (t0 & 0xfffffffffff) + c; + c = (h1 >> 44); + h1 &= 0xfffffffffff; + t1 = (t1 >> 24); + h2 += (t1)+c; + + U64TO8_LE(mac + 0, ((h0) | (h1 << 44))); + U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24))); +} + +#endif /* !OPENSSL_WINDOWS && OPENSSL_X86_64 */ diff --git a/include/openssl/chacha.h b/include/openssl/chacha.h new file mode 120000 index 00000000..d42715f7 --- /dev/null +++ b/include/openssl/chacha.h @@ -0,0 +1 @@ +../../crypto/chacha/chacha.h \ No newline at end of file diff --git a/include/openssl/poly1305.h b/include/openssl/poly1305.h new file mode 120000 index 00000000..e0985cd4 --- /dev/null +++ b/include/openssl/poly1305.h @@ -0,0 +1 @@ +../../crypto/poly1305/poly1305.h \ No newline at end of file diff --git a/ssl/s3_lib.c b/ssl/s3_lib.c index 96b3a01e..ae73161c 100644 --- a/ssl/s3_lib.c +++ b/ssl/s3_lib.c @@ -1605,7 +1605,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -1637,7 +1637,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -1669,7 +1669,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -1701,7 +1701,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -1733,7 +1733,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -1765,7 +1765,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -2349,7 +2349,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + 
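
SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD is being added to the existing AES-GCM suites here because their 8-byte variable nonce travels in each record as an explicit prefix, while the ChaCha20-Poly1305 suites added further down use FIXED_NONCE_LEN(0) and derive the whole 8-byte nonce from the sequence number, sending nothing extra on the wire. A sketch of the nonce assembly both shapes share on the send side (helper name is illustrative, not part of the patch):

#include <stdint.h>
#include <string.h>

/* build_aead_nonce writes fixed_len bytes of key-block IV followed by the
 * 8-byte record sequence number, as tls1_enc does when sealing. For the GCM
 * suites (fixed_len 4) the sequence-number half is also written into the
 * record as an explicit prefix; for ChaCha20-Poly1305 (fixed_len 0) it is
 * not. Returns the total nonce length handed to the AEAD. */
static size_t build_aead_nonce(uint8_t nonce[16], const uint8_t *fixed,
                               size_t fixed_len, const uint8_t seq[8]) {
  memcpy(nonce, fixed, fixed_len);
  memcpy(nonce + fixed_len, seq, 8);
  return fixed_len + 8;
}
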
SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -2381,7 +2381,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -2413,7 +2413,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -2445,7 +2445,7 @@ SSL_CIPHER ssl3_ciphers[]={ SSL_AEAD, SSL_TLSV1_2, SSL_NOT_EXP|SSL_HIGH|SSL_FIPS, - SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4), + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(4)|SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD, 128, 128, }, @@ -2529,6 +2529,51 @@ SSL_CIPHER ssl3_ciphers[]={ }, #endif + { + 1, + TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS1_CK_ECDHE_RSA_CHACHA20_POLY1305, + SSL_kEECDH, + SSL_aRSA, + SSL_CHACHA20POLY1305, + SSL_AEAD, + SSL_TLSV1_2, + SSL_NOT_EXP|SSL_HIGH, + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(0), + 256, + 0, + }, + + { + 1, + TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + TLS1_CK_ECDHE_ECDSA_CHACHA20_POLY1305, + SSL_kEECDH, + SSL_aECDSA, + SSL_CHACHA20POLY1305, + SSL_AEAD, + SSL_TLSV1_2, + SSL_NOT_EXP|SSL_HIGH, + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(0), + 256, + 0, + }, + + { + 1, + TLS1_TXT_DHE_RSA_WITH_CHACHA20_POLY1305, + TLS1_CK_DHE_RSA_CHACHA20_POLY1305, + SSL_kEDH, + SSL_aRSA, + SSL_CHACHA20POLY1305, + SSL_AEAD, + SSL_TLSV1_2, + SSL_NOT_EXP|SSL_HIGH, + SSL_HANDSHAKE_MAC_SHA256|TLS1_PRF_SHA256|SSL_CIPHER_ALGORITHM2_AEAD|FIXED_NONCE_LEN(0), + 256, + 0, + }, + /* end of list */ }; diff --git a/ssl/s3_pkt.c b/ssl/s3_pkt.c index c244724b..563f96ea 100644 --- a/ssl/s3_pkt.c +++ b/ssl/s3_pkt.c @@ -765,8 +765,11 @@ static int do_ssl3_write(SSL *s, int type, const unsigned char *buf, else eivlen = 0; } - else if (s->aead_write_ctx != NULL) + else if (s->aead_write_ctx != NULL && + s->aead_write_ctx->variable_nonce_included_in_record) + { eivlen = s->aead_write_ctx->variable_nonce_len; + } else eivlen = 0; diff --git a/ssl/ssl.h b/ssl/ssl.h index 5c0626c4..bf978a03 100644 --- a/ssl/ssl.h +++ b/ssl/ssl.h @@ -281,6 +281,7 @@ extern "C" { #define SSL_TXT_CAMELLIA128 "CAMELLIA128" #define SSL_TXT_CAMELLIA256 "CAMELLIA256" #define SSL_TXT_CAMELLIA "CAMELLIA" +#define SSL_TXT_CHACHA20 "CHACHA20" #define SSL_TXT_MD5 "MD5" #define SSL_TXT_SHA1 "SHA1" @@ -2258,6 +2259,7 @@ const char *SSL_get_version(const SSL *s); int SSL_CIPHER_is_AES(const SSL_CIPHER *c); int SSL_CIPHER_has_MD5_HMAC(const SSL_CIPHER *c); int SSL_CIPHER_is_AESGCM(const SSL_CIPHER *c); +int SSL_CIPHER_is_CHACHA20POLY1305(const SSL_CIPHER *c); /* This sets the 'default' SSL version that SSL_new() will create */ int SSL_CTX_set_ssl_version(SSL_CTX *ctx, const SSL_METHOD *meth); diff --git a/ssl/ssl_ciph.c b/ssl/ssl_ciph.c index e2350205..2cee44cf 100644 --- a/ssl/ssl_ciph.c +++ b/ssl/ssl_ciph.c @@ -296,6 +296,7 @@ static const SSL_CIPHER 
cipher_aliases[]={ {0,SSL_TXT_CAMELLIA128,0,0,0,SSL_CAMELLIA128,0,0,0,0,0,0}, {0,SSL_TXT_CAMELLIA256,0,0,0,SSL_CAMELLIA256,0,0,0,0,0,0}, {0,SSL_TXT_CAMELLIA ,0,0,0,SSL_CAMELLIA128|SSL_CAMELLIA256,0,0,0,0,0,0}, + {0,SSL_TXT_CHACHA20 ,0,0,0,SSL_CHACHA20POLY1305,0,0,0,0,0,0}, /* MAC aliases */ {0,SSL_TXT_MD5,0, 0,0,0,SSL_MD5, 0,0,0,0,0}, @@ -385,9 +386,15 @@ int ssl_cipher_get_evp_aead(const SSL_SESSION *s, const EVP_AEAD **aead) return 0; #ifndef OPENSSL_NO_AES - /* There is only one AEAD for now. */ - *aead = EVP_aead_aes_128_gcm(); - return 1; + switch (c->algorithm_enc) + { + case SSL_AES128GCM: + *aead = EVP_aead_aes_128_gcm(); + return 1; + case SSL_CHACHA20POLY1305: + *aead = EVP_aead_chacha20_poly1305(); + return 1; + } #endif return 0; @@ -1621,6 +1628,9 @@ char *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, int len) case SSL_SEED: enc="SEED(128)"; break; + case SSL_CHACHA20POLY1305: + enc="ChaCha20-Poly1305"; + break; default: enc="unknown"; break; @@ -1681,6 +1691,11 @@ int SSL_CIPHER_is_AESGCM(const SSL_CIPHER *c) return (c->algorithm_mac & (SSL_AES128GCM|SSL_AES256GCM)) != 0; } +int SSL_CIPHER_is_CHACHA20POLY1305(const SSL_CIPHER *c) + { + return (c->algorithm_enc & SSL_CHACHA20POLY1305) != 0; + } + char *SSL_CIPHER_get_version(const SSL_CIPHER *c) { int i; diff --git a/ssl/ssl_locl.h b/ssl/ssl_locl.h index 22637a1c..ed25099d 100644 --- a/ssl/ssl_locl.h +++ b/ssl/ssl_locl.h @@ -316,6 +316,7 @@ #define SSL_SEED 0x00000800L #define SSL_AES128GCM 0x00001000L #define SSL_AES256GCM 0x00002000L +#define SSL_CHACHA20POLY1305 0x00004000L #define SSL_AES (SSL_AES128|SSL_AES256|SSL_AES128GCM|SSL_AES256GCM) #define SSL_CAMELLIA (SSL_CAMELLIA128|SSL_CAMELLIA256) @@ -377,6 +378,12 @@ #define SSL_CIPHER_AEAD_FIXED_NONCE_LEN(ssl_cipher) \ (((ssl_cipher->algorithm2 >> 24) & 0xf)*2) +/* SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD is a flag in + * SSL_CIPHER.algorithm2 which indicates that the variable part of the nonce is + * included as a prefix of the record. (AES-GCM, for example, does with with an + * 8-byte variable nonce.) */ +#define SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD (1<<22) + /* * Export and cipher strength information. For each cipher we have to decide * whether it is exportable or not. This information is likely to change @@ -747,6 +754,9 @@ struct ssl_aead_ctx_st * records. */ unsigned char fixed_nonce[8]; unsigned char fixed_nonce_len, variable_nonce_len, tag_len; + /* variable_nonce_included_in_record is non-zero if the variable nonce + * for a record is included as a prefix before the ciphertext. */ + char variable_nonce_included_in_record; }; #ifndef OPENSSL_NO_COMP diff --git a/ssl/t1_enc.c b/ssl/t1_enc.c index 58f8bf89..78975191 100644 --- a/ssl/t1_enc.c +++ b/ssl/t1_enc.c @@ -361,6 +361,8 @@ static int tls1_change_cipher_state_aead(SSL *s, char is_read, memcpy(aead_ctx->fixed_nonce, iv, iv_len); aead_ctx->fixed_nonce_len = iv_len; aead_ctx->variable_nonce_len = 8; /* always the case, currently. */ + aead_ctx->variable_nonce_included_in_record = + (s->s3->tmp.new_cipher->algorithm2 & SSL_CIPHER_ALGORITHM2_VARIABLE_NONCE_INCLUDED_IN_RECORD) != 0; if (aead_ctx->variable_nonce_len + aead_ctx->fixed_nonce_len != EVP_AEAD_nonce_length(aead)) { OPENSSL_PUT_ERROR(SSL, tls1_change_cipher_state_aead, ERR_R_INTERNAL_ERROR); @@ -822,6 +824,7 @@ int tls1_enc(SSL *s, int send) if (send) { size_t len = rec->length; + size_t eivlen = 0; in = rec->input; out = rec->data; @@ -837,22 +840,27 @@ int tls1_enc(SSL *s, int send) * variable nonce. 
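
With the SSL_TXT_CHACHA20 alias and the new suite names wired into the tables above, the suites can be requested through the usual cipher-string interface. A minimal sketch, assuming an already-created SSL_CTX and eliding error handling beyond the return value:

#include <openssl/ssl.h>

/* prefer_chacha20 asks for the new AEAD suites first; "CHACHA20" resolves
 * through the cipher_aliases entry added above, and the explicit names match
 * the TLS1_TXT_* strings defined in tls1.h. */
static int prefer_chacha20(SSL_CTX *ctx) {
  return SSL_CTX_set_cipher_list(ctx,
      "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:"
      "CHACHA20:HIGH:!aNULL");
}
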
Thus we can copy the sequence number * bytes into place without overwriting any of the * plaintext. */ - memcpy(out, ad, aead->variable_nonce_len); - len -= aead->variable_nonce_len; + if (aead->variable_nonce_included_in_record) + { + memcpy(out, ad, aead->variable_nonce_len); + len -= aead->variable_nonce_len; + eivlen = aead->variable_nonce_len; + } ad[11] = len >> 8; ad[12] = len & 0xff; if (!EVP_AEAD_CTX_seal( &aead->ctx, - out + aead->variable_nonce_len, &n, len + aead->tag_len, + out + eivlen, &n, len + aead->tag_len, nonce, nonce_used, - in + aead->variable_nonce_len, len, + in + eivlen, len, ad, sizeof(ad))) { return -1; } - n += aead->variable_nonce_len; + if (aead->variable_nonce_included_in_record) + n += aead->variable_nonce_len; } else { @@ -865,12 +873,17 @@ int tls1_enc(SSL *s, int send) if (len < aead->variable_nonce_len) return 0; - memcpy(nonce + nonce_used, in, aead->variable_nonce_len); + memcpy(nonce + nonce_used, + aead->variable_nonce_included_in_record ? in : ad, + aead->variable_nonce_len); nonce_used += aead->variable_nonce_len; - in += aead->variable_nonce_len; - len -= aead->variable_nonce_len; - out += aead->variable_nonce_len; + if (aead->variable_nonce_included_in_record) + { + in += aead->variable_nonce_len; + len -= aead->variable_nonce_len; + out += aead->variable_nonce_len; + } if (len < aead->tag_len) return 0; diff --git a/ssl/tls1.h b/ssl/tls1.h index 75de4c4f..d09a6e3d 100644 --- a/ssl/tls1.h +++ b/ssl/tls1.h @@ -567,6 +567,10 @@ SSL_CTX_ctrl(ctx, SSL_CTRL_SET_TLSEXT_AUTHZ_SERVER_AUDIT_PROOF_CB_ARG, 0, arg); #define TLS1_CK_ECDH_RSA_WITH_AES_128_GCM_SHA256 0x0300C031 #define TLS1_CK_ECDH_RSA_WITH_AES_256_GCM_SHA384 0x0300C032 +#define TLS1_CK_ECDHE_RSA_CHACHA20_POLY1305 0x0300CC13 +#define TLS1_CK_ECDHE_ECDSA_CHACHA20_POLY1305 0x0300CC14 +#define TLS1_CK_DHE_RSA_CHACHA20_POLY1305 0x0300CC15 + /* XXX * Inconsistency alert: * The OpenSSL names of ciphers with ephemeral DH here include the string @@ -718,6 +722,10 @@ SSL_CTX_ctrl(ctx, SSL_CTRL_SET_TLSEXT_AUTHZ_SERVER_AUDIT_PROOF_CB_ARG, 0, arg); #define TLS1_TXT_ECDH_RSA_WITH_AES_128_GCM_SHA256 "ECDH-RSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDH_RSA_WITH_AES_256_GCM_SHA384 "ECDH-RSA-AES256-GCM-SHA384" +#define TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305 "ECDHE-RSA-CHACHA20-POLY1305" +#define TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 "ECDHE-ECDSA-CHACHA20-POLY1305" +#define TLS1_TXT_DHE_RSA_WITH_CHACHA20_POLY1305 "DHE-RSA-CHACHA20-POLY1305" + #define TLS_CT_RSA_SIGN 1 #define TLS_CT_DSS_SIGN 2 #define TLS_CT_RSA_FIXED_DH 3 diff --git a/util/all_tests.sh b/util/all_tests.sh index 8f7071e3..6932a512 100644 --- a/util/all_tests.sh +++ b/util/all_tests.sh @@ -3,8 +3,10 @@ TESTS=" ./crypto/cipher/aead_test aes-128-gcm ../crypto/cipher/aes_128_gcm_tests.txt ./crypto/cipher/aead_test aes-256-gcm ../crypto/cipher/aes_256_gcm_tests.txt +./crypto/cipher/aead_test chacha20-poly1305 ../crypto/cipher/chacha20_poly1305_tests.txt ./crypto/bio/bio_test ./crypto/bn/bn_test +./crypto/cipher/cipher_test ../crypto/cipher/cipher_test.txt ./crypto/dh/dh_test ./crypto/dsa/dsa_test ./crypto/err/err_test
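
The new aead_test line exercises the ChaCha20-Poly1305 AEAD through the EVP_AEAD interface. A minimal seal sketch follows; the shape of EVP_AEAD_CTX_seal matches the call in the tls1_enc hunk above, while the <openssl/aead.h> header name and the EVP_AEAD_CTX_init/_cleanup entry points are assumed from the rest of the tree rather than shown in this patch:

#include <stddef.h>
#include <stdint.h>

#include <openssl/aead.h> /* assumed header for the EVP_AEAD interface */

/* seal_chacha20_poly1305 encrypts |in| with additional data |ad| and appends
 * a 16-byte Poly1305 tag to |out|. Returns 1 on success, 0 on error. The
 * 8-byte |nonce| must never repeat for a given key; in TLS it is the record
 * sequence number. */
static int seal_chacha20_poly1305(uint8_t *out, size_t *out_len,
                                  size_t max_out_len, const uint8_t key[32],
                                  const uint8_t nonce[8], const uint8_t *in,
                                  size_t in_len, const uint8_t *ad,
                                  size_t ad_len) {
  EVP_AEAD_CTX ctx;
  int ok;

  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_chacha20_poly1305(), key, 32,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out_len, nonce, 8,
                         in, in_len, ad, ad_len);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}
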