#include "aes256ctr.h"
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Based heavily on public-domain code by Romain Dolbeau
 * Different handling of nonce+counter than original version using
 * separated 64-bit nonce and internal 64-bit counter, starting from zero
 * Public Domain */

static inline void aesni_encrypt4(uint8_t out[64], __m128i *n, const __m128i rkeys[16]) {
    __m128i f, f0, f1, f2, f3;
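    /* Added note: shuffle mask for _mm_shuffle_epi8 below; it keeps bytes
     * 0-7 (the nonce) in place and byte-reverses bytes 8-15, so the
     * little-endian counter qword appears big-endian in each AES input. */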
    const __m128i idx = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);

    /* Load current counter value */
    f = _mm_load_si128(n);

    /* Increase counter in 4 consecutive blocks */
    f0 = _mm_shuffle_epi8(_mm_add_epi64(f, _mm_set_epi64x(0, 0)), idx);
    f1 = _mm_shuffle_epi8(_mm_add_epi64(f, _mm_set_epi64x(1, 0)), idx);
    f2 = _mm_shuffle_epi8(_mm_add_epi64(f, _mm_set_epi64x(2, 0)), idx);
    f3 = _mm_shuffle_epi8(_mm_add_epi64(f, _mm_set_epi64x(3, 0)), idx);
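    /* Added note: _mm_set_epi64x(k, 0) puts k in the high qword, so only
     * the counter half of the state is incremented; the nonce half is
     * never touched. */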

    /* Write counter for next iteration, increased by 4 */
    _mm_store_si128(n, _mm_add_epi64(f, _mm_set_epi64x(4, 0)));

    /* Actual AES encryption, 4x interleaved */
    f = _mm_load_si128(&rkeys[0]);
    f0 = _mm_xor_si128(f0, f);
    f1 = _mm_xor_si128(f1, f);
    f2 = _mm_xor_si128(f2, f);
    f3 = _mm_xor_si128(f3, f);
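
    /* Added note: AES-256 uses 14 rounds and 15 round keys; rkeys[0] is the
     * whitening XOR above, rounds 1-13 use AESENC, and round 14 below uses
     * AESENCLAST. */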

    for (int i = 1; i < 14; i++) {
        f = _mm_load_si128(&rkeys[i]);
        f0 = _mm_aesenc_si128(f0, f);
        f1 = _mm_aesenc_si128(f1, f);
        f2 = _mm_aesenc_si128(f2, f);
        f3 = _mm_aesenc_si128(f3, f);
    }

    f = _mm_load_si128(&rkeys[14]);
    f0 = _mm_aesenclast_si128(f0, f);
    f1 = _mm_aesenclast_si128(f1, f);
    f2 = _mm_aesenclast_si128(f2, f);
    f3 = _mm_aesenclast_si128(f3, f);

    /* Write results */
    _mm_storeu_si128((__m128i *)(out + 0), f0);
    _mm_storeu_si128((__m128i *)(out + 16), f1);
    _mm_storeu_si128((__m128i *)(out + 32), f2);
    _mm_storeu_si128((__m128i *)(out + 48), f3);
}

void PQCLEAN_KYBER102490S_AVX2_aes256ctr_init(aes256ctr_ctx *state, const uint8_t key[32], uint64_t nonce) {
    __m128i key0, key1, temp0, temp1, temp2, temp4;
    int idx = 0;

    key0 = _mm_loadu_si128((__m128i *)(key + 0));
    key1 = _mm_loadu_si128((__m128i *)(key + 16));
    state->n = _mm_loadl_epi64((__m128i *)&nonce);
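    /* Added note: _mm_loadl_epi64 zeroes the upper qword, so the internal
     * 64-bit counter starts at zero, matching the header comment. */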

    state->rkeys[idx++] = key0;
    temp0 = key0;
    temp2 = key1;
    temp4 = _mm_setzero_si128();
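
    /* Added note: BLOCK1/BLOCK2 implement the AES-256 key schedule, two
     * round keys per BLOCK1/BLOCK2 pair. _mm_aeskeygenassist_si128 provides
     * the SubWord/RotWord/Rcon result, and the shuffle_ps/xor chains fold in
     * the running XOR of the previous round key's 32-bit words. */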

#define BLOCK1(IMM) \
    temp1 = _mm_aeskeygenassist_si128(temp2, IMM); \
    state->rkeys[idx++] = temp2; \
    temp4 = (__m128i)_mm_shuffle_ps((__m128)temp4, (__m128)temp0, 0x10); \
    temp0 = _mm_xor_si128(temp0, temp4); \
    temp4 = (__m128i)_mm_shuffle_ps((__m128)temp4, (__m128)temp0, 0x8c); \
    temp0 = _mm_xor_si128(temp0, temp4); \
    temp1 = (__m128i)_mm_shuffle_ps((__m128)temp1, (__m128)temp1, 0xff); \
    temp0 = _mm_xor_si128(temp0, temp1)

#define BLOCK2(IMM) \
    temp1 = _mm_aeskeygenassist_si128(temp0, IMM); \
    state->rkeys[idx++] = temp0; \
    temp4 = (__m128i)_mm_shuffle_ps((__m128)temp4, (__m128)temp2, 0x10); \
    temp2 = _mm_xor_si128(temp2, temp4); \
    temp4 = (__m128i)_mm_shuffle_ps((__m128)temp4, (__m128)temp2, 0x8c); \
    temp2 = _mm_xor_si128(temp2, temp4); \
    temp1 = (__m128i)_mm_shuffle_ps((__m128)temp1, (__m128)temp1, 0xaa); \
    temp2 = _mm_xor_si128(temp2, temp1)

    BLOCK1(0x01);
    BLOCK2(0x01);

    BLOCK1(0x02);
    BLOCK2(0x02);

    BLOCK1(0x04);
    BLOCK2(0x04);

    BLOCK1(0x08);
    BLOCK2(0x08);

    BLOCK1(0x10);
    BLOCK2(0x10);

    BLOCK1(0x20);
    BLOCK2(0x20);

    BLOCK1(0x40);
    state->rkeys[idx++] = temp0;
}

void PQCLEAN_KYBER102490S_AVX2_aes256ctr_squeezeblocks(uint8_t *out,
        size_t nblocks,
        aes256ctr_ctx *state) {
    size_t i;
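    /* Added note: each aesni_encrypt4 call writes 64 bytes (4 AES-CTR
     * blocks), so out must have room for 64*nblocks bytes. */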
    for (i = 0; i < nblocks; i++) {
        aesni_encrypt4(out, &state->n, state->rkeys);
        out += 64;
    }
}

void PQCLEAN_KYBER102490S_AVX2_aes256ctr_prf(uint8_t *out,
        size_t outlen,
        const uint8_t key[32],
        uint64_t nonce) {
    unsigned int i;
    uint8_t buf[64];
    aes256ctr_ctx state;

    PQCLEAN_KYBER102490S_AVX2_aes256ctr_init(&state, key, nonce);

    while (outlen >= 64) {
        aesni_encrypt4(out, &state.n, state.rkeys);
        outlen -= 64;
        out += 64;
    }
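
    /* Added note: for a trailing partial block, a full 64-byte chunk is
     * encrypted into the stack buffer buf and only the first outlen bytes
     * are copied out. */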
    if (outlen) {
        aesni_encrypt4(buf, &state.n, state.rkeys);
        for (i = 0; i < outlen; i++) {
            out[i] = buf[i];
        }
    }
}
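
/*
 * Minimal usage sketch (added; illustrative only). It exercises the two
 * interfaces above and checks that they agree: the one-shot PRF versus
 * init + squeezeblocks. The AES256CTR_DEMO guard is hypothetical, not a
 * flag the build system defines.
 */
#ifdef AES256CTR_DEMO
#include <stdio.h>
#include <string.h>

int main(void) {
    uint8_t key[32] = {0};  /* all-zero key, demo input only */
    uint8_t a[128], b[128];
    aes256ctr_ctx state;

    /* One-shot: 128 bytes of keystream for (key, nonce = 0) */
    PQCLEAN_KYBER102490S_AVX2_aes256ctr_prf(a, sizeof a, key, 0);

    /* Incremental: same key and nonce, two 64-byte blocks */
    PQCLEAN_KYBER102490S_AVX2_aes256ctr_init(&state, key, 0);
    PQCLEAN_KYBER102490S_AVX2_aes256ctr_squeezeblocks(b, 2, &state);

    /* Both paths start from counter 0, so the outputs must match */
    printf("%s\n", memcmp(a, b, sizeof a) == 0 ? "match" : "MISMATCH");
    return 0;
}
#endif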
|