104306f587
STRICT_ALIGNMENT is a remnant of when OpenSSL code would cast pointers to size_t* and load more than one byte at a time. Not all architectures support unaligned access, so the code did an alignment check and only entered this path if the pointers were aligned or the underlying architecture didn't care. This is UB. Unaligned casts in C are undefined on all architectures, so we switched these to memcpy some time ago. Compilers can optimize memcpy into the unaligned accesses we wanted. That left our modes logic as:

- If STRICT_ALIGNMENT is 1 and things are unaligned, work byte-by-byte.
- Otherwise, use the memcpy-based word-by-word code, which now works independently of STRICT_ALIGNMENT.

Remove the first check to simplify things. On x86, x86_64, and aarch64, STRICT_ALIGNMENT is zero and this is a no-op.

ARM is more complex. Per [0], ARMv7 and up support unaligned access. ARMv5 does not. ARMv6 does, but can run in a mode where it looks more like ARMv5. For ARMv7 and up, STRICT_ALIGNMENT should have been zero, but was one. Thus this change should be an improvement for ARMv7 (right now unaligned inputs lose bsaes-armv7). The Android NDK does not even support the pre-ARMv7 ABI anymore [1]. Nonetheless, Cronet still supports ARMv6 as a library. It builds with -march=armv6, which GCC interprets as supporting unaligned access, so it too did not want this code.

For completeness, should anyone still care about ARMv5, or be building with an overly permissive -march flag, GCC does appear unable to inline the memcpy calls. However, GCC also does not interpret (uintptr_t)ptr % sizeof(size_t) as an alignment assertion, so such consumers have already been paying for the memcpy here and throughout the library. In general, C's arcane pointer rules mean we must resort to memcpy often, so, realistically, we must require that the compiler optimize memcpy well.

[0] https://medium.com/@iLevex/the-curious-case-of-unaligned-access-on-arm-5dd0ebe24965
[1] https://developer.android.com/ndk/guides/abis#armeabi

Change-Id: I3c7dea562adaeb663032e395499e69530dd8e145
Reviewed-on: https://boringssl-review.googlesource.com/c/34873
Reviewed-by: Adam Langley <agl@google.com>
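The pattern the message relies on, shown as a short illustrative sketch (an editor's addition, not part of the commit; the name load_word_le_sketch is hypothetical): a memcpy-based word load is defined for any alignment, and on targets with unaligned access the compiler lowers it to a single load. The header below implements the same idea as load_word_le/store_word_le using OPENSSL_memcpy.

    #include <stddef.h>
    #include <string.h>

    static inline size_t load_word_le_sketch(const void *in) {
      size_t v;
      memcpy(&v, in, sizeof(v));  // defined behaviour regardless of alignment
      return v;
    }

    // The old pattern this replaced, undefined unless |in| is suitably aligned:
    //   size_t v = *(const size_t *)in;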
/* ====================================================================
 * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ==================================================================== */

#ifndef OPENSSL_HEADER_MODES_INTERNAL_H
#define OPENSSL_HEADER_MODES_INTERNAL_H

#include <openssl/base.h>

#include <openssl/aes.h>
#include <openssl/cpu.h>

#include <stdlib.h>
#include <string.h>

#include "../../internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


static inline uint32_t GETU32(const void *in) {
  uint32_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return CRYPTO_bswap4(v);
}

static inline void PUTU32(void *out, uint32_t v) {
  v = CRYPTO_bswap4(v);
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline size_t load_word_le(const void *in) {
  size_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return v;
}

static inline void store_word_le(void *out, size_t v) {
  OPENSSL_memcpy(out, &v, sizeof(v));
}

// block128_f is the type of an AES block cipher implementation.
//
// Unlike upstream OpenSSL, it and the other functions in this file hard-code
// |AES_KEY|. It is undefined in C to call a function pointer with anything
// other than the original type. Thus we either must match |block128_f| to the
// type signature of |AES_encrypt| and friends or pass in |void*| wrapper
// functions.
//
// These functions are called exclusively with AES, so we use the former.
typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16],
                           const AES_KEY *key);
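
// Illustrative note (editor's sketch, not a new requirement): because
// |AES_encrypt| from <openssl/aes.h> already has this exact signature, it can
// be assigned to a |block128_f| directly, with no cast and no |void*| wrapper:
//
//   block128_f block = AES_encrypt;  // types match, so calling it is defined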


// CTR.

// ctr128_f is the type of a function that performs CTR-mode encryption.
typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks,
                         const AES_KEY *key, const uint8_t ivec[16]);

// CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode)
// |len| bytes from |in| to |out| using |block| in counter mode. There's no
// requirement that |len| be a multiple of any value and any partial blocks are
// stored in |ecount_buf| and |*num|, which must be zeroed before the initial
// call. The counter is a 128-bit, big-endian value in |ivec| and is
// incremented by this function.
void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16],
                           uint8_t ecount_buf[16], unsigned *num,
                           block128_f block);

// CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes
// |ctr|, a function that performs CTR mode but only deals with the lower 32
// bits of the counter. This is useful when |ctr| can be an optimised
// function.
void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
                                 const AES_KEY *key, uint8_t ivec[16],
                                 uint8_t ecount_buf[16], unsigned *num,
                                 ctr128_f ctr);
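
// A minimal usage sketch for |CRYPTO_ctr128_encrypt| (editor's illustration;
// |aes_key|, |iv| and the buffers are assumed to be set up by the caller):
//
//   uint8_t ecount_buf[16] = {0};
//   unsigned num = 0;
//   // |iv| holds the initial 128-bit, big-endian counter block.
//   CRYPTO_ctr128_encrypt(plaintext, ciphertext, plaintext_len, &aes_key, iv,
//                         ecount_buf, &num, AES_encrypt);
//   // Further calls with the same |iv|, |ecount_buf| and |num| continue the
//   // stream, so partial blocks carry over between calls.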


// GCM.
//
// This API differs from the upstream API slightly. The |GCM128_CONTEXT| does
// not have a |key| pointer that points to the key as upstream's version does.
// Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT|
// can be safely copied. Additionally, |gcm_key| is split into a separate
// struct.

typedef struct { uint64_t hi,lo; } u128;

// gmult_func multiplies |Xi| by the GCM key and writes the result back to
// |Xi|.
typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]);

// ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from
// |inp|. The result is written back to |Xi| and the |len| argument must be a
// multiple of 16.
typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16],
                           const uint8_t *inp, size_t len);

typedef struct gcm128_key_st {
  // Note the MOVBE-based, x86-64, GHASH assembly requires |H| and |Htable| to
  // be the first two elements of this struct. Additionally, some assembly
  // routines require a 16-byte-aligned |Htable| when hashing data, but not
  // initialization. |GCM128_KEY| is not itself aligned to simplify embedding
  // in |EVP_AEAD_CTX|, but |Htable|'s offset must be a multiple of 16.
  u128 H;
  u128 Htable[16];
  gmult_func gmult;
  ghash_func ghash;

  block128_f block;

  // use_aesni_gcm_crypt is true if this context should use the assembly
  // functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data.
  unsigned use_aesni_gcm_crypt:1;
} GCM128_KEY;

// GCM128_CONTEXT contains state for a single GCM operation. The structure
// should be zero-initialized before use.
typedef struct {
  // The following 5 names follow names in GCM specification
  union {
    uint64_t u[2];
    uint32_t d[4];
    uint8_t c[16];
    size_t t[16 / sizeof(size_t)];
  } Yi, EKi, EK0, len, Xi;

  // Note that the order of |Xi| and |gcm_key| is fixed by the MOVBE-based,
  // x86-64, GHASH assembly. Additionally, some assembly routines require
  // |gcm_key| to be 16-byte aligned. |GCM128_KEY| is not itself aligned to
  // simplify embedding in |EVP_AEAD_CTX|.
  alignas(16) GCM128_KEY gcm_key;

  unsigned mres, ares;
} GCM128_CONTEXT;

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
// crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is
// used.
int crypto_gcm_clmul_enabled(void);
#endif

// CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to
// |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware
// accelerated) functions for performing operations in the GHASH field. If the
// AVX implementation was used |*out_is_avx| will be true.
void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 *out_key, u128 out_table[16], int *out_is_avx,
                       const uint8_t gcm_key[16]);

// CRYPTO_gcm128_init_key initialises |gcm_key| to use |block| (typically AES)
// with the given key. |block_is_hwaes| is one if |block| is |aes_hw_encrypt|.
OPENSSL_EXPORT void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key,
                                           const AES_KEY *key,
                                           block128_f block,
                                           int block_is_hwaes);

// CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the
// same key that was passed to |CRYPTO_gcm128_init_key|.
OPENSSL_EXPORT void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const AES_KEY *key,
                                        const uint8_t *iv, size_t iv_len);

// CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM.
// This must be called before any data is encrypted. It returns one on success
// and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad,
                                     size_t len);

// CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key|
// must be the same key that was passed to |CRYPTO_gcm128_init_key|. It returns
// one on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
                                         const AES_KEY *key, const uint8_t *in,
                                         uint8_t *out, size_t len);

// CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key|
// must be the same key that was passed to |CRYPTO_gcm128_init_key|. It returns
// one on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
                                         const AES_KEY *key, const uint8_t *in,
                                         uint8_t *out, size_t len);

// CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using
// a CTR function that only handles the bottom 32 bits of the nonce, like
// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
// passed to |CRYPTO_gcm128_init_key|. It returns one on success and zero
// otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
                                               const AES_KEY *key,
                                               const uint8_t *in, uint8_t *out,
                                               size_t len, ctr128_f stream);

// CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using
// a CTR function that only handles the bottom 32 bits of the nonce, like
// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
// passed to |CRYPTO_gcm128_init_key|. It returns one on success and zero
// otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
                                               const AES_KEY *key,
                                               const uint8_t *in, uint8_t *out,
                                               size_t len, ctr128_f stream);

// CRYPTO_gcm128_finish calculates the authenticator and compares it against
// |len| bytes of |tag|. It returns one on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag,
                                        size_t len);

// CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|.
// The minimum of |len| and 16 bytes are copied into |tag|.
OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag,
                                      size_t len);
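
// A minimal GCM "seal" sketch (editor's illustration only; |aes_key|, |nonce|
// and the buffers are assumed to be set up by the caller, and the
// zero-initialization requirement comes from the |GCM128_CONTEXT| comment
// above):
//
//   GCM128_CONTEXT gcm;
//   OPENSSL_memset(&gcm, 0, sizeof(gcm));
//   CRYPTO_gcm128_init_key(&gcm.gcm_key, &aes_key, AES_encrypt,
//                          /*block_is_hwaes=*/0);
//   CRYPTO_gcm128_setiv(&gcm, &aes_key, nonce, nonce_len);
//   if (!CRYPTO_gcm128_aad(&gcm, aad, aad_len) ||
//       !CRYPTO_gcm128_encrypt(&gcm, &aes_key, plaintext, ciphertext, len)) {
//     // handle error
//   }
//   uint8_t tag[16];
//   CRYPTO_gcm128_tag(&gcm, tag, sizeof(tag));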


// GCM assembly.

#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \
     defined(OPENSSL_PPC64LE))
#define GHASH_ASM
#endif

void gcm_init_4bit(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);

#if defined(GHASH_ASM)

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
#define GCM_FUNCREF_4BIT
void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                     size_t len);

#if defined(OPENSSL_X86_64)
#define GHASH_ASM_X86_64
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
                   size_t len);

OPENSSL_INLINE char gcm_ssse3_capable(void) {
  return (OPENSSL_ia32cap_get()[1] & (1 << (41 - 32))) != 0;
}

// |gcm_gmult_ssse3| and |gcm_ghash_ssse3| require |Htable| to be
// 16-byte-aligned, but |gcm_init_ssse3| does not.
void gcm_init_ssse3(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_ssse3(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_ssse3(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
                     size_t len);

#define AESNI_GCM
size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi);
size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi);
#endif  // OPENSSL_X86_64

#if defined(OPENSSL_X86)
#define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16],
                        const uint8_t *inp, size_t len);
#endif  // OPENSSL_X86

#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#include <openssl/arm_arch.h>
#if __ARM_ARCH__ >= 7
#define GHASH_ASM_ARM
#define GCM_FUNCREF_4BIT

OPENSSL_INLINE int gcm_pmull_capable(void) {
  return CRYPTO_is_ARMv8_PMULL_capable();
}

void gcm_init_v8(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                  size_t len);

#if defined(OPENSSL_ARM)
// 32-bit ARM also has support for doing GCM with NEON instructions.
OPENSSL_INLINE int gcm_neon_capable(void) { return CRYPTO_is_NEON_capable(); }

void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);
#else
// AArch64 only has the ARMv8 versions of functions.
OPENSSL_INLINE int gcm_neon_capable(void) { return 0; }
OPENSSL_INLINE void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
  abort();
}
OPENSSL_INLINE void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
  abort();
}
OPENSSL_INLINE void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
                                   const uint8_t *inp, size_t len) {
  abort();
}
#endif  // OPENSSL_ARM

#endif  // __ARM_ARCH__ >= 7
#elif defined(OPENSSL_PPC64LE)
#define GHASH_ASM_PPC64LE
#define GCM_FUNCREF_4BIT
void gcm_init_p8(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_p8(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_p8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                  size_t len);
#endif
#endif  // GHASH_ASM


// CCM.

typedef struct ccm128_context {
  block128_f block;
  ctr128_f ctr;
  unsigned M, L;
} CCM128_CONTEXT;

// CRYPTO_ccm128_init initialises |ctx| to use |block| (typically AES) with the
// specified |M| and |L| parameters. It returns one on success and zero if |M|
// or |L| is invalid.
int CRYPTO_ccm128_init(CCM128_CONTEXT *ctx, const AES_KEY *key,
                       block128_f block, ctr128_f ctr, unsigned M, unsigned L);

// CRYPTO_ccm128_max_input returns the maximum input length accepted by |ctx|.
size_t CRYPTO_ccm128_max_input(const CCM128_CONTEXT *ctx);

// CRYPTO_ccm128_encrypt encrypts |len| bytes from |in| to |out|, writing the
// tag to |out_tag|. |key| must be the same key that was passed to
// |CRYPTO_ccm128_init|. It returns one on success and zero otherwise.
int CRYPTO_ccm128_encrypt(const CCM128_CONTEXT *ctx, const AES_KEY *key,
                          uint8_t *out, uint8_t *out_tag, size_t tag_len,
                          const uint8_t *nonce, size_t nonce_len,
                          const uint8_t *in, size_t len, const uint8_t *aad,
                          size_t aad_len);

// CRYPTO_ccm128_decrypt decrypts |len| bytes from |in| to |out|, writing the
// expected tag to |out_tag|. |key| must be the same key that was passed to
// |CRYPTO_ccm128_init|. It returns one on success and zero otherwise.
int CRYPTO_ccm128_decrypt(const CCM128_CONTEXT *ctx, const AES_KEY *key,
                          uint8_t *out, uint8_t *out_tag, size_t tag_len,
                          const uint8_t *nonce, size_t nonce_len,
                          const uint8_t *in, size_t len, const uint8_t *aad,
                          size_t aad_len);
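
// A minimal CCM sketch (editor's illustration; a 16-byte tag and L = 2 are
// assumed parameter choices, not requirements of this header, and |aes_ctr| is
// a caller-supplied |ctr128_f| for |aes_key|):
//
//   CCM128_CONTEXT ccm;
//   if (!CRYPTO_ccm128_init(&ccm, &aes_key, AES_encrypt, aes_ctr,
//                           /*M=*/16, /*L=*/2)) {
//     // invalid |M| or |L|
//   }
//   uint8_t tag[16];
//   // With L = 2 the nonce is 15 - L = 13 bytes long.
//   if (!CRYPTO_ccm128_encrypt(&ccm, &aes_key, out, tag, sizeof(tag),
//                              nonce, 13, in, in_len, aad, aad_len)) {
//     // handle error (e.g. |in_len| exceeds |CRYPTO_ccm128_max_input|)
//   }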


// CBC.

// cbc128_f is the type of a function that performs CBC-mode encryption.
typedef void (*cbc128_f)(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16], int enc);

// CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the
// given IV and block cipher in CBC mode. The input need not be a multiple of
// 128 bits long, but the output will round up to the nearest 128 bit multiple,
// zero padding the input if needed. The IV will be updated on return.
void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16],
                           block128_f block);

// CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the
// given IV and block cipher in CBC mode. If |len| is not a multiple of 128
// bits then only that many bytes will be written, but a multiple of 128 bits
// is always read from |in|. The IV will be updated on return.
void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16],
                           block128_f block);
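
// A minimal CBC sketch (editor's illustration; |enc_key| and |dec_key| are
// assumed to be encryption and decryption |AES_KEY|s set up by the caller):
//
//   uint8_t iv[16];
//   OPENSSL_memcpy(iv, original_iv, sizeof(iv));  // |ivec| is updated in place
//   CRYPTO_cbc128_encrypt(plaintext, ciphertext, len, &enc_key, iv,
//                         AES_encrypt);
//
//   OPENSSL_memcpy(iv, original_iv, sizeof(iv));
//   CRYPTO_cbc128_decrypt(ciphertext, plaintext, len, &dec_key, iv,
//                         AES_decrypt);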


// OFB.

// CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode)
// |len| bytes from |in| to |out| using |block| in OFB mode. There's no
// requirement that |len| be a multiple of any value and any partial blocks are
// stored in |ivec| and |*num|, the latter of which must be zero before the
// initial call.
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16], unsigned *num,
                           block128_f block);
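
// A minimal OFB sketch (editor's illustration; the same call decrypts, since
// OFB is a stream mode, and |aes_key| and the buffers are caller-supplied):
//
//   uint8_t iv[16];
//   unsigned num = 0;
//   OPENSSL_memcpy(iv, original_iv, sizeof(iv));
//   CRYPTO_ofb128_encrypt(in, out, len, &aes_key, iv, &num, AES_encrypt);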


// CFB.

// CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
// from |in| to |out| using |block| in CFB mode. There's no requirement that
// |len| be a multiple of any value and any partial blocks are stored in |ivec|
// and |*num|, the latter of which must be zero before the initial call.
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16], unsigned *num,
                           int enc, block128_f block);

// CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
// from |in| to |out| using |block| in CFB-8 mode. Prior to the first call
// |num| should be set to zero.
void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             unsigned *num, int enc, block128_f block);

// CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |bits| bits
// from |in| to |out| using |block| in CFB-1 mode. Prior to the first call
// |num| should be set to zero.
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
                             const AES_KEY *key, uint8_t ivec[16],
                             unsigned *num, int enc, block128_f block);

// CRYPTO_cts128_encrypt_block encrypts |len| bytes from |in| to |out| with the
// given IV and |block| in CBC mode with ciphertext stealing (CTS).
size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len,
                                   const AES_KEY *key, uint8_t ivec[16],
                                   block128_f block);


// POLYVAL.
//
// POLYVAL is a polynomial authenticator that operates over a field very
// similar to the one that GHASH uses. See
// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3.

typedef union {
  uint64_t u[2];
  uint8_t c[16];
} polyval_block;

struct polyval_ctx {
  // Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based,
  // x86-64, GHASH assembly. Additionally, some assembly routines require
  // |Htable| to be 16-byte aligned.
  polyval_block S;
  u128 H;
  alignas(16) u128 Htable[16];
  gmult_func gmult;
  ghash_func ghash;
};

// CRYPTO_POLYVAL_init initialises |ctx| using |key|.
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]);

// CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the
// blocks from |in|. Only a whole number of blocks can be processed so |in_len|
// must be a multiple of 16.
void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in,
                                  size_t in_len);

// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|.
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]);
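
// A minimal POLYVAL sketch (editor's illustration; |key| is a 16-byte POLYVAL
// key and |data_len| must be a multiple of 16, per the comment above):
//
//   struct polyval_ctx polyval;
//   CRYPTO_POLYVAL_init(&polyval, key);
//   CRYPTO_POLYVAL_update_blocks(&polyval, data, data_len);
//   uint8_t result[16];
//   CRYPTO_POLYVAL_finish(&polyval, result);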


#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_MODES_INTERNAL_H