Fix undefined pointer casts in SHA-512 code.

Casting an unaligned pointer to uint64_t* is undefined behavior, even
on platforms that support unaligned access. Additionally, dereferencing
it as uint64_t violates strict aliasing rules. Instead, use memcpys,
which we assume any sensible compiler can optimize. Also simplify the
PULL64 business with the existing CRYPTO_bswap8.
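
As a minimal sketch (not the tree's exact code) of the approach the new
load_u64_be helper in sha512.c takes: the name sketch_load_u64_be is
illustrative, it assumes a little-endian host, and it uses a hand-rolled
byte swap where the tree uses OPENSSL_memcpy and CRYPTO_bswap8.

    #include <stdint.h>
    #include <string.h>

    // Alignment- and aliasing-safe big-endian load. memcpy is defined for
    // any source alignment and does not violate strict aliasing; a sensible
    // compiler turns it into a single (possibly unaligned) 8-byte load.
    static uint64_t sketch_load_u64_be(const void *ptr) {
      uint64_t v;
      memcpy(&v, ptr, sizeof(v));
      // SHA-512 message words are big-endian, so byte-swap on a
      // little-endian host (the tree uses CRYPTO_bswap8 here).
      return ((v & UINT64_C(0x00000000000000ff)) << 56) |
             ((v & UINT64_C(0x000000000000ff00)) << 40) |
             ((v & UINT64_C(0x0000000000ff0000)) << 24) |
             ((v & UINT64_C(0x00000000ff000000)) << 8) |
             ((v & UINT64_C(0x000000ff00000000)) >> 8) |
             ((v & UINT64_C(0x0000ff0000000000)) >> 24) |
             ((v & UINT64_C(0x00ff000000000000)) >> 40) |
             ((v & UINT64_C(0xff00000000000000)) >> 56);
    }

The pattern being removed, *((const uint64_t *)(&(x))), performed the
same load but with undefined behavior on unaligned input and under
strict aliasing.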

This also removes the need for the
SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA logic. The generic C code now
handles unaligned data and the assembly already can as well. (The only
problematic platform with assembly is old ARM, but sha512-armv4.pl
already handles this via an __ARM_ARCH__ check. See also OpenSSL's
version of this file, which always defines
SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA if SHA512_ASM is defined.)

Add unaligned tests to digest_test.cc, so we retain coverage of
unaligned EVP_MD inputs.

Change-Id: Idfd8586c64bab2a77292af2fa8eebbd193e57c7d
Reviewed-on: https://boringssl-review.googlesource.com/c/34444
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: Adam Langley <agl@google.com>
Author: David Benjamin
Date: 2019-01-12 18:43:18 +00:00
Committed by: CQ bot account commit-bot@chromium.org
Commit: 2fe0360a4e (parent 72f015562c)
4 changed files with 65 additions and 119 deletions


@@ -17,6 +17,7 @@
#include <string.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
@@ -183,6 +184,20 @@ static void TestDigest(const TestVector *test) {
EXPECT_EQ(EVP_MD_size(test->md.func()), digest_len);
CompareDigest(test, digest.get(), digest_len);
// Test with unaligned input.
ASSERT_TRUE(EVP_DigestInit_ex(ctx.get(), test->md.func(), NULL));
std::vector<char> unaligned(strlen(test->input) + 1);
char *ptr = unaligned.data();
if ((reinterpret_cast<uintptr_t>(ptr) & 1) == 0) {
ptr++;
}
OPENSSL_memcpy(ptr, test->input, strlen(test->input));
for (size_t i = 0; i < test->repeat; i++) {
ASSERT_TRUE(EVP_DigestUpdate(ctx.get(), ptr, strlen(test->input)));
}
ASSERT_TRUE(EVP_DigestFinal_ex(ctx.get(), digest.get(), &digest_len));
CompareDigest(test, digest.get(), digest_len);
// Test the one-shot function.
if (test->md.one_shot_func && test->repeat == 1) {
uint8_t *out = test->md.one_shot_func((const uint8_t *)test->input,


@@ -27,15 +27,18 @@ extern "C" {
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) || \
defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE)
#define SHA1_ASM
void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
void sha1_block_data_order(uint32_t *state, const uint8_t *in,
size_t num_blocks);
#endif
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) || \
defined(OPENSSL_AARCH64)
#define SHA256_ASM
#define SHA512_ASM
void sha256_block_data_order(uint32_t *state, const uint8_t *in, size_t num);
void sha512_block_data_order(uint64_t *state, const uint64_t *W, size_t num);
void sha256_block_data_order(uint32_t *state, const uint8_t *in,
size_t num_blocks);
void sha512_block_data_order(uint64_t *state, const uint8_t *in,
size_t num_blocks);
#endif
#endif // OPENSSL_NO_ASM


@@ -64,22 +64,11 @@
#include "../../internal.h"
// IMPLEMENTATION NOTES.
//
// The 32-bit hash algorithms share a common byte-order neutral collector and
// padding function implementations that operate on unaligned data,
// ../md32_common.h. This SHA-512 implementation does not. Reasons
// [in reverse order] are:
//
// - It's the only 64-bit hash algorithm for the moment of this writing,
// there is no need for common collector/padding implementation [yet];
// - By supporting only a transform function that operates on *aligned* data
// the collector/padding function is simpler and easier to optimize.
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
defined(__ARM_FEATURE_UNALIGNED)
#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
#endif
// ../digest/md32_common.h. SHA-512 is the only 64-bit hash algorithm, as of
// this writing, so there is no need for a common collector/padding
// implementation yet.
int SHA384_Init(SHA512_CTX *sha) {
sha->h[0] = UINT64_C(0xcbbb9d5dc1059ed8);
@@ -135,8 +124,8 @@ uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out) {
}
#if !defined(SHA512_ASM)
static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
size_t num);
static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
size_t num_blocks);
#endif
@@ -149,19 +138,13 @@ int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
}
void SHA512_Transform(SHA512_CTX *c, const uint8_t *block) {
#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
if ((size_t)block % sizeof(c->u.d[0]) != 0) {
OPENSSL_memcpy(c->u.p, block, sizeof(c->u.p));
block = c->u.p;
}
#endif
sha512_block_data_order(c->h, (uint64_t *)block, 1);
sha512_block_data_order(c->h, block, 1);
}
int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
uint64_t l;
uint8_t *p = c->u.p;
const uint8_t *data = (const uint8_t *)in_data;
const uint8_t *data = in_data;
if (len == 0) {
return 1;
@@ -187,28 +170,16 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
OPENSSL_memcpy(p + c->num, data, n), c->num = 0;
len -= n;
data += n;
sha512_block_data_order(c->h, (uint64_t *)p, 1);
sha512_block_data_order(c->h, p, 1);
}
}
if (len >= sizeof(c->u)) {
#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
if ((size_t)data % sizeof(c->u.d[0]) != 0) {
while (len >= sizeof(c->u)) {
OPENSSL_memcpy(p, data, sizeof(c->u));
sha512_block_data_order(c->h, (uint64_t *)p, 1);
len -= sizeof(c->u);
data += sizeof(c->u);
}
} else
#endif
{
sha512_block_data_order(c->h, (uint64_t *)data, len / sizeof(c->u));
sha512_block_data_order(c->h, data, len / sizeof(c->u));
data += len;
len %= sizeof(c->u);
data -= len;
}
}
if (len != 0) {
OPENSSL_memcpy(p, data, len);
@@ -219,7 +190,7 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
}
int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
uint8_t *p = (uint8_t *)sha->u.p;
uint8_t *p = sha->u.p;
size_t n = sha->num;
p[n] = 0x80; // There always is a room for one
@@ -227,7 +198,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
if (n > (sizeof(sha->u) - 16)) {
OPENSSL_memset(p + n, 0, sizeof(sha->u) - n);
n = 0;
sha512_block_data_order(sha->h, (uint64_t *)p, 1);
sha512_block_data_order(sha->h, p, 1);
}
OPENSSL_memset(p + n, 0, sizeof(sha->u) - 16 - n);
@@ -248,7 +219,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
p[sizeof(sha->u) - 15] = (uint8_t)(sha->Nh >> 48);
p[sizeof(sha->u) - 16] = (uint8_t)(sha->Nh >> 56);
sha512_block_data_order(sha->h, (uint64_t *)p, 1);
sha512_block_data_order(sha->h, p, 1);
if (md == NULL) {
// TODO(davidben): This NULL check is absent in other low-level hash 'final'
@@ -348,20 +319,6 @@ static const uint64_t K512[80] = {
__asm__("rorq %1, %0" : "=r"(ret) : "J"(n), "0"(a) : "cc"); \
ret; \
})
#define PULL64(x) \
({ \
uint64_t ret = *((const uint64_t *)(&(x))); \
__asm__("bswapq %0" : "=r"(ret) : "0"(ret)); \
ret; \
})
#elif(defined(__i386) || defined(__i386__))
#define PULL64(x) \
({ \
const unsigned int *p = (const unsigned int *)(&(x)); \
unsigned int hi = p[0], lo = p[1]; \
__asm__("bswapl %0; bswapl %1;" : "=r"(lo), "=r"(hi) : "0"(lo), "1"(hi)); \
((uint64_t)hi) << 32 | lo; \
})
#elif(defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
#define ROTR(a, n) \
({ \
@@ -376,47 +333,22 @@ static const uint64_t K512[80] = {
__asm__("ror %0, %1, %2" : "=r"(ret) : "r"(a), "I"(n)); \
ret; \
})
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define PULL64(x) \
({ \
uint64_t ret; \
__asm__("rev %0, %1" : "=r"(ret) : "r"(*((const uint64_t *)(&(x))))); \
ret; \
})
#endif
#endif
#elif defined(_MSC_VER)
#if defined(_WIN64) // applies to both IA-64 and AMD64
#elif defined(_MSC_VER) && defined(_WIN64)
#pragma intrinsic(_rotr64)
#define ROTR(a, n) _rotr64((a), n)
#endif
#if defined(_M_IX86) && !defined(OPENSSL_NO_ASM)
static uint64_t __fastcall __pull64be(const void *x) {
_asm mov edx, [ecx + 0]
_asm mov eax, [ecx + 4]
_asm bswap edx
_asm bswap eax
}
#define PULL64(x) __pull64be(&(x))
#if _MSC_VER <= 1200
#pragma inline_depth(0)
#endif
#endif
#endif
#ifndef PULL64
#define B(x, j) \
(((uint64_t)(*(((const uint8_t *)(&x)) + j))) << ((7 - j) * 8))
#define PULL64(x) \
(B(x, 0) | B(x, 1) | B(x, 2) | B(x, 3) | B(x, 4) | B(x, 5) | B(x, 6) | \
B(x, 7))
#endif
#ifndef ROTR
#define ROTR(x, s) (((x) >> s) | (x) << (64 - s))
#endif
static inline uint64_t load_u64_be(const void *ptr) {
uint64_t ret;
OPENSSL_memcpy(&ret, ptr, sizeof(ret));
return CRYPTO_bswap8(ret);
}
#define Sigma0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
#define Sigma1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
#define sigma0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
@@ -429,7 +361,7 @@ static uint64_t __fastcall __pull64be(const void *x) {
#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
// This code should give better results on 32-bit CPU with less than
// ~24 registers, both size and performance wise...
static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
size_t num) {
uint64_t A, E, T;
uint64_t X[9 + 80], *F;
@@ -447,7 +379,7 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
F[7] = state[7];
for (i = 0; i < 16; i++, F--) {
T = PULL64(W[i]);
T = load_u64_be(in + i * 8);
F[0] = A;
F[4] = E;
F[8] = T;
@@ -478,7 +410,7 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
state[6] += F[6];
state[7] += F[7];
W += 16;
in += 16 * 8;
}
}
@@ -502,7 +434,7 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
ROUND_00_15(i + j, a, b, c, d, e, f, g, h); \
} while (0)
static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
size_t num) {
uint64_t a, b, c, d, e, f, g, h, s0, s1, T1;
uint64_t X[16];
@@ -519,37 +451,37 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
g = state[6];
h = state[7];
T1 = X[0] = PULL64(W[0]);
T1 = X[0] = load_u64_be(in);
ROUND_00_15(0, a, b, c, d, e, f, g, h);
T1 = X[1] = PULL64(W[1]);
T1 = X[1] = load_u64_be(in + 8);
ROUND_00_15(1, h, a, b, c, d, e, f, g);
T1 = X[2] = PULL64(W[2]);
T1 = X[2] = load_u64_be(in + 2 * 8);
ROUND_00_15(2, g, h, a, b, c, d, e, f);
T1 = X[3] = PULL64(W[3]);
T1 = X[3] = load_u64_be(in + 3 * 8);
ROUND_00_15(3, f, g, h, a, b, c, d, e);
T1 = X[4] = PULL64(W[4]);
T1 = X[4] = load_u64_be(in + 4 * 8);
ROUND_00_15(4, e, f, g, h, a, b, c, d);
T1 = X[5] = PULL64(W[5]);
T1 = X[5] = load_u64_be(in + 5 * 8);
ROUND_00_15(5, d, e, f, g, h, a, b, c);
T1 = X[6] = PULL64(W[6]);
T1 = X[6] = load_u64_be(in + 6 * 8);
ROUND_00_15(6, c, d, e, f, g, h, a, b);
T1 = X[7] = PULL64(W[7]);
T1 = X[7] = load_u64_be(in + 7 * 8);
ROUND_00_15(7, b, c, d, e, f, g, h, a);
T1 = X[8] = PULL64(W[8]);
T1 = X[8] = load_u64_be(in + 8 * 8);
ROUND_00_15(8, a, b, c, d, e, f, g, h);
T1 = X[9] = PULL64(W[9]);
T1 = X[9] = load_u64_be(in + 9 * 8);
ROUND_00_15(9, h, a, b, c, d, e, f, g);
T1 = X[10] = PULL64(W[10]);
T1 = X[10] = load_u64_be(in + 10 * 8);
ROUND_00_15(10, g, h, a, b, c, d, e, f);
T1 = X[11] = PULL64(W[11]);
T1 = X[11] = load_u64_be(in + 11 * 8);
ROUND_00_15(11, f, g, h, a, b, c, d, e);
T1 = X[12] = PULL64(W[12]);
T1 = X[12] = load_u64_be(in + 12 * 8);
ROUND_00_15(12, e, f, g, h, a, b, c, d);
T1 = X[13] = PULL64(W[13]);
T1 = X[13] = load_u64_be(in + 13 * 8);
ROUND_00_15(13, d, e, f, g, h, a, b, c);
T1 = X[14] = PULL64(W[14]);
T1 = X[14] = load_u64_be(in + 14 * 8);
ROUND_00_15(14, c, d, e, f, g, h, a, b);
T1 = X[15] = PULL64(W[15]);
T1 = X[15] = load_u64_be(in + 15 * 8);
ROUND_00_15(15, b, c, d, e, f, g, h, a);
for (i = 16; i < 80; i += 16) {
@@ -580,7 +512,7 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
state[6] += g;
state[7] += h;
W += 16;
in += 16 * 8;
}
}
@@ -589,8 +521,6 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
#endif // !SHA512_ASM
#undef ROTR
#undef PULL64
#undef B
#undef Sigma0
#undef Sigma1
#undef sigma0
@@ -599,5 +529,3 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
#undef Maj
#undef ROUND_00_15
#undef ROUND_16_80
#undef HOST_c2l
#undef HOST_l2c


@@ -51,7 +51,7 @@ TEST(SHATest, SHA512ABI) {
SHA512_CTX ctx;
SHA512_Init(&ctx);
static const uint64_t kBuf[SHA512_CBLOCK / sizeof(uint64_t) * 4] = {0};
static const uint8_t kBuf[SHA512_CBLOCK * 4] = {0};
CHECK_ABI(sha512_block_data_order, ctx.h, kBuf, 1);
CHECK_ABI(sha512_block_data_order, ctx.h, kBuf, 2);
CHECK_ABI(sha512_block_data_order, ctx.h, kBuf, 3);