From 78fefbf3bbb3ac6ddf537fc927b15b4c41db7f6c Mon Sep 17 00:00:00 2001
From: David Benjamin <davidben@google.com>
Date: Sun, 6 Dec 2015 21:48:45 -0500
Subject: [PATCH] Reformat md32_common.h, part 2.

Manual tweaks and then clang-formatted again.

Change-Id: I809fdb71b2135343e5c1264dd659b464780fc54a
Reviewed-on: https://boringssl-review.googlesource.com/6649
Reviewed-by: Adam Langley <agl@google.com>
---
 crypto/digest/md32_common.h | 125 +++++++++++++++++-------------------
 include/openssl/md4.h       |   2 +-
 include/openssl/md5.h       |   2 +-
 include/openssl/sha.h       |   6 +-
 4 files changed, 63 insertions(+), 72 deletions(-)

diff --git a/crypto/digest/md32_common.h b/crypto/digest/md32_common.h
index 79c5b716..b398a5ee 100644
--- a/crypto/digest/md32_common.h
+++ b/crypto/digest/md32_common.h
@@ -51,11 +51,13 @@
 #include <openssl/base.h>
+#include <assert.h>
 
 #if defined(__cplusplus)
 extern "C" {
 #endif
 
+
 #define asm __asm__
 
 /* This is a generic 32-bit "collector" for message digest algorithms. It
@@ -74,9 +76,9 @@ extern "C" {
  *
  * typedef struct <name>_state_st {
  *   uint32_t h[<chaining length> / sizeof(uint32_t)];
- *   uint32_t Nl,Nh;
+ *   uint32_t Nl, Nh;
 *   uint32_t data[HASH_CBLOCK / sizeof(uint32_t)];
- *   unsigned int num
+ *   unsigned num;
 *   ...
 * } <NAME>_CTX;
 *
@@ -134,27 +136,23 @@ extern "C" {
 #error "HASH_BLOCK_DATA_ORDER must be defined!"
 #endif
 
-/*
- * Engage compiler specific rotate intrinsic function if available.
- */
+#ifndef HASH_MAKE_STRING
+#error "HASH_MAKE_STRING must be defined!"
+#endif
+
 #undef ROTATE
 #if defined(_MSC_VER)
 #define ROTATE(a, n) _lrotl(a, n)
 #elif defined(__ICC)
 #define ROTATE(a, n) _rotl(a, n)
 #elif defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
-/*
- * Some GNU C inline assembler templates. Note that these are
- * rotates by *constant* number of bits! But that's exactly
- * what we need here...
- */
 #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-#define ROTATE(a, n) \
-  ({ \
-    register uint32_t ret; \
-    asm("roll %1,%0" : "=r"(ret) : "I"(n), "0"((uint32_t)(a)) : "cc"); \
-    ret; \
+/* Note this macro requires |n| be a constant. */
+#define ROTATE(a, n)                                                    \
+  ({                                                                    \
+    register uint32_t ret;                                              \
+    asm("roll %1, %0" : "=r"(ret) : "I"(n), "0"((uint32_t)(a)) : "cc"); \
+    ret;                                                                \
   })
 #endif /* OPENSSL_X86 || OPENSSL_X86_64 */
 #endif /* COMPILER */
 
@@ -165,15 +163,12 @@ extern "C" {
 
 #if defined(DATA_ORDER_IS_BIG_ENDIAN)
 
-#ifndef PEDANTIC
-#if defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
+#if !defined(PEDANTIC) && defined(__GNUC__) && __GNUC__ >= 2 && \
+    !defined(OPENSSL_NO_ASM)
 #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-/*
- * This gives ~30-40% performance improvement in SHA-256 compiled
- * with gcc [on P4]. Well, first macro to be frank. We can pull
- * this trick on x86* platforms only, because these CPUs can fetch
- * unaligned data without raising an exception.
- */
+/* The first macro gives a ~30-40% performance improvement in SHA-256 compiled
+ * with gcc on P4. This can only be done on x86, where unaligned data fetches
+ * are possible. */
 #define HOST_c2l(c, l)                     \
   ({                                       \
     uint32_t r = *((const uint32_t *)(c)); \
@@ -189,33 +184,30 @@ extern "C" {
     (c) += 4;                            \
     r;                                   \
   })
-#elif defined(__aarch64__)
-#if defined(__BYTE_ORDER__)
+#elif defined(__aarch64__) && defined(__BYTE_ORDER__)
 #if defined(__ORDER_LITTLE_ENDIAN__) && \
     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define HOST_c2l(c, l) \
-  ({ \
-    uint32_t r; \
-    asm("rev %w0,%w1" : "=r"(r) : "r"(*((const uint32_t *)(c)))); \
-    (c) += 4; \
-    (l) = r; \
+#define HOST_c2l(c, l)                                              \
+  ({                                                                \
+    uint32_t r;                                                     \
+    asm("rev %w0, %w1" : "=r"(r) : "r"(*((const uint32_t *)(c)))); \
+    (c) += 4;                                                       \
+    (l) = r;                                                        \
   })
-#define HOST_l2c(l, c) \
-  ({ \
-    uint32_t r; \
-    asm("rev %w0,%w1" : "=r"(r) : "r"((uint32_t)(l))); \
-    *((uint32_t *)(c)) = r; \
-    (c) += 4; \
-    r; \
+#define HOST_l2c(l, c)                                  \
+  ({                                                    \
+    uint32_t r;                                         \
+    asm("rev %w0, %w1" : "=r"(r) : "r"((uint32_t)(l))); \
+    *((uint32_t *)(c)) = r;                             \
+    (c) += 4;                                           \
+    r;                                                  \
   })
 #elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4)
 #define HOST_l2c(l, c) (*((uint32_t *)(c)) = (l), (c) += 4, (l))
-#endif
-#endif
-#endif
-#endif
-#endif
+#endif /* __aarch64__ && __BYTE_ORDER__ */
+#endif /* ARCH */
+#endif /* !PEDANTIC && GNUC && !NO_ASM */
 
 #ifndef HOST_c2l
 #define HOST_c2l(c, l)                       \
@@ -223,6 +215,7 @@ extern "C" {
   (void)(l = (((uint32_t)(*((c)++))) << 24), \
          l |= (((uint32_t)(*((c)++))) << 16), \
          l |= (((uint32_t)(*((c)++))) << 8), l |= (((uint32_t)(*((c)++)))))
 #endif
+
 #ifndef HOST_l2c
 #define HOST_l2c(l, c)                       \
   (*((c)++) = (uint8_t)(((l) >> 24) & 0xff), \
@@ -237,7 +230,7 @@ extern "C" {
 /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
 #define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4)
 #define HOST_l2c(l, c) (*((uint32_t *)(c)) = (l), (c) += 4, l)
-#endif
+#endif /* OPENSSL_X86 || OPENSSL_X86_64 */
 
 #ifndef HOST_c2l
 #define HOST_c2l(c, l)                        \
@@ -245,6 +238,7 @@ extern "C" {
          l |= (((uint32_t)(*((c)++))) << 8),  \
          l |= (((uint32_t)(*((c)++))) << 16), \
          l |= (((uint32_t)(*((c)++))) << 24))
 #endif
+
 #ifndef HOST_l2c
 #define HOST_l2c(l, c)                       \
   (*((c)++) = (uint8_t)(((l)) & 0xff),       \
    *((c)++) = (uint8_t)(((l) >> 8) & 0xff),  \
@@ -253,28 +247,26 @@ extern "C" {
    *((c)++) = (uint8_t)(((l) >> 16) & 0xff), \
    *((c)++) = (uint8_t)(((l) >> 24) & 0xff), l)
 #endif
 
-#endif
+#endif /* DATA_ORDER */
 
 int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
   const uint8_t *data = data_;
-  uint8_t *p;
-  uint32_t l;
-  size_t n;
 
-  if (len == 0)
+  if (len == 0) {
     return 1;
+  }
 
-  l = (c->Nl + (((uint32_t)len) << 3)) & 0xffffffffUL;
-  /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
-   * Wei Dai <weidai@eskimo.com> for pointing it out. */
-  if (l < c->Nl) /* overflow */
+  uint32_t l = c->Nl + (((uint32_t)len) << 3);
+  if (l < c->Nl) {
+    /* Handle carries. */
     c->Nh++;
-  c->Nh += (uint32_t)(len >> 29); /* might cause compiler warning on 16-bit */
+  }
+  c->Nh += (uint32_t)(len >> 29);
   c->Nl = l;
 
-  n = c->num;
+  size_t n = c->num;
   if (n != 0) {
-    p = (uint8_t *)c->data;
+    uint8_t *p = (uint8_t *)c->data;
 
     if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
       memcpy(p + n, data, HASH_CBLOCK - n);
@@ -283,10 +275,11 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
       data += n;
       len -= n;
       c->num = 0;
-      memset(p, 0, HASH_CBLOCK); /* keep it zeroed */
+      /* Keep |c->data| zeroed when unused. */
+      memset(p, 0, HASH_CBLOCK);
     } else {
       memcpy(p + n, data, len);
-      c->num += (unsigned int)len;
+      c->num += (unsigned)len;
       return 1;
     }
   }
@@ -300,8 +293,8 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
   }
 
   if (len != 0) {
-    p = (uint8_t *)c->data;
-    c->num = (unsigned int)len;
+    uint8_t *p = (uint8_t *)c->data;
+    c->num = (unsigned)len;
     memcpy(p, data, len);
   }
   return 1;
@@ -317,7 +310,10 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) {
   uint8_t *p = (uint8_t *)c->data;
   size_t n = c->num;
 
-  p[n] = 0x80; /* there is always room for one */
+  /* |c->data| always has room for at least one byte. A full block would have
+   * been consumed. */
+  assert(n < HASH_CBLOCK);
+  p[n] = 0x80;
   n++;
 
   if (n > (HASH_CBLOCK - 8)) {
@@ -340,12 +336,7 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) {
   c->num = 0;
   memset(p, 0, HASH_CBLOCK);
 
-#ifndef HASH_MAKE_STRING
-#error "HASH_MAKE_STRING must be defined!"
-#else
   HASH_MAKE_STRING(c, md);
-#endif
-
   return 1;
 }
 
diff --git a/include/openssl/md4.h b/include/openssl/md4.h
index e363b732..19ed662d 100644
--- a/include/openssl/md4.h
+++ b/include/openssl/md4.h
@@ -91,7 +91,7 @@ struct md4_state_st {
   uint32_t h[4];
   uint32_t Nl, Nh;
   uint32_t data[16];
-  unsigned int num;
+  unsigned num;
 };
 
diff --git a/include/openssl/md5.h b/include/openssl/md5.h
index 87c3ba41..055cdb0b 100644
--- a/include/openssl/md5.h
+++ b/include/openssl/md5.h
@@ -96,7 +96,7 @@ struct md5_state_st {
   uint32_t h[4];
   uint32_t Nl, Nh;
   uint32_t data[16];
-  unsigned int num;
+  unsigned num;
 };
 
diff --git a/include/openssl/sha.h b/include/openssl/sha.h
index 423510df..fec2927e 100644
--- a/include/openssl/sha.h
+++ b/include/openssl/sha.h
@@ -116,7 +116,7 @@ struct sha_state_st {
 #endif
   uint32_t Nl, Nh;
   uint32_t data[16];
-  unsigned int num;
+  unsigned num;
 };
 
@@ -177,7 +177,7 @@ struct sha256_state_st {
   uint32_t h[8];
   uint32_t Nl, Nh;
   uint32_t data[16];
-  unsigned int num, md_len;
+  unsigned num, md_len;
 };
 
@@ -245,7 +245,7 @@ struct sha512_state_st {
     uint64_t d[16];
     uint8_t p[128];
   } u;
-  unsigned int num, md_len;
+  unsigned num, md_len;
 };
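
Notes (illustrative, not part of the patch):

The two #error checks moved above spell out the contract a digest
implementation must satisfy before including this header. A minimal sketch of
an instantiation, modeled on crypto/md4/md4.c; the exact macro set and the
md4_block_data_order prototype here are assumptions for illustration, not text
from this commit:

/* Instantiate md32_common.h for a little-endian digest (MD4-style). */
#include <openssl/md4.h>

#define DATA_ORDER_IS_LITTLE_ENDIAN

#define HASH_CTX MD4_CTX
#define HASH_CBLOCK 64
#define HASH_UPDATE MD4_Update
#define HASH_FINAL MD4_Final
/* Serialize the chaining value |h| into |s| using the host-order store
 * macro that md32_common.h itself defines. */
#define HASH_MAKE_STRING(c, s) \
  do {                         \
    uint32_t ll;               \
    ll = (c)->h[0];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[1];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[2];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[3];            \
    HOST_l2c(ll, (s));         \
  } while (0)
/* The compression function; the real one lives in crypto/md4/md4.c. */
#define HASH_BLOCK_DATA_ORDER md4_block_data_order

void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);

#include "../digest/md32_common.h"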
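The rewritten overflow handling in HASH_UPDATE is easier to check with
concrete numbers. |Nl| and |Nh| together form a 64-bit count of hashed bits.
A self-contained illustration of the carry logic, with hypothetical values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint32_t Nl = 0xfffffff8, Nh = 0; /* 0x1fffffff bytes hashed so far */
  size_t len = 2;                   /* two more bytes arrive */

  uint32_t l = Nl + (((uint32_t)len) << 3);
  if (l < Nl) {
    Nh++; /* the low word wrapped, so carry into the high word */
  }
  Nh += (uint32_t)(len >> 29); /* bits 32..63 of len * 8 */
  Nl = l;

  /* Prints "bit count = 0x0000000100000008". */
  printf("bit count = 0x%08" PRIx32 "%08" PRIx32 "\n", Nh, Nl);
  return 0;
}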
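The portable HOST_c2l/HOST_l2c fallbacks are plain shift-based loads and
stores; the x86 bswap and aarch64 rev paths are drop-in replacements for them.
A standalone equivalent of the big-endian pair, with illustrative function
names:

#include <assert.h>
#include <stdint.h>

/* Mirrors the shift-based HOST_c2l: big-endian load of four bytes. */
static uint32_t be_load(const uint8_t *c) {
  return ((uint32_t)c[0] << 24) | ((uint32_t)c[1] << 16) |
         ((uint32_t)c[2] << 8) | (uint32_t)c[3];
}

/* Mirrors the shift-based HOST_l2c: big-endian store of four bytes. */
static void be_store(uint32_t l, uint8_t *c) {
  c[0] = (uint8_t)(l >> 24);
  c[1] = (uint8_t)(l >> 16);
  c[2] = (uint8_t)(l >> 8);
  c[3] = (uint8_t)l;
}

int main(void) {
  uint8_t buf[4];
  be_store(0x01020304, buf);
  assert(buf[0] == 0x01 && buf[3] == 0x04); /* most significant byte first */
  assert(be_load(buf) == 0x01020304);       /* round-trips */
  return 0;
}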