Reformat md32_common.h, part 1.

We've tweaked it already and upstream's using a different indentation
style now anyway. This is the first of two commits. For verifiability,
this is the output of clang-format with no modifications.

Change-Id: Ia30f20bee0cc8046aedf9ac7106cc4630e8d93e6
Reviewed-on: https://boringssl-review.googlesource.com/6648
Reviewed-by: Adam Langley <agl@google.com>
David Benjamin authored 9 years ago; committed by Adam Langley
commit fea1137e55
1 changed file with 180 additions and 175 deletions:

crypto/digest/md32_common.h (+180, -175)

@@ -81,7 +81,8 @@ extern "C" {
* } <NAME>_CTX;
*
* <chaining length> is the output length of the hash in bytes, before
- * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and SHA-512).
+ * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and
+ * SHA-512).
*
* |HASH_UPDATE| must be defined as the name of the "Update" function to
* generate.
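
(Aside, not part of this commit: md32_common.h is a template. The including
digest defines the HASH_* names first, then includes this header, which
expands into that digest's Update/Transform/Final functions. A hypothetical
instantiation sketch follows; the TOY_* names are invented, and the context
field types are inferred from the fields this file actually touches, namely
h, Nl, Nh, data, and num:

    /* Hypothetical instantiation sketch; TOY_* names are invented. */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct toy_ctx_st {
      uint32_t h[8];     /* chaining value */
      uint32_t Nl, Nh;   /* 64-bit message bit count, split across two words */
      uint32_t data[16]; /* one buffered input block */
      unsigned num;      /* bytes currently buffered in |data| */
    } TOY_CTX;

    /* The digest's compression function, applied to |num| full blocks. */
    static void toy_block_data_order(uint32_t *state, const uint8_t *in,
                                     size_t num);

    #define DATA_ORDER_IS_BIG_ENDIAN
    #define HASH_CTX TOY_CTX
    #define HASH_CBLOCK 64 /* block size in bytes */
    #define HASH_UPDATE TOY_Update
    #define HASH_TRANSFORM TOY_Transform
    #define HASH_FINAL TOY_Final
    #define HASH_BLOCK_DATA_ORDER toy_block_data_order
    #define HASH_MAKE_STRING(c, s) /* digest-specific: serialize (c)->h into (s) */
    #include "md32_common.h"
)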
@@ -137,216 +138,220 @@ extern "C" {
* Engage compiler specific rotate intrinsic function if available.
*/
#undef ROTATE
-# if defined(_MSC_VER)
-# define ROTATE(a,n) _lrotl(a,n)
-# elif defined(__ICC)
-# define ROTATE(a,n) _rotl(a,n)
-# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM)
-/*
- * Some GNU C inline assembler templates. Note that these are
- * rotates by *constant* number of bits! But that's exactly
- * what we need here...
- * <appro@fy.chalmers.se>
- */
-# if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-# define ROTATE(a,n) ({ register uint32_t ret; \
-                        asm ( \
-                        "roll %1,%0" \
-                        : "=r"(ret) \
-                        : "I"(n), "0"((uint32_t)(a)) \
-                        : "cc"); \
-                        ret; \
-                      })
-# endif /* OPENSSL_X86 || OPENSSL_X86_64 */
-# endif /* COMPILER */
+#if defined(_MSC_VER)
+#define ROTATE(a, n) _lrotl(a, n)
+#elif defined(__ICC)
+#define ROTATE(a, n) _rotl(a, n)
+#elif defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
+/*
+ * Some GNU C inline assembler templates. Note that these are
+ * rotates by *constant* number of bits! But that's exactly
+ * what we need here...
+ * <appro@fy.chalmers.se>
+ */
+#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
+#define ROTATE(a, n)                                                     \
+  ({                                                                     \
+    register uint32_t ret;                                               \
+    asm("roll %1,%0" : "=r"(ret) : "I"(n), "0"((uint32_t)(a)) : "cc");   \
+    ret;                                                                 \
+  })
+#endif /* OPENSSL_X86 || OPENSSL_X86_64 */
+#endif /* COMPILER */

#ifndef ROTATE
-#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
+#define ROTATE(a, n) (((a) << (n)) | (((a)&0xffffffff) >> (32 - (n))))
#endif
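
(Aside, not part of this commit: the portable fallback above is a plain
32-bit rotate-left. A quick standalone check of its arithmetic, for
illustration only:

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as the portable ROTATE fallback above. */
    #define ROTATE(a, n) (((a) << (n)) | (((a)&0xffffffff) >> (32 - (n))))

    int main(void) {
      /* The top bit wraps around to bit 0: 0x80000001 rotl 1 == 0x00000003. */
      assert(ROTATE((uint32_t)0x80000001, 1) == 0x00000003);
      /* Note the macro only works for n in 1..31; n == 0 would shift
       * right by 32, which C leaves undefined. */
      return 0;
    }
)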

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM)
-# if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-/*
- * This gives ~30-40% performance improvement in SHA-256 compiled
- * with gcc [on P4]. Well, first macro to be frank. We can pull
- * this trick on x86* platforms only, because these CPUs can fetch
- * unaligned data without raising an exception.
- */
-# define HOST_c2l(c,l) ({ uint32_t r=*((const uint32_t *)(c)); \
-                          asm ("bswapl %0":"=r"(r):"0"(r)); \
-                          (c)+=4; (l)=r; })
-# define HOST_l2c(l,c) ({ uint32_t r=(l); \
-                          asm ("bswapl %0":"=r"(r):"0"(r)); \
-                          *((uint32_t *)(c))=r; (c)+=4; r; })
-# elif defined(__aarch64__)
-# if defined(__BYTE_ORDER__)
-# if defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
-# define HOST_c2l(c,l) ({ uint32_t r; \
-                          asm ("rev %w0,%w1" \
-                          :"=r"(r) \
-                          :"r"(*((const uint32_t *)(c))));\
-                          (c)+=4; (l)=r; })
-# define HOST_l2c(l,c) ({ uint32_t r; \
-                          asm ("rev %w0,%w1" \
-                          :"=r"(r) \
-                          :"r"((uint32_t)(l))); \
-                          *((uint32_t *)(c))=r; (c)+=4; r; })
-# elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
-# define HOST_c2l(c,l) (void)((l)=*((const uint32_t *)(c)), (c)+=4)
-# define HOST_l2c(l,c) (*((uint32_t *)(c))=(l), (c)+=4, (l))
-# endif
-# endif
-# endif
-# endif
+#if defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
+#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
+/*
+ * This gives ~30-40% performance improvement in SHA-256 compiled
+ * with gcc [on P4]. Well, first macro to be frank. We can pull
+ * this trick on x86* platforms only, because these CPUs can fetch
+ * unaligned data without raising an exception.
+ */
+#define HOST_c2l(c, l)                          \
+  ({                                            \
+    uint32_t r = *((const uint32_t *)(c));      \
+    asm("bswapl %0" : "=r"(r) : "0"(r));        \
+    (c) += 4;                                   \
+    (l) = r;                                    \
+  })
+#define HOST_l2c(l, c)                          \
+  ({                                            \
+    uint32_t r = (l);                           \
+    asm("bswapl %0" : "=r"(r) : "0"(r));        \
+    *((uint32_t *)(c)) = r;                     \
+    (c) += 4;                                   \
+    r;                                          \
+  })
+#elif defined(__aarch64__)
+#if defined(__BYTE_ORDER__)
+#if defined(__ORDER_LITTLE_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define HOST_c2l(c, l)                                               \
+  ({                                                                 \
+    uint32_t r;                                                      \
+    asm("rev %w0,%w1" : "=r"(r) : "r"(*((const uint32_t *)(c))));    \
+    (c) += 4;                                                        \
+    (l) = r;                                                         \
+  })
+#define HOST_l2c(l, c)                                   \
+  ({                                                     \
+    uint32_t r;                                          \
+    asm("rev %w0,%w1" : "=r"(r) : "r"((uint32_t)(l)));   \
+    *((uint32_t *)(c)) = r;                              \
+    (c) += 4;                                            \
+    r;                                                   \
+  })
+#elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4)
+#define HOST_l2c(l, c) (*((uint32_t *)(c)) = (l), (c) += 4, (l))
+#endif
+#endif
+#endif
+#endif
#endif

#ifndef HOST_c2l
-#define HOST_c2l(c,l) (void)(l =(((uint32_t)(*((c)++)))<<24), \
-                             l|=(((uint32_t)(*((c)++)))<<16), \
-                             l|=(((uint32_t)(*((c)++)))<< 8), \
-                             l|=(((uint32_t)(*((c)++)))    ))
+#define HOST_c2l(c, l)                                                      \
+  (void)(l = (((uint32_t)(*((c)++))) << 24),                                \
+         l |= (((uint32_t)(*((c)++))) << 16),                               \
+         l |= (((uint32_t)(*((c)++))) << 8), l |= (((uint32_t)(*((c)++)))))
#endif
#ifndef HOST_l2c
-#define HOST_l2c(l,c) (*((c)++)=(uint8_t)(((l)>>24)&0xff), \
-                       *((c)++)=(uint8_t)(((l)>>16)&0xff), \
-                       *((c)++)=(uint8_t)(((l)>> 8)&0xff), \
-                       *((c)++)=(uint8_t)(((l)    )&0xff), \
-                       l)
+#define HOST_l2c(l, c)                           \
+  (*((c)++) = (uint8_t)(((l) >> 24) & 0xff),     \
+   *((c)++) = (uint8_t)(((l) >> 16) & 0xff),     \
+   *((c)++) = (uint8_t)(((l) >> 8) & 0xff),      \
+   *((c)++) = (uint8_t)(((l)) & 0xff), l)
#endif

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
-# define HOST_c2l(c,l) (void)((l)=*((const uint32_t *)(c)), (c)+=4)
-# define HOST_l2c(l,c) (*((uint32_t *)(c))=(l), (c)+=4, l)
+/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+#define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4)
+#define HOST_l2c(l, c) (*((uint32_t *)(c)) = (l), (c) += 4, l)
#endif

#ifndef HOST_c2l
-#define HOST_c2l(c,l) (void)(l =(((uint32_t)(*((c)++)))    ), \
-                             l|=(((uint32_t)(*((c)++)))<< 8), \
-                             l|=(((uint32_t)(*((c)++)))<<16), \
-                             l|=(((uint32_t)(*((c)++)))<<24))
+#define HOST_c2l(c, l)                                                      \
+  (void)(l = (((uint32_t)(*((c)++)))), l |= (((uint32_t)(*((c)++))) << 8),  \
+         l |= (((uint32_t)(*((c)++))) << 16),                               \
+         l |= (((uint32_t)(*((c)++))) << 24))
#endif
#ifndef HOST_l2c
-#define HOST_l2c(l,c) (*((c)++)=(uint8_t)(((l)    )&0xff), \
-                       *((c)++)=(uint8_t)(((l)>> 8)&0xff), \
-                       *((c)++)=(uint8_t)(((l)>>16)&0xff), \
-                       *((c)++)=(uint8_t)(((l)>>24)&0xff), \
-                       l)
+#define HOST_l2c(l, c)                           \
+  (*((c)++) = (uint8_t)(((l)) & 0xff),           \
+   *((c)++) = (uint8_t)(((l) >> 8) & 0xff),      \
+   *((c)++) = (uint8_t)(((l) >> 16) & 0xff),     \
+   *((c)++) = (uint8_t)(((l) >> 24) & 0xff), l)
#endif

#endif
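
(Aside, not part of this commit: whatever the platform, HOST_c2l reads one
32-bit word from a byte pointer in the file's declared data order and
advances the pointer; HOST_l2c writes one back. The fallback arithmetic
above boils down to the following, shown as a standalone check with the
pointer advance dropped for brevity:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const uint8_t c[4] = {0x01, 0x02, 0x03, 0x04};
      /* DATA_ORDER_IS_BIG_ENDIAN: the first byte is most significant. */
      uint32_t be = (((uint32_t)c[0]) << 24) | (((uint32_t)c[1]) << 16) |
                    (((uint32_t)c[2]) << 8) | ((uint32_t)c[3]);
      assert(be == 0x01020304);
      /* DATA_ORDER_IS_LITTLE_ENDIAN: the first byte is least significant. */
      uint32_t le = ((uint32_t)c[0]) | (((uint32_t)c[1]) << 8) |
                    (((uint32_t)c[2]) << 16) | (((uint32_t)c[3]) << 24);
      assert(le == 0x04030201);
      return 0;
    }
)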

-int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
-    {
-    const uint8_t *data=data_;
-    uint8_t *p;
-    uint32_t l;
-    size_t n;
-
-    if (len==0) return 1;
-
-    l=(c->Nl+(((uint32_t)len)<<3))&0xffffffffUL;
-    /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
-     * Wei Dai <weidai@eskimo.com> for pointing it out. */
-    if (l < c->Nl) /* overflow */
-        c->Nh++;
-    c->Nh+=(uint32_t)(len>>29); /* might cause compiler warning on 16-bit */
-    c->Nl=l;
-
-    n = c->num;
-    if (n != 0)
-        {
-        p=(uint8_t *)c->data;
-
-        if (len >= HASH_CBLOCK || len+n >= HASH_CBLOCK)
-            {
-            memcpy (p+n,data,HASH_CBLOCK-n);
-            HASH_BLOCK_DATA_ORDER (c->h,p,1);
-            n = HASH_CBLOCK-n;
-            data += n;
-            len -= n;
-            c->num = 0;
-            memset (p,0,HASH_CBLOCK); /* keep it zeroed */
-            }
-        else
-            {
-            memcpy (p+n,data,len);
-            c->num += (unsigned int)len;
-            return 1;
-            }
-        }
-
-    n = len/HASH_CBLOCK;
-    if (n > 0)
-        {
-        HASH_BLOCK_DATA_ORDER (c->h,data,n);
-        n *= HASH_CBLOCK;
-        data += n;
-        len -= n;
-        }
-
-    if (len != 0)
-        {
-        p = (uint8_t *)c->data;
-        c->num = (unsigned int)len;
-        memcpy (p,data,len);
-        }
-    return 1;
-    }
-
-
-void HASH_TRANSFORM (HASH_CTX *c, const uint8_t *data)
-    {
-    HASH_BLOCK_DATA_ORDER (c->h,data,1);
-    }
-
-
-int HASH_FINAL (uint8_t *md, HASH_CTX *c)
-    {
-    uint8_t *p = (uint8_t *)c->data;
-    size_t n = c->num;
-
-    p[n] = 0x80; /* there is always room for one */
-    n++;
-
-    if (n > (HASH_CBLOCK-8))
-        {
-        memset (p+n,0,HASH_CBLOCK-n);
-        n=0;
-        HASH_BLOCK_DATA_ORDER (c->h,p,1);
-        }
-    memset (p+n,0,HASH_CBLOCK-8-n);
-
-    p += HASH_CBLOCK-8;
-#if defined(DATA_ORDER_IS_BIG_ENDIAN)
-    (void)HOST_l2c(c->Nh,p);
-    (void)HOST_l2c(c->Nl,p);
+int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
+  const uint8_t *data = data_;
+  uint8_t *p;
+  uint32_t l;
+  size_t n;
+
+  if (len == 0)
+    return 1;
+
+  l = (c->Nl + (((uint32_t)len) << 3)) & 0xffffffffUL;
+  /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
+   * Wei Dai <weidai@eskimo.com> for pointing it out. */
+  if (l < c->Nl) /* overflow */
+    c->Nh++;
+  c->Nh += (uint32_t)(len >> 29); /* might cause compiler warning on 16-bit */
+  c->Nl = l;
+
+  n = c->num;
+  if (n != 0) {
+    p = (uint8_t *)c->data;
+
+    if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
+      memcpy(p + n, data, HASH_CBLOCK - n);
+      HASH_BLOCK_DATA_ORDER(c->h, p, 1);
+      n = HASH_CBLOCK - n;
+      data += n;
+      len -= n;
+      c->num = 0;
+      memset(p, 0, HASH_CBLOCK); /* keep it zeroed */
+    } else {
+      memcpy(p + n, data, len);
+      c->num += (unsigned int)len;
+      return 1;
+    }
+  }
+
+  n = len / HASH_CBLOCK;
+  if (n > 0) {
+    HASH_BLOCK_DATA_ORDER(c->h, data, n);
+    n *= HASH_CBLOCK;
+    data += n;
+    len -= n;
+  }
+
+  if (len != 0) {
+    p = (uint8_t *)c->data;
+    c->num = (unsigned int)len;
+    memcpy(p, data, len);
+  }
+  return 1;
+}
+
+
+void HASH_TRANSFORM(HASH_CTX *c, const uint8_t *data) {
+  HASH_BLOCK_DATA_ORDER(c->h, data, 1);
+}
+
+
+int HASH_FINAL(uint8_t *md, HASH_CTX *c) {
+  uint8_t *p = (uint8_t *)c->data;
+  size_t n = c->num;
+
+  p[n] = 0x80; /* there is always room for one */
+  n++;
+
+  if (n > (HASH_CBLOCK - 8)) {
+    memset(p + n, 0, HASH_CBLOCK - n);
+    n = 0;
+    HASH_BLOCK_DATA_ORDER(c->h, p, 1);
+  }
+  memset(p + n, 0, HASH_CBLOCK - 8 - n);
+
+  p += HASH_CBLOCK - 8;
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+  (void)HOST_l2c(c->Nh, p);
+  (void)HOST_l2c(c->Nl, p);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-    (void)HOST_l2c(c->Nl,p);
-    (void)HOST_l2c(c->Nh,p);
+  (void)HOST_l2c(c->Nl, p);
+  (void)HOST_l2c(c->Nh, p);
#endif
-    p -= HASH_CBLOCK;
-    HASH_BLOCK_DATA_ORDER (c->h,p,1);
-    c->num=0;
-    memset (p,0,HASH_CBLOCK);
+  p -= HASH_CBLOCK;
+  HASH_BLOCK_DATA_ORDER(c->h, p, 1);
+  c->num = 0;
+  memset(p, 0, HASH_CBLOCK);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
-    HASH_MAKE_STRING(c,md);
+  HASH_MAKE_STRING(c, md);
#endif

-    return 1;
-    }
+  return 1;
+}


#if defined(__cplusplus)
-} /* extern C */
+}  /* extern C */
#endif

-#endif /* OPENSSL_HEADER_MD32_COMMON_H */
+#endif  /* OPENSSL_HEADER_MD32_COMMON_H */
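
(Aside, not part of this commit: HASH_FINAL above implements standard
Merkle-Damgård padding: append 0x80, zero-fill, and place the 64-bit bit
count (Nh:Nl) in the final 8 bytes, spilling into an extra block when fewer
than 8 bytes remain after the 0x80. A minimal standalone sketch of that
layout, assuming 64-byte blocks and big-endian length encoding; toy_pad is
an invented name:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: lay out md32_common.h-style final padding into
     * |out| for a message whose last partial block holds |n| bytes
     * (n < 64) and whose total length is |total_len| bytes. Returns the
     * number of padding blocks produced (1 or 2). */
    static size_t toy_pad(uint8_t out[128], const uint8_t *tail, size_t n,
                          uint64_t total_len) {
      /* If 0x80 plus the 8 length bytes do not fit, use a second block. */
      size_t blocks = (n + 1 + 8 > 64) ? 2 : 1;
      memset(out, 0, 64 * blocks);
      memcpy(out, tail, n);
      out[n] = 0x80; /* there is always room for one */
      uint64_t bits = total_len << 3; /* length in bits, as Nh:Nl */
      for (size_t i = 0; i < 8; i++) {
        /* Big-endian: most significant byte of the count comes first. */
        out[64 * blocks - 1 - i] = (uint8_t)(bits >> (8 * i));
      }
      return blocks;
    }

    int main(void) {
      uint8_t out[128];
      const uint8_t tail[64] = {0};
      /* 55 buffered bytes still leave room for 0x80 + length: one block. */
      assert(toy_pad(out, tail, 55, 55) == 1);
      /* 56 buffered bytes force the length into a second block. */
      assert(toy_pad(out, tail, 56, 56) == 2);
      return 0;
    }
)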
