Fix build.

There were a couple more asm lines to turn into __asm__ when the patches got
reordered slightly.

Change-Id: I44be5caee6d09bb3db5dea4791592b12d175822c
Reviewed-on: https://boringssl-review.googlesource.com/6741
Reviewed-by: Adam Langley <agl@google.com>
This commit is contained in:
David Benjamin 2015-12-16 16:23:13 -05:00 committed by Adam Langley
parent e3203923b5
commit 23a681b9f9

View File

@ -146,38 +146,38 @@ extern "C" {
/* The first macro gives a ~30-40% performance improvement in SHA-256 compiled
 * with gcc on P4. This can only be done on x86, where unaligned data fetches
 * are possible. */
/* HOST_c2l loads four big-endian bytes from |c|, stores the resulting
 * host-order 32-bit value in |l|, and advances |c| by four.  The unaligned
 * uint32_t load plus bswapl is x86-only.  __asm__ (not the plain asm
 * keyword) must be used so this still compiles under -std=c11, where asm is
 * not a recognized keyword. */
#define HOST_c2l(c, l)                       \
  (void)({                                   \
    uint32_t r = *((const uint32_t *)(c));   \
    __asm__("bswapl %0" : "=r"(r) : "0"(r)); \
    (c) += 4;                                \
    (l) = r;                                 \
  })
/* HOST_l2c writes the 32-bit value |l| to |c| as four big-endian bytes and
 * advances |c| by four.  Mirror of HOST_c2l: byte-swap with x86 bswapl, then
 * a single unaligned store.  __asm__ (rather than asm) keeps the macro valid
 * under -std=c11. */
#define HOST_l2c(l, c)                       \
  (void)({                                   \
    uint32_t r = (l);                        \
    __asm__("bswapl %0" : "=r"(r) : "0"(r)); \
    *((uint32_t *)(c)) = r;                  \
    (c) += 4;                                \
    r;                                       \
  })
#elif defined(__aarch64__) && defined(__BYTE_ORDER__) #elif defined(__aarch64__) && defined(__BYTE_ORDER__)
#if defined(__ORDER_LITTLE_ENDIAN__) && \ #if defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* HOST_c2l (AArch64 little-endian variant) loads four big-endian bytes from
 * |c| via a single rev (byte-reverse) instruction, stores the host-order
 * value in |l|, and advances |c| by four.  __asm__ rather than asm so the
 * macro compiles under -std=c11. */
#define HOST_c2l(c, l)                                                   \
  (void)({                                                               \
    uint32_t r;                                                          \
    __asm__("rev %w0, %w1" : "=r"(r) : "r"(*((const uint32_t *)(c))));   \
    (c) += 4;                                                            \
    (l) = r;                                                             \
  })
/* HOST_l2c (AArch64 little-endian variant) byte-reverses |l| with rev and
 * stores it to |c| as four big-endian bytes, advancing |c| by four.  Mirror
 * of the AArch64 HOST_c2l above.  __asm__ rather than asm so the macro
 * compiles under -std=c11. */
#define HOST_l2c(l, c)                                       \
  (void)({                                                   \
    uint32_t r;                                              \
    __asm__("rev %w0, %w1" : "=r"(r) : "r"((uint32_t)(l)));  \
    *((uint32_t *)(c)) = r;                                  \
    (c) += 4;                                                \
    r;                                                       \
  })
#elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4) #define HOST_c2l(c, l) (void)((l) = *((const uint32_t *)(c)), (c) += 4)