From d3a4e280db4c963b5ab0afa02f21322b776ce8c0 Mon Sep 17 00:00:00 2001
From: Brian Smith
Date: Sun, 17 Jan 2016 20:04:05 -1000
Subject: [PATCH] Fix trivial -Wcast-qual violations.

Fix casts from const to non-const where dropping the constness is
completely unnecessary. The changes to chacha_vec.c don't result in any
changes to chacha_vec_arm.S.

Change-Id: I2f10081fd0e73ff5db746347c5971f263a5221a6
Reviewed-on: https://boringssl-review.googlesource.com/6923
Reviewed-by: David Benjamin
---
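Note, placed below the "---" cut so git-am drops it: a minimal sketch of
the warning class this patch fixes, using a hypothetical read_u32 helper
rather than code from the patch itself. -Wcast-qual fires whenever a
cast silently strips a qualifier from the pointed-to type; carrying
const through the cast silences it without changing the generated code.

    #include <stdint.h>

    /* Hypothetical illustration; not part of this patch. */
    static uint32_t read_u32(const uint8_t *in) {
      /* Warns under -Wcast-qual: the cast drops const from |in|. */
      /* return *(uint32_t *)in; */

      /* Fine: const is carried through the cast. */
      return *(const uint32_t *)in;
    }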
 crypto/bn/montgomery.c         |  3 +-
 crypto/chacha/chacha_vec.c     | 22 ++++++------
 crypto/ec/p256-64.c            | 10 +++---
 crypto/modes/ctr.c             |  3 +-
 crypto/poly1305/poly1305_vec.c | 64 +++++++++++++++++-----------------
 5 files changed, 52 insertions(+), 50 deletions(-)

diff --git a/crypto/bn/montgomery.c b/crypto/bn/montgomery.c
index 18da0dad..d956d62b 100644
--- a/crypto/bn/montgomery.c
+++ b/crypto/bn/montgomery.c
@@ -417,11 +417,10 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
 static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
                                    const BN_MONT_CTX *mont) {
-  BIGNUM *n;
   BN_ULONG *ap,*np,*rp,n0,v,carry;
   int nl,max,i;
 
-  n= (BIGNUM*) &(mont->N);
+  const BIGNUM *n = &mont->N;
   nl=n->top;
 
   if (nl == 0) { ret->top=0; return(1); }
diff --git a/crypto/chacha/chacha_vec.c b/crypto/chacha/chacha_vec.c
index addbaa3d..79ec9412 100644
--- a/crypto/chacha/chacha_vec.c
+++ b/crypto/chacha/chacha_vec.c
@@ -80,8 +80,8 @@ typedef unsigned vec __attribute__((vector_size(16)));
 #define VBPI 3
 #endif
 #define ONE (vec) _mm_set_epi32(0, 0, 0, 1)
-#define LOAD(m) (vec) _mm_loadu_si128((__m128i *)(m))
-#define LOAD_ALIGNED(m) (vec) _mm_load_si128((__m128i *)(m))
+#define LOAD(m) (vec) _mm_loadu_si128((const __m128i *)(m))
+#define LOAD_ALIGNED(m) (vec) _mm_load_si128((const __m128i *)(m))
 #define STORE(m, r) _mm_storeu_si128((__m128i *)(m), (__m128i)(r))
 #define ROTV1(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(0, 3, 2, 1))
 #define ROTV2(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(1, 0, 3, 2))
@@ -157,7 +157,10 @@ void CRYPTO_chacha_20(
         const uint8_t nonce[12],
         uint32_t counter)
         {
-        unsigned iters, i, *op=(unsigned *)out, *ip=(unsigned *)in, *kp;
+        unsigned iters, i;
+        unsigned *op = (unsigned *)out;
+        const unsigned *ip = (const unsigned *)in;
+        const unsigned *kp = (const unsigned *)key;
 #if defined(__ARM_NEON__)
         uint32_t np[3];
         uint8_t alignment_buffer[16] __attribute__((aligned(16)));
@@ -165,18 +168,17 @@ void CRYPTO_chacha_20(
         vec s0, s1, s2, s3;
         __attribute__ ((aligned (16))) unsigned chacha_const[] =
                 {0x61707865,0x3320646E,0x79622D32,0x6B206574};
-        kp = (unsigned *)key;
 #if defined(__ARM_NEON__)
         memcpy(np, nonce, 12);
 #endif
         s0 = LOAD_ALIGNED(chacha_const);
-        s1 = LOAD(&((vec*)kp)[0]);
-        s2 = LOAD(&((vec*)kp)[1]);
+        s1 = LOAD(&((const vec*)kp)[0]);
+        s2 = LOAD(&((const vec*)kp)[1]);
         s3 = (vec){
                 counter,
-                ((uint32_t*)nonce)[0],
-                ((uint32_t*)nonce)[1],
-                ((uint32_t*)nonce)[2]
+                ((const uint32_t*)nonce)[0],
+                ((const uint32_t*)nonce)[1],
+                ((const uint32_t*)nonce)[2]
         };
         for (iters = 0; iters < inlen/(BPI*64); iters++)
@@ -316,7 +318,7 @@ void CRYPTO_chacha_20(
         buf[0] = REVV_BE(v0 + s0);
 
         for (i=inlen & ~15; i<inlen; i++)
-                ((char *)op)[i] = ((char *)ip)[i] ^ ((char *)buf)[i];
+                ((char *)op)[i] = ((const char *)ip)[i] ^ ((char *)buf)[i];
diff --git a/crypto/ec/p256-64.c b/crypto/ec/p256-64.c
--- a/crypto/ec/p256-64.c
+++ b/crypto/ec/p256-64.c
@@ ... @@
 static void bin32_to_felem(felem out, const u8 in[32]) {
-  out[0] = *((u64 *)&in[0]);
-  out[1] = *((u64 *)&in[8]);
-  out[2] = *((u64 *)&in[16]);
-  out[3] = *((u64 *)&in[24]);
+  out[0] = *((const u64 *)&in[0]);
+  out[1] = *((const u64 *)&in[8]);
+  out[2] = *((const u64 *)&in[16]);
+  out[3] = *((const u64 *)&in[24]);
 }
@@ ... @@ static void select_point(const u64 idx, unsigned int size,
   for (i = 0; i < size; i++) {
-    const u64 *inlimbs = (u64 *)&pre_comp[i][0][0];
+    const u64 *inlimbs = (const u64 *)&pre_comp[i][0][0];
     u64 mask = i ^ idx;
     mask |= mask >> 4;
     mask |= mask >> 2;
diff --git a/crypto/modes/ctr.c b/crypto/modes/ctr.c
index 52ff048f..0baed5d4 100644
--- a/crypto/modes/ctr.c
+++ b/crypto/modes/ctr.c
@@ -125,7 +125,8 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
     (*block)(ivec, ecount_buf, key);
     ctr128_inc(ivec);
     for (; n < 16; n += sizeof(size_t)) {
-      *(size_t *)(out + n) = *(size_t *)(in + n) ^ *(size_t *)(ecount_buf + n);
+      *(size_t *)(out + n) = *(const size_t *)(in + n) ^
+                             *(const size_t *)(ecount_buf + n);
     }
     len -= 16;
     out += 16;
diff --git a/crypto/poly1305/poly1305_vec.c b/crypto/poly1305/poly1305_vec.c
index 07578d08..3235b58b 100644
--- a/crypto/poly1305/poly1305_vec.c
+++ b/crypto/poly1305/poly1305_vec.c
@@ -28,8 +28,8 @@
 #define ALIGN(x) __attribute__((aligned(x)))
 /* inline is not a keyword in C89. */
 #define INLINE
-#define U8TO64_LE(m) (*(uint64_t *)(m))
-#define U8TO32_LE(m) (*(uint32_t *)(m))
+#define U8TO64_LE(m) (*(const uint64_t *)(m))
+#define U8TO32_LE(m) (*(const uint32_t *)(m))
 #define U64TO8_LE(m, v) (*(uint64_t *)(m)) = v
 
 typedef __m128i xmmi;
@@ -93,21 +93,21 @@ poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) {
   size_t offset = src - dst;
   if (bytes & 32) {
     _mm_storeu_si128((xmmi *)(dst + 0),
-                     _mm_loadu_si128((xmmi *)(dst + offset + 0)));
+                     _mm_loadu_si128((const xmmi *)(dst + offset + 0)));
     _mm_storeu_si128((xmmi *)(dst + 16),
-                     _mm_loadu_si128((xmmi *)(dst + offset + 16)));
+                     _mm_loadu_si128((const xmmi *)(dst + offset + 16)));
     dst += 32;
   }
   if (bytes & 16) {
-    _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset)));
+    _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((const xmmi *)(dst + offset)));
     dst += 16;
   }
   if (bytes & 8) {
-    *(uint64_t *)dst = *(uint64_t *)(dst + offset);
+    *(uint64_t *)dst = *(const uint64_t *)(dst + offset);
     dst += 8;
   }
   if (bytes & 4) {
-    *(uint32_t *)dst = *(uint32_t *)(dst + offset);
+    *(uint32_t *)dst = *(const uint32_t *)(dst + offset);
     dst += 4;
   }
   if (bytes & 2) {
@@ -186,9 +186,9 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) {
 
 static void poly1305_first_block(poly1305_state_internal *st,
                                  const uint8_t *m) {
-  const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
-  const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
-  const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
+  const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
+  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
+  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
   xmmi T5, T6;
   poly1305_power *p;
   uint128_t d[3];
@@ -265,10 +265,10 @@ static void poly1305_first_block(poly1305_state_internal *st,
   p->R24.d[3] = (uint32_t)(pad1 >> 32);
 
   /* H = [Mx,My] */
-  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
-                          _mm_loadl_epi64((xmmi *)(m + 16)));
-  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
-                          _mm_loadl_epi64((xmmi *)(m + 24)));
+  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
+                          _mm_loadl_epi64((const xmmi *)(m + 16)));
+  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
+                          _mm_loadl_epi64((const xmmi *)(m + 24)));
   st->H[0] = _mm_and_si128(MMASK, T5);
   st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
   T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -279,9 +279,9 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
                             size_t bytes) {
-  const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
-  const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
-  const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
+  const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
+  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
+  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
   poly1305_power *p;
   xmmi H0, H1, H2, H3, H4;
@@ -345,10 +345,10 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
   T4 = _mm_add_epi64(T4, T5);
 
   /* H += [Mx,My]*[r^2,r^2] */
-  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
-                          _mm_loadl_epi64((xmmi *)(m + 16)));
-  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
-                          _mm_loadl_epi64((xmmi *)(m + 24)));
+  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
+                          _mm_loadl_epi64((const xmmi *)(m + 16)));
+  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
+                          _mm_loadl_epi64((const xmmi *)(m + 24)));
   M0 = _mm_and_si128(MMASK, T5);
   M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
   T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -409,10 +409,10 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
   T4 = _mm_add_epi64(T4, T5);
 
   /* H += [Mx,My] */
-  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)),
-                          _mm_loadl_epi64((xmmi *)(m + 48)));
-  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)),
-                          _mm_loadl_epi64((xmmi *)(m + 56)));
+  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)),
+                          _mm_loadl_epi64((const xmmi *)(m + 48)));
+  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)),
+                          _mm_loadl_epi64((const xmmi *)(m + 56)));
   M0 = _mm_and_si128(MMASK, T5);
   M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
   T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -469,9 +469,9 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
 
 static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
                                size_t bytes) {
-  const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
-  const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
-  const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
+  const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
+  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
+  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
   poly1305_power *p;
   xmmi H0, H1, H2, H3, H4;
@@ -542,10 +542,10 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
   T4 = _mm_add_epi64(T4, T5);
 
   /* H += [Mx,My] */
-  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
-                          _mm_loadl_epi64((xmmi *)(m + 16)));
-  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
-                          _mm_loadl_epi64((xmmi *)(m + 24)));
+  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
+                          _mm_loadl_epi64((const xmmi *)(m + 16)));
+  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
+                          _mm_loadl_epi64((const xmmi *)(m + 24)));
   M0 = _mm_and_si128(MMASK, T5);
   M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
   T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
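
Post-script, not part of the patch: U8TO64_LE and U8TO32_LE above still
read the byte buffer through a casted pointer, which assumes the target
tolerates unaligned, type-punned loads; x86 does, but it is formally a
strict-aliasing violation. A portable sketch, using a hypothetical
u8to64_le helper in place of the macro, would go through memcpy, which
compilers fold to the same single load on little-endian targets:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical memcpy-based alternative to U8TO64_LE. No pointer
     * cast, so neither -Wcast-qual nor strict aliasing applies; like
     * the macro, it assumes a little-endian target. */
    static uint64_t u8to64_le(const uint8_t *m) {
      uint64_t v;
      memcpy(&v, m, sizeof(v));
      return v;
    }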