Fix trivial -Wcast-qual violations.

Fix casts from const to non-const where dropping the constness is
completely unnecessary. The changes to chacha_vec.c don't result in any
changes to chacha_vec_arm.S.

Change-Id: I2f10081fd0e73ff5db746347c5971f263a5221a6
Reviewed-on: https://boringssl-review.googlesource.com/6923
Reviewed-by: David Benjamin <davidben@google.com>
Author: Brian Smith
Date: 2016-01-17 20:04:05 -10:00
Committed-by: David Benjamin
Parent: a646258c14
Commit: d3a4e280db

5 changed files with 52 additions and 50 deletions
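
As background (not part of the commit), here is a minimal standalone sketch of the pattern being fixed below: a cast from a const-qualified pointer to a non-const pointer type trips -Wcast-qual, and keeping the qualifier in the cast's target type silences the warning without changing behavior. The function names are made up for illustration; building with e.g. gcc -Wcast-qual -c shows the diagnostic on the first variant.

#include <stdint.h>

/* -Wcast-qual warns here: the cast discards the 'const' qualifier. */
uint32_t load_word_nonconst(const uint8_t *p) {
  return *(uint32_t *)p;
}

/* Const-correct version of the same read: the qualifier is kept, no warning. */
uint32_t load_word_const(const uint8_t *p) {
  return *(const uint32_t *)p;
}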

@@ -417,11 +417,10 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, const BN_MONT_CTX *mont)
{
BIGNUM *n;
BN_ULONG *ap,*np,*rp,n0,v,carry;
int nl,max,i;
n= (BIGNUM*) &(mont->N);
const BIGNUM *n = &mont->N;
nl=n->top;
if (nl == 0) { ret->top=0; return(1); }
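
The hunk above takes a slightly different shape from the rest of the commit: rather than making the cast const-correct, it drops the cast entirely by declaring the local as a pointer to const. A tiny sketch of that pattern, with hypothetical names standing in for BN_MONT_CTX:

struct ctx { int n; };  /* hypothetical stand-in for BN_MONT_CTX */

int read_n(const struct ctx *mont) {
  /* Before: int *p = (int *)&mont->n;  -- casts away const, -Wcast-qual warns. */
  const int *p = &mont->n;  /* After: no cast needed, const is preserved. */
  return *p;
}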

@@ -80,8 +80,8 @@ typedef unsigned vec __attribute__((vector_size(16)));
#define VBPI 3
#endif
#define ONE (vec) _mm_set_epi32(0, 0, 0, 1)
#define LOAD(m) (vec) _mm_loadu_si128((__m128i *)(m))
#define LOAD_ALIGNED(m) (vec) _mm_load_si128((__m128i *)(m))
#define LOAD(m) (vec) _mm_loadu_si128((const __m128i *)(m))
#define LOAD_ALIGNED(m) (vec) _mm_load_si128((const __m128i *)(m))
#define STORE(m, r) _mm_storeu_si128((__m128i *)(m), (__m128i)(r))
#define ROTV1(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(0, 3, 2, 1))
#define ROTV2(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(1, 0, 3, 2))
@@ -157,7 +157,10 @@ void CRYPTO_chacha_20(
const uint8_t nonce[12],
uint32_t counter)
{
unsigned iters, i, *op=(unsigned *)out, *ip=(unsigned *)in, *kp;
unsigned iters, i;
unsigned *op = (unsigned *)out;
const unsigned *ip = (const unsigned *)in;
const unsigned *kp = (const unsigned *)key;
#if defined(__ARM_NEON__)
uint32_t np[3];
uint8_t alignment_buffer[16] __attribute__((aligned(16)));
@@ -165,18 +168,17 @@ void CRYPTO_chacha_20(
vec s0, s1, s2, s3;
__attribute__ ((aligned (16))) unsigned chacha_const[] =
{0x61707865,0x3320646E,0x79622D32,0x6B206574};
kp = (unsigned *)key;
#if defined(__ARM_NEON__)
memcpy(np, nonce, 12);
#endif
s0 = LOAD_ALIGNED(chacha_const);
s1 = LOAD(&((vec*)kp)[0]);
s2 = LOAD(&((vec*)kp)[1]);
s1 = LOAD(&((const vec*)kp)[0]);
s2 = LOAD(&((const vec*)kp)[1]);
s3 = (vec){
counter,
((uint32_t*)nonce)[0],
((uint32_t*)nonce)[1],
((uint32_t*)nonce)[2]
((const uint32_t*)nonce)[0],
((const uint32_t*)nonce)[1],
((const uint32_t*)nonce)[2]
};
for (iters = 0; iters < inlen/(BPI*64); iters++)
@@ -316,7 +318,7 @@ void CRYPTO_chacha_20(
buf[0] = REVV_BE(v0 + s0);
for (i=inlen & ~15; i<inlen; i++)
((char *)op)[i] = ((char *)ip)[i] ^ ((char *)buf)[i];
((char *)op)[i] = ((const char *)ip)[i] ^ ((const char *)buf)[i];
}
}
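
A side note on the chacha_vec.c hunks above (not part of the commit): the SSE load intrinsics are declared with const-qualified pointer parameters in the intrinsic headers, so adding const to these casts compiles cleanly; only the store side needs a mutable pointer. A minimal sketch with illustrative function names:

#include <emmintrin.h>
#include <stdint.h>

/* _mm_loadu_si128 takes a const __m128i *, so the source cast can keep const. */
__m128i load_block(const uint8_t *in) {
  return _mm_loadu_si128((const __m128i *)in);
}

/* _mm_storeu_si128 writes through its pointer, so the destination stays non-const. */
void store_block(uint8_t *out, __m128i v) {
  _mm_storeu_si128((__m128i *)out, v);
}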

@@ -99,10 +99,10 @@ static const u64 bottom63bits = 0x7ffffffffffffffful;
/* bin32_to_felem takes a little-endian byte array and converts it into felem
* form. This assumes that the CPU is little-endian. */
static void bin32_to_felem(felem out, const u8 in[32]) {
out[0] = *((u64 *)&in[0]);
out[1] = *((u64 *)&in[8]);
out[2] = *((u64 *)&in[16]);
out[3] = *((u64 *)&in[24]);
out[0] = *((const u64 *)&in[0]);
out[1] = *((const u64 *)&in[8]);
out[2] = *((const u64 *)&in[16]);
out[3] = *((const u64 *)&in[24]);
}
/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
@@ -1476,7 +1476,7 @@ static void select_point(const u64 idx, unsigned int size,
memset(outlimbs, 0, 3 * sizeof(smallfelem));
for (i = 0; i < size; i++) {
const u64 *inlimbs = (u64 *)&pre_comp[i][0][0];
const u64 *inlimbs = (const u64 *)&pre_comp[i][0][0];
u64 mask = i ^ idx;
mask |= mask >> 4;
mask |= mask >> 2;

@@ -125,7 +125,8 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
(*block)(ivec, ecount_buf, key);
ctr128_inc(ivec);
for (; n < 16; n += sizeof(size_t)) {
*(size_t *)(out + n) = *(size_t *)(in + n) ^ *(size_t *)(ecount_buf + n);
*(size_t *)(out + n) = *(const size_t *)(in + n) ^
*(const size_t *)(ecount_buf + n);
}
len -= 16;
out += 16;

@@ -28,8 +28,8 @@
#define ALIGN(x) __attribute__((aligned(x)))
/* inline is not a keyword in C89. */
#define INLINE
#define U8TO64_LE(m) (*(uint64_t *)(m))
#define U8TO32_LE(m) (*(uint32_t *)(m))
#define U8TO64_LE(m) (*(const uint64_t *)(m))
#define U8TO32_LE(m) (*(const uint32_t *)(m))
#define U64TO8_LE(m, v) (*(uint64_t *)(m)) = v
typedef __m128i xmmi;
@@ -93,21 +93,21 @@ poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) {
size_t offset = src - dst;
if (bytes & 32) {
_mm_storeu_si128((xmmi *)(dst + 0),
_mm_loadu_si128((xmmi *)(dst + offset + 0)));
_mm_loadu_si128((const xmmi *)(dst + offset + 0)));
_mm_storeu_si128((xmmi *)(dst + 16),
_mm_loadu_si128((xmmi *)(dst + offset + 16)));
_mm_loadu_si128((const xmmi *)(dst + offset + 16)));
dst += 32;
}
if (bytes & 16) {
_mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset)));
_mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((const xmmi *)(dst + offset)));
dst += 16;
}
if (bytes & 8) {
*(uint64_t *)dst = *(uint64_t *)(dst + offset);
*(uint64_t *)dst = *(const uint64_t *)(dst + offset);
dst += 8;
}
if (bytes & 4) {
*(uint32_t *)dst = *(uint32_t *)(dst + offset);
*(uint32_t *)dst = *(const uint32_t *)(dst + offset);
dst += 4;
}
if (bytes & 2) {
@@ -186,9 +186,9 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) {
static void poly1305_first_block(poly1305_state_internal *st,
const uint8_t *m) {
const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
xmmi T5, T6;
poly1305_power *p;
uint128_t d[3];
@@ -265,10 +265,10 @@ static void poly1305_first_block(poly1305_state_internal *st,
p->R24.d[3] = (uint32_t)(pad1 >> 32);
/* H = [Mx,My] */
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
_mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
_mm_loadl_epi64((xmmi *)(m + 24)));
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
_mm_loadl_epi64((const xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
_mm_loadl_epi64((const xmmi *)(m + 24)));
st->H[0] = _mm_and_si128(MMASK, T5);
st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -279,9 +279,9 @@ static void poly1305_first_block(poly1305_state_internal *st,
static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
size_t bytes) {
const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
poly1305_power *p;
xmmi H0, H1, H2, H3, H4;
@@ -345,10 +345,10 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
T4 = _mm_add_epi64(T4, T5);
/* H += [Mx,My]*[r^2,r^2] */
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
_mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
_mm_loadl_epi64((xmmi *)(m + 24)));
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
_mm_loadl_epi64((const xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
_mm_loadl_epi64((const xmmi *)(m + 24)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -409,10 +409,10 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
T4 = _mm_add_epi64(T4, T5);
/* H += [Mx,My] */
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)),
_mm_loadl_epi64((xmmi *)(m + 48)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)),
_mm_loadl_epi64((xmmi *)(m + 56)));
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)),
_mm_loadl_epi64((const xmmi *)(m + 48)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)),
_mm_loadl_epi64((const xmmi *)(m + 56)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
@@ -469,9 +469,9 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
size_t bytes) {
const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
const xmmi HIBIT = _mm_load_si128((xmmi *)poly1305_x64_sse2_1shl128);
const xmmi FIVE = _mm_load_si128((xmmi *)poly1305_x64_sse2_5);
const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
poly1305_power *p;
xmmi H0, H1, H2, H3, H4;
@@ -542,10 +542,10 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
T4 = _mm_add_epi64(T4, T5);
/* H += [Mx,My] */
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)),
_mm_loadl_epi64((xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)),
_mm_loadl_epi64((xmmi *)(m + 24)));
T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
_mm_loadl_epi64((const xmmi *)(m + 16)));
T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
_mm_loadl_epi64((const xmmi *)(m + 24)));
M0 = _mm_and_si128(MMASK, T5);
M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));