
Use C99 for size_t loops.

This was done just by grepping for 'size_t i;' and 'size_t j;'. I left
everything in crypto/x509 and friends alone.

There are some instances in gcm.c that are non-trivial; those have been
pulled into a separate CL for ease of review.
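
The rewrite is otherwise mechanical. As an illustrative sketch (the loop body
here is invented, not a hunk from this CL), a loop written as

    size_t i;
    for (i = 0; i < len; i++) {
      out[i] ^= in[i];
    }

becomes

    for (size_t i = 0; i < len; i++) {
      out[i] ^= in[i];
    }

with the separate declaration of the index variable dropped.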

Change-Id: I6515804e3097f7e90855f1e7610868ee87117223
Reviewed-on: https://boringssl-review.googlesource.com/10801
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: Adam Langley <agl@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
David Benjamin 8 years ago
committed by CQ bot account: commit-bot@chromium.org
parent
commit
54091230cd
37 changed files with 136 additions and 235 deletions
 1. +1  -2   crypto/base64/base64_test.cc
 2. +1  -2   crypto/bio/hexdump.c
 3. +3  -6   crypto/bn/convert.c
 4. +4  -6   crypto/bytestring/cbb.c
 5. +1  -2   crypto/bytestring/cbs.c
 6. +2  -2   crypto/cipher/e_aes.c
 7. +2  -4   crypto/cipher/e_des.c
 8. +1  -2   crypto/digest/digest_test.cc
 9. +1  -3   crypto/ec/ec.c
10. +14 -22  crypto/ec/p224-64.c
11. +15 -24  crypto/ec/p256-64.c
12. +5  -6   crypto/ec/simple.c
13. +8  -15  crypto/evp/print.c
14. +2  -4   crypto/ex_data.c
15. +2  -3   crypto/hmac/hmac.c
16. +6  -10  crypto/lhash/lhash.c
17. +2  -3   crypto/modes/gcm.c
18. +3  -6   crypto/modes/gcm_test.c
19. +6  -12  crypto/newhope/poly.c
20. +6  -9   crypto/pkcs8/pkcs8.c
21. +1  -2   crypto/rsa/rsa.c
22. +3  -3   crypto/rsa/rsa_asn1.c
23. +12 -21  crypto/stack/stack.c
24. +1  -2   crypto/test/test_util.cc
25. +2  -4   ssl/custom_extensions.c
26. +4  -7   ssl/d1_both.c
27. +1  -2   ssl/handshake_client.c
28. +3  -5   ssl/handshake_server.c
29. +1  -2   ssl/s3_lib.c
30. +2  -4   ssl/ssl_aead_ctx.c
31. +1  -2   ssl/ssl_asn1.c
32. +3  -6   ssl/ssl_cert.c
33. +2  -3   ssl/ssl_cipher.c
34. +2  -4   ssl/ssl_ecdh.c
35. +2  -4   ssl/ssl_lib.c
36. +10 -19  ssl/t1_lib.c
37. +1  -2   ssl/tls_record.c

crypto/base64/base64_test.cc  +1 -2

@@ -107,8 +107,7 @@ static std::string RemoveNewlines(const char *in) {
std::string ret;
const size_t in_len = strlen(in);

size_t i;
for (i = 0; i < in_len; i++) {
for (size_t i = 0; i < in_len; i++) {
if (in[i] != '\n') {
ret.push_back(in[i]);
}


crypto/bio/hexdump.c  +1 -2

@@ -86,7 +86,6 @@ static char to_char(uint8_t b) {
* |ctx|. */
static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
size_t len) {
size_t i;
char buf[10];
unsigned l;

@@ -95,7 +94,7 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
* ^ offset ^ extra space ^ ASCII of line
*/

for (i = 0; i < len; i++) {
for (size_t i = 0; i < len; i++) {
if (ctx->used == 0) {
/* The beginning of a line. */
BIO_indent(ctx->bio, ctx->indent, UINT_MAX);


crypto/bn/convert.c  +3 -6

@@ -160,9 +160,6 @@ static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) {
}

int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
size_t i;
BN_ULONG l;

/* Special case for |in| = 0. Just branch as the probability is negligible. */
if (BN_is_zero(in)) {
memset(out, 0, len);
@@ -175,7 +172,7 @@ int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
return 0;
}
if ((len % BN_BYTES) != 0) {
l = read_word_padded(in, len / BN_BYTES);
BN_ULONG l = read_word_padded(in, len / BN_BYTES);
if (l >> (8 * (len % BN_BYTES)) != 0) {
return 0;
}
@@ -188,9 +185,9 @@ int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
* leading zero octets is low.
*
* See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */
i = len;
size_t i = len;
while (i--) {
l = read_word_padded(in, i / BN_BYTES);
BN_ULONG l = read_word_padded(in, i / BN_BYTES);
*(out++) = (uint8_t)(l >> (8 * (i % BN_BYTES))) & 0xff;
}
return 1;


crypto/bytestring/cbb.c  +4 -6

@@ -142,17 +142,16 @@ static int cbb_buffer_add(struct cbb_buffer_st *base, uint8_t **out,

static int cbb_buffer_add_u(struct cbb_buffer_st *base, uint32_t v,
size_t len_len) {
uint8_t *buf;
size_t i;

if (len_len == 0) {
return 1;
}

uint8_t *buf;
if (!cbb_buffer_add(base, &buf, len_len)) {
return 0;
}

for (i = len_len - 1; i < len_len; i--) {
for (size_t i = len_len - 1; i < len_len; i--) {
buf[i] = v;
v >>= 8;
}
@@ -440,14 +439,13 @@ void CBB_discard_child(CBB *cbb) {

int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) {
CBB child;
size_t i;
int started = 0;

if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER)) {
return 0;
}

for (i = 0; i < 8; i++) {
for (size_t i = 0; i < 8; i++) {
uint8_t byte = (value >> 8*(7-i)) & 0xff;
if (!started) {
if (byte == 0) {


crypto/bytestring/cbs.c  +1 -2

@@ -88,13 +88,12 @@ int CBS_mem_equal(const CBS *cbs, const uint8_t *data, size_t len) {

static int cbs_get_u(CBS *cbs, uint32_t *out, size_t len) {
uint32_t result = 0;
size_t i;
const uint8_t *data;

if (!cbs_get(cbs, &data, len)) {
return 0;
}
for (i = 0; i < len; i++) {
for (size_t i = 0; i < len; i++) {
result <<= 8;
result |= data[i];
}


crypto/cipher/e_aes.c  +2 -2

@@ -353,14 +353,14 @@ static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
size_t len) {
size_t bl = ctx->cipher->block_size;
size_t i;
EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

if (len < bl) {
return 1;
}

for (i = 0, len -= bl; i <= len; i += bl) {
len -= bl;
for (size_t i = 0; i <= len; i += bl) {
(*dat->block)(in + i, out + i, &dat->ks);
}



crypto/cipher/e_des.c  +2 -4

@@ -104,8 +104,7 @@ static int des_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
in_len -= ctx->cipher->block_size;

EVP_DES_KEY *dat = (EVP_DES_KEY *) ctx->cipher_data;
size_t i;
for (i = 0; i <= in_len; i += ctx->cipher->block_size) {
for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) {
DES_ecb_encrypt((DES_cblock *) (in + i), (DES_cblock *) (out + i),
&dat->ks.ks, ctx->encrypt);
}
@@ -189,8 +188,7 @@ static int des_ede_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
in_len -= ctx->cipher->block_size;

DES_EDE_KEY *dat = (DES_EDE_KEY *) ctx->cipher_data;
size_t i;
for (i = 0; i <= in_len; i += ctx->cipher->block_size) {
for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) {
DES_ecb3_encrypt((DES_cblock *) (in + i), (DES_cblock *) (out + i),
&dat->ks.ks[0], &dat->ks.ks[1], &dat->ks.ks[2],
ctx->encrypt);


crypto/digest/digest_test.cc  +1 -2

@@ -142,10 +142,9 @@ static bool CompareDigest(const TestVector *test,
const uint8_t *digest,
size_t digest_len) {
static const char kHexTable[] = "0123456789abcdef";
size_t i;
char digest_hex[2*EVP_MAX_MD_SIZE + 1];

for (i = 0; i < digest_len; i++) {
for (size_t i = 0; i < digest_len; i++) {
digest_hex[2*i] = kHexTable[digest[i] >> 4];
digest_hex[2*i + 1] = kHexTable[digest[i] & 0xf];
}


crypto/ec/ec.c  +1 -3

@@ -709,9 +709,7 @@ int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) {

int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[],
BN_CTX *ctx) {
size_t i;

for (i = 0; i < num; i++) {
for (size_t i = 0; i < num; i++) {
if (group->meth != points[i]->meth) {
OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
return 0;


crypto/ec/p224-64.c  +14 -22

@@ -192,8 +192,7 @@ static void bin28_to_felem(felem out, const u8 in[28]) {
}

static void felem_to_bin28(u8 out[28], const felem in) {
size_t i;
for (i = 0; i < 7; ++i) {
for (size_t i = 0; i < 7; ++i) {
out[i] = in[0] >> (8 * i);
out[i + 7] = in[1] >> (8 * i);
out[i + 14] = in[2] >> (8 * i);
@@ -203,8 +202,7 @@ static void felem_to_bin28(u8 out[28], const felem in) {

/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
static void flip_endian(u8 *out, const u8 *in, size_t len) {
size_t i;
for (i = 0; i < len; ++i) {
for (size_t i = 0; i < len; ++i) {
out[i] = in[len - 1 - i];
}
}
@@ -524,7 +522,6 @@ static limb felem_is_zero(const felem in) {
static void felem_inv(felem out, const felem in) {
felem ftmp, ftmp2, ftmp3, ftmp4;
widefelem tmp;
size_t i;

felem_square(tmp, in);
felem_reduce(ftmp, tmp); /* 2 */
@@ -544,7 +541,7 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp, tmp); /* 2^6 - 1 */
felem_square(tmp, ftmp);
felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
for (i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
for (size_t i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
}
@@ -552,7 +549,7 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
felem_square(tmp, ftmp2);
felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
for (i = 0; i < 11; ++i) {/* 2^24 - 2^12 */
for (size_t i = 0; i < 11; ++i) {/* 2^24 - 2^12 */
felem_square(tmp, ftmp3);
felem_reduce(ftmp3, tmp);
}
@@ -560,7 +557,7 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
felem_square(tmp, ftmp2);
felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
for (i = 0; i < 23; ++i) {/* 2^48 - 2^24 */
for (size_t i = 0; i < 23; ++i) {/* 2^48 - 2^24 */
felem_square(tmp, ftmp3);
felem_reduce(ftmp3, tmp);
}
@@ -568,7 +565,7 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
felem_square(tmp, ftmp3);
felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
for (i = 0; i < 47; ++i) {/* 2^96 - 2^48 */
for (size_t i = 0; i < 47; ++i) {/* 2^96 - 2^48 */
felem_square(tmp, ftmp4);
felem_reduce(ftmp4, tmp);
}
@@ -576,13 +573,13 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
felem_square(tmp, ftmp3);
felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
for (i = 0; i < 23; ++i) {/* 2^120 - 2^24 */
for (size_t i = 0; i < 23; ++i) {/* 2^120 - 2^24 */
felem_square(tmp, ftmp4);
felem_reduce(ftmp4, tmp);
}
felem_mul(tmp, ftmp2, ftmp4);
felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
for (i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
for (size_t i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
}
@@ -592,7 +589,7 @@ static void felem_inv(felem out, const felem in) {
felem_reduce(ftmp, tmp); /* 2^127 - 2 */
felem_mul(tmp, ftmp, in);
felem_reduce(ftmp, tmp); /* 2^127 - 1 */
for (i = 0; i < 97; ++i) {/* 2^224 - 2^97 */
for (size_t i = 0; i < 97; ++i) {/* 2^224 - 2^97 */
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
}
@@ -604,10 +601,9 @@ static void felem_inv(felem out, const felem in) {
* if icopy == 1, copy in to out,
* if icopy == 0, copy out to itself. */
static void copy_conditional(felem out, const felem in, limb icopy) {
size_t i;
/* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */
const limb copy = -icopy;
for (i = 0; i < 4; ++i) {
for (size_t i = 0; i < 4; ++i) {
const limb tmp = copy & (in[i] ^ out[i]);
out[i] ^= tmp;
}
@@ -866,8 +862,7 @@ static void select_point(const u64 idx, size_t size,
limb *outlimbs = &out[0][0];
memset(outlimbs, 0, 3 * sizeof(felem));

size_t i;
for (i = 0; i < size; i++) {
for (size_t i = 0; i < size; i++) {
const limb *inlimbs = &pre_comp[i][0][0];
u64 mask = i ^ idx;
mask |= mask >> 4;
@@ -875,8 +870,7 @@ static void select_point(const u64 idx, size_t size,
mask |= mask >> 1;
mask &= 1;
mask--;
size_t j;
for (j = 0; j < 4 * 3; j++) {
for (size_t j = 0; j < 4 * 3; j++) {
outlimbs[j] |= inlimbs[j] & mask;
}
}
@@ -1082,8 +1076,7 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group,
* i.e., they contribute nothing to the linear combination */
memset(secrets, 0, num_points * sizeof(felem_bytearray));
memset(pre_comp, 0, num_points * 17 * 3 * sizeof(felem));
size_t i;
for (i = 0; i < num_points; ++i) {
for (size_t i = 0; i < num_points; ++i) {
if (i == num) {
/* the generator */
p = EC_GROUP_get0_generator(group);
@@ -1121,8 +1114,7 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group,
felem_assign(pre_comp[i][1][1], y_out);
felem_assign(pre_comp[i][1][2], z_out);

size_t j;
for (j = 2; j <= 16; ++j) {
for (size_t j = 2; j <= 16; ++j) {
if (j & 1) {
point_add(pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
pre_comp[i][1][0], pre_comp[i][1][1], pre_comp[i][1][2],


crypto/ec/p256-64.c  +15 -24

@@ -94,8 +94,7 @@ static void smallfelem_to_bin32(u8 out[32], const smallfelem in) {

/* To preserve endianness when using BN_bn2bin and BN_bin2bn. */
static void flip_endian(u8 *out, const u8 *in, size_t len) {
size_t i;
for (i = 0; i < len; ++i) {
for (size_t i = 0; i < len; ++i) {
out[i] = in[len - 1 - i];
}
}
@@ -719,8 +718,7 @@ static void felem_contract(smallfelem out, const felem in) {
* each u64, from most-significant to least significant. For each one, if
* all words so far have been equal (m is all ones) then a non-equal
* result is the answer. Otherwise we continue. */
size_t i;
for (i = 3; i < 4; i--) {
for (size_t i = 3; i < 4; i--) {
u64 equal;
uint128_t a = ((uint128_t)kPrime[i]) - out[i];
/* if out[i] > kPrime[i] then a will underflow and the high 64-bits
@@ -810,7 +808,6 @@ static void felem_inv(felem out, const felem in) {
/* each e_I will hold |in|^{2^I - 1} */
felem e2, e4, e8, e16, e32, e64;
longfelem tmp;
size_t i;

felem_square(tmp, in);
felem_reduce(ftmp, tmp); /* 2^1 */
@@ -835,47 +832,47 @@ static void felem_inv(felem out, const felem in) {
felem_mul(tmp, ftmp, e4);
felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
felem_assign(e8, ftmp);
for (i = 0; i < 8; i++) {
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
} /* 2^16 - 2^8 */
felem_mul(tmp, ftmp, e8);
felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
felem_assign(e16, ftmp);
for (i = 0; i < 16; i++) {
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
} /* 2^32 - 2^16 */
felem_mul(tmp, ftmp, e16);
felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
felem_assign(e32, ftmp);
for (i = 0; i < 32; i++) {
for (size_t i = 0; i < 32; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
} /* 2^64 - 2^32 */
felem_assign(e64, ftmp);
felem_mul(tmp, ftmp, in);
felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
for (i = 0; i < 192; i++) {
for (size_t i = 0; i < 192; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
} /* 2^256 - 2^224 + 2^192 */

felem_mul(tmp, e64, e32);
felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
for (i = 0; i < 16; i++) {
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
} /* 2^80 - 2^16 */
felem_mul(tmp, ftmp2, e16);
felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
for (i = 0; i < 8; i++) {
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
} /* 2^88 - 2^8 */
felem_mul(tmp, ftmp2, e8);
felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
for (i = 0; i < 4; i++) {
for (size_t i = 0; i < 4; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
} /* 2^92 - 2^4 */
@@ -1008,8 +1005,7 @@ static void point_double_small(smallfelem x_out, smallfelem y_out,

/* copy_conditional copies in to out iff mask is all ones. */
static void copy_conditional(felem out, const felem in, limb mask) {
size_t i;
for (i = 0; i < NLIMBS; ++i) {
for (size_t i = 0; i < NLIMBS; ++i) {
const limb tmp = mask & (in[i] ^ out[i]);
out[i] ^= tmp;
}
@@ -1017,9 +1013,8 @@ static void copy_conditional(felem out, const felem in, limb mask) {

/* copy_small_conditional copies in to out iff mask is all ones. */
static void copy_small_conditional(felem out, const smallfelem in, limb mask) {
size_t i;
const u64 mask64 = mask;
for (i = 0; i < NLIMBS; ++i) {
for (size_t i = 0; i < NLIMBS; ++i) {
out[i] = ((limb)(in[i] & mask64)) | (out[i] & ~mask);
}
}
@@ -1407,8 +1402,7 @@ static void select_point(const u64 idx, size_t size,
u64 *outlimbs = &out[0][0];
memset(outlimbs, 0, 3 * sizeof(smallfelem));

size_t i;
for (i = 0; i < size; i++) {
for (size_t i = 0; i < size; i++) {
const u64 *inlimbs = (const u64 *)&pre_comp[i][0][0];
u64 mask = i ^ idx;
mask |= mask >> 4;
@@ -1416,8 +1410,7 @@ static void select_point(const u64 idx, size_t size,
mask |= mask >> 1;
mask &= 1;
mask--;
size_t j;
for (j = 0; j < NLIMBS * 3; j++) {
for (size_t j = 0; j < NLIMBS * 3; j++) {
outlimbs[j] |= inlimbs[j] & mask;
}
}
@@ -1639,8 +1632,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group,
* i.e., they contribute nothing to the linear combination. */
memset(secrets, 0, num_points * sizeof(felem_bytearray));
memset(pre_comp, 0, num_points * 17 * 3 * sizeof(smallfelem));
size_t i;
for (i = 0; i < num_points; ++i) {
for (size_t i = 0; i < num_points; ++i) {
if (i == num) {
/* we didn't have a valid precomputation, so we pick the generator. */
p = EC_GROUP_get0_generator(group);
@@ -1674,8 +1666,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group,
felem_shrink(pre_comp[i][1][0], x_out);
felem_shrink(pre_comp[i][1][1], y_out);
felem_shrink(pre_comp[i][1][2], z_out);
size_t j;
for (j = 2; j <= 16; ++j) {
for (size_t j = 2; j <= 16; ++j) {
if (j & 1) {
point_add_small(pre_comp[i][j][0], pre_comp[i][j][1],
pre_comp[i][j][2], pre_comp[i][1][0],


crypto/ec/simple.c  +5 -6

@@ -964,7 +964,6 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num,
BN_CTX *new_ctx = NULL;
BIGNUM *tmp, *tmp_Z;
BIGNUM **prod_Z = NULL;
size_t i;
int ret = 0;

if (num == 0) {
@@ -990,7 +989,7 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num,
goto err;
}
memset(prod_Z, 0, num * sizeof(prod_Z[0]));
for (i = 0; i < num; i++) {
for (size_t i = 0; i < num; i++) {
prod_Z[i] = BN_new();
if (prod_Z[i] == NULL) {
goto err;
@@ -1010,7 +1009,7 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num,
}
}

for (i = 1; i < num; i++) {
for (size_t i = 1; i < num; i++) {
if (!BN_is_zero(&points[i]->Z)) {
if (!group->meth->field_mul(group, prod_Z[i], prod_Z[i - 1],
&points[i]->Z, ctx)) {
@@ -1047,7 +1046,7 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num,
}
}

for (i = num - 1; i > 0; --i) {
for (size_t i = num - 1; i > 0; --i) {
/* Loop invariant: tmp is the product of the inverses of
* points[0]->Z .. points[i]->Z (zero-valued inputs skipped). */
if (BN_is_zero(&points[i]->Z)) {
@@ -1071,7 +1070,7 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num,
}

/* Finally, fix up the X and Y coordinates for all points. */
for (i = 0; i < num; i++) {
for (size_t i = 0; i < num; i++) {
EC_POINT *p = points[i];

if (!BN_is_zero(&p->Z)) {
@@ -1095,7 +1094,7 @@ err:
BN_CTX_end(ctx);
BN_CTX_free(new_ctx);
if (prod_Z != NULL) {
for (i = 0; i < num; i++) {
for (size_t i = 0; i < num; i++) {
if (prod_Z[i] == NULL) {
break;
}


crypto/evp/print.c  +8 -15

@@ -121,15 +121,13 @@ static int bn_print(BIO *bp, const char *number, const BIGNUM *num,
}

static void update_buflen(const BIGNUM *b, size_t *pbuflen) {
size_t i;

if (!b) {
return;
}

i = BN_num_bytes(b);
if (*pbuflen < i) {
*pbuflen = i;
size_t len = BN_num_bytes(b);
if (*pbuflen < len) {
*pbuflen = len;
}
}

@@ -154,10 +152,8 @@ static int do_rsa_print(BIO *out, const RSA *rsa, int off,
update_buflen(rsa->iqmp, &buf_len);

if (rsa->additional_primes != NULL) {
size_t i;

for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
i++) {
for (size_t i = 0;
i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
const RSA_additional_prime *ap =
sk_RSA_additional_prime_value(rsa->additional_primes, i);
update_buflen(ap->prime, &buf_len);
@@ -211,13 +207,11 @@ static int do_rsa_print(BIO *out, const RSA *rsa, int off,

if (rsa->additional_primes != NULL &&
sk_RSA_additional_prime_num(rsa->additional_primes) > 0) {
size_t i;

if (BIO_printf(out, "otherPrimeInfos:\n") <= 0) {
goto err;
}
for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
i++) {
for (size_t i = 0;
i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
const RSA_additional_prime *ap =
sk_RSA_additional_prime_value(rsa->additional_primes, i);

@@ -483,8 +477,7 @@ static EVP_PKEY_PRINT_METHOD kPrintMethods[] = {
static size_t kPrintMethodsLen = OPENSSL_ARRAY_SIZE(kPrintMethods);

static EVP_PKEY_PRINT_METHOD *find_method(int type) {
size_t i;
for (i = 0; i < kPrintMethodsLen; i++) {
for (size_t i = 0; i < kPrintMethodsLen; i++) {
if (kPrintMethods[i].type == type) {
return &kPrintMethods[i];
}


crypto/ex_data.c  +2 -4

@@ -244,8 +244,7 @@ int CRYPTO_dup_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, CRYPTO_EX_DATA *to,
return 0;
}

size_t i;
for (i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
for (size_t i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
CRYPTO_EX_DATA_FUNCS *func_pointer =
sk_CRYPTO_EX_DATA_FUNCS_value(func_pointers, i);
void *ptr = CRYPTO_get_ex_data(from, i + ex_data_class->num_reserved);
@@ -274,8 +273,7 @@ void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj,
return;
}

size_t i;
for (i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
for (size_t i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
CRYPTO_EX_DATA_FUNCS *func_pointer =
sk_CRYPTO_EX_DATA_FUNCS_value(func_pointers, i);
if (func_pointer->free_func) {


crypto/hmac/hmac.c  +2 -3

@@ -115,7 +115,6 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len,
* exist callers which intend the latter, but the former is an awkward edge
* case. Fix to API to avoid this. */
if (md != ctx->md || key != NULL) {
size_t i;
uint8_t pad[EVP_MAX_MD_BLOCK_SIZE];
uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE];
unsigned key_block_len;
@@ -139,7 +138,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len,
memset(&key_block[key_block_len], 0, sizeof(key_block) - key_block_len);
}

for (i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
pad[i] = 0x36 ^ key_block[i];
}
if (!EVP_DigestInit_ex(&ctx->i_ctx, md, impl) ||
@@ -147,7 +146,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len,
return 0;
}

for (i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
pad[i] = 0x5c ^ key_block[i];
}
if (!EVP_DigestInit_ex(&ctx->o_ctx, md, impl) ||


crypto/lhash/lhash.c  +6 -10

@@ -100,15 +100,13 @@ _LHASH *lh_new(lhash_hash_func hash, lhash_cmp_func comp) {
}

void lh_free(_LHASH *lh) {
size_t i;
LHASH_ITEM *n, *next;

if (lh == NULL) {
return;
}

for (i = 0; i < lh->num_buckets; i++) {
for (n = lh->buckets[i]; n != NULL; n = next) {
for (size_t i = 0; i < lh->num_buckets; i++) {
LHASH_ITEM *next;
for (LHASH_ITEM *n = lh->buckets[i]; n != NULL; n = next) {
next = n->next;
OPENSSL_free(n);
}
@@ -277,9 +275,6 @@ void *lh_delete(_LHASH *lh, const void *data) {

static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *),
void (*arg_func)(void *, void *), void *arg) {
size_t i;
LHASH_ITEM *cur, *next;

if (lh == NULL) {
return;
}
@@ -289,8 +284,9 @@ static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *),
lh->callback_depth++;
}

for (i = 0; i < lh->num_buckets; i++) {
for (cur = lh->buckets[i]; cur != NULL; cur = next) {
for (size_t i = 0; i < lh->num_buckets; i++) {
LHASH_ITEM *next;
for (LHASH_ITEM *cur = lh->buckets[i]; cur != NULL; cur = next) {
next = cur->next;
if (arg_func) {
arg_func(cur->data, arg);


crypto/modes/gcm.c  +2 -3

@@ -516,11 +516,10 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
ctx->Yi.c[15] = 1;
ctr = 1;
} else {
size_t i;
uint64_t len0 = len;

while (len >= 16) {
for (i = 0; i < 16; ++i) {
for (size_t i = 0; i < 16; ++i) {
ctx->Yi.c[i] ^= iv[i];
}
GCM_MUL(ctx, Yi);
@@ -528,7 +527,7 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
len -= 16;
}
if (len) {
for (i = 0; i < len; ++i) {
for (size_t i = 0; i < len; ++i) {
ctx->Yi.c[i] ^= iv[i];
}
GCM_MUL(ctx, Yi);


crypto/modes/gcm_test.c  +3 -6

@@ -257,9 +257,6 @@ static int from_hex(uint8_t *out, char in) {

static int decode_hex(uint8_t **out, size_t *out_len, const char *in,
unsigned test_num, const char *description) {
uint8_t *buf = NULL;
size_t i;

if (in == NULL) {
*out = NULL;
*out_len = 0;
@@ -269,16 +266,16 @@ static int decode_hex(uint8_t **out, size_t *out_len, const char *in,
size_t len = strlen(in);
if (len & 1) {
fprintf(stderr, "%u: Odd-length %s input.\n", test_num, description);
goto err;
return 0;
}

buf = OPENSSL_malloc(len / 2);
uint8_t *buf = OPENSSL_malloc(len / 2);
if (buf == NULL) {
fprintf(stderr, "%u: malloc failure.\n", test_num);
goto err;
}

for (i = 0; i < len; i += 2) {
for (size_t i = 0; i < len; i += 2) {
uint8_t v, v2;
if (!from_hex(&v, in[i]) ||
!from_hex(&v2, in[i+1])) {


crypto/newhope/poly.c  +6 -12

@@ -28,8 +28,7 @@ extern uint16_t newhope_psis_bitrev_montgomery[];
extern uint16_t newhope_psis_inv_montgomery[];

void NEWHOPE_POLY_frombytes(NEWHOPE_POLY* r, const uint8_t* a) {
int i;
for (i = 0; i < PARAM_N / 4; i++) {
for (int i = 0; i < PARAM_N / 4; i++) {
r->coeffs[4 * i + 0] =
a[7 * i + 0] | (((uint16_t)a[7 * i + 1] & 0x3f) << 8);
r->coeffs[4 * i + 1] = (a[7 * i + 1] >> 6) |
@@ -44,10 +43,9 @@ void NEWHOPE_POLY_frombytes(NEWHOPE_POLY* r, const uint8_t* a) {
}

void NEWHOPE_POLY_tobytes(uint8_t* r, const NEWHOPE_POLY* p) {
int i;
uint16_t t0, t1, t2, t3, m;
int16_t c;
for (i = 0; i < PARAM_N / 4; i++) {
for (int i = 0; i < PARAM_N / 4; i++) {
t0 = newhope_barrett_reduce(
p->coeffs[4 * i + 0]); /* Make sure that coefficients
have only 14 bits */
@@ -136,13 +134,11 @@ void NEWHOPE_POLY_noise(NEWHOPE_POLY* r) {
/* The reference implementation calls ChaCha20 here. */
RAND_bytes((uint8_t *) tp, sizeof(tp));

size_t i;
for (i = 0; i < PARAM_N; i++) {
for (size_t i = 0; i < PARAM_N; i++) {
const uint32_t t = tp[i];

size_t j;
uint32_t d = 0;
for (j = 0; j < 8; j++) {
for (size_t j = 0; j < 8; j++) {
d += (t >> j) & 0x01010101;
}

@@ -154,8 +150,7 @@ void NEWHOPE_POLY_noise(NEWHOPE_POLY* r) {

void newhope_poly_pointwise(NEWHOPE_POLY* r, const NEWHOPE_POLY* a,
const NEWHOPE_POLY* b) {
size_t i;
for (i = 0; i < PARAM_N; i++) {
for (size_t i = 0; i < PARAM_N; i++) {
uint16_t t = newhope_montgomery_reduce(3186 * b->coeffs[i]);
/* t is now in Montgomery domain */
r->coeffs[i] = newhope_montgomery_reduce(a->coeffs[i] * t);
@@ -165,8 +160,7 @@ void newhope_poly_pointwise(NEWHOPE_POLY* r, const NEWHOPE_POLY* a,

void newhope_poly_add(NEWHOPE_POLY* r, const NEWHOPE_POLY* a,
const NEWHOPE_POLY* b) {
size_t i;
for (i = 0; i < PARAM_N; i++) {
for (size_t i = 0; i < PARAM_N; i++) {
r->coeffs[i] = newhope_barrett_reduce(a->coeffs[i] + b->coeffs[i]);
}
}


crypto/pkcs8/pkcs8.c  +6 -9

@@ -155,11 +155,10 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len,
return 0;
}

size_t i;
for (i = 0; i < S_len; i++) {
for (size_t i = 0; i < S_len; i++) {
I[i] = salt[i % salt_len];
}
for (i = 0; i < P_len; i++) {
for (size_t i = 0; i < P_len; i++) {
I[i + S_len] = pass_raw[i % pass_raw_len];
}

@@ -178,8 +177,7 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len,
!EVP_DigestFinal_ex(&ctx, A, &A_len)) {
goto err;
}
int iter;
for (iter = 1; iter < iterations; iter++) {
for (int iter = 1; iter < iterations; iter++) {
if (!EVP_DigestInit_ex(&ctx, md, NULL) ||
!EVP_DigestUpdate(&ctx, A, A_len) ||
!EVP_DigestFinal_ex(&ctx, A, &A_len)) {
@@ -198,7 +196,7 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len,
/* B. Concatenate copies of A_i to create a string B of length v bits (the
* final copy of A_i may be truncated to create B). */
uint8_t B[EVP_MAX_MD_BLOCK_SIZE];
for (i = 0; i < block_size; i++) {
for (size_t i = 0; i < block_size; i++) {
B[i] = A[i % A_len];
}

@@ -206,10 +204,9 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len,
* where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod
* 2^v for each j. */
assert(I_len % block_size == 0);
for (i = 0; i < I_len; i += block_size) {
for (size_t i = 0; i < I_len; i += block_size) {
unsigned carry = 1;
size_t j;
for (j = block_size - 1; j < block_size; j--) {
for (size_t j = block_size - 1; j < block_size; j--) {
carry += I[i + j] + B[j];
I[i + j] = (uint8_t)carry;
carry >>= 8;


crypto/rsa/rsa.c  +1 -2

@@ -609,8 +609,7 @@ int RSA_check_key(const RSA *key) {
num_additional_primes = sk_RSA_additional_prime_num(key->additional_primes);
}

size_t i;
for (i = 0; i < num_additional_primes; i++) {
for (size_t i = 0; i < num_additional_primes; i++) {
const RSA_additional_prime *ap =
sk_RSA_additional_prime_value(key->additional_primes, i);
if (!BN_mul(&n, &n, ap->prime, ctx) ||


crypto/rsa/rsa_asn1.c  +3 -3

@@ -329,10 +329,10 @@ int RSA_marshal_private_key(CBB *cbb, const RSA *rsa) {
OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR);
return 0;
}
size_t i;
for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
for (size_t i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
i++) {
RSA_additional_prime *ap =
sk_RSA_additional_prime_value(rsa->additional_primes, i);
sk_RSA_additional_prime_value(rsa->additional_primes, i);
CBB other_prime_info;
if (!CBB_add_asn1(&other_prime_infos, &other_prime_info,
CBS_ASN1_SEQUENCE) ||


crypto/stack/stack.c  +12 -21

@@ -131,13 +131,11 @@ void sk_free(_STACK *sk) {
}

void sk_pop_free(_STACK *sk, void (*func)(void *)) {
size_t i;

if (sk == NULL) {
return;
}

for (i = 0; i < sk->num; i++) {
for (size_t i = 0; i < sk->num; i++) {
if (sk->data[i] != NULL) {
func(sk->data[i]);
}
@@ -209,13 +207,11 @@ void *sk_delete(_STACK *sk, size_t where) {
}

void *sk_delete_ptr(_STACK *sk, void *p) {
size_t i;

if (sk == NULL) {
return NULL;
}

for (i = 0; i < sk->num; i++) {
for (size_t i = 0; i < sk->num; i++) {
if (sk->data[i] == p) {
return sk_delete(sk, i);
}
@@ -225,17 +221,13 @@ void *sk_delete_ptr(_STACK *sk, void *p) {
}

int sk_find(_STACK *sk, size_t *out_index, void *p) {
const void *const *r;
size_t i;
int (*comp_func)(const void *,const void *);

if (sk == NULL) {
return 0;
}

if (sk->comp == NULL) {
/* Use pointer equality when no comparison function has been set. */
for (i = 0; i < sk->num; i++) {
for (size_t i = 0; i < sk->num; i++) {
if (sk->data[i] == p) {
if (out_index) {
*out_index = i;
@@ -257,18 +249,19 @@ int sk_find(_STACK *sk, size_t *out_index, void *p) {
* elements. However, since we're passing an array of pointers to
* qsort/bsearch, we can just cast the comparison function and everything
* works. */
comp_func=(int (*)(const void *,const void *))(sk->comp);
r = bsearch(&p, sk->data, sk->num, sizeof(void *), comp_func);
const void *const *r = bsearch(&p, sk->data, sk->num, sizeof(void *),
(int (*)(const void *, const void *))sk->comp);
if (r == NULL) {
return 0;
}
i = ((void **)r) - sk->data;
size_t idx = ((void **)r) - sk->data;
/* This function always returns the first result. */
while (i > 0 && sk->comp((const void**) &p, (const void**) &sk->data[i-1]) == 0) {
i--;
while (idx > 0 &&
sk->comp((const void **)&p, (const void **)&sk->data[idx - 1]) == 0) {
idx--;
}
if (out_index) {
*out_index = i;
*out_index = idx;
}
return 1;
}
@@ -364,15 +357,13 @@ _STACK *sk_deep_copy(const _STACK *sk, void *(*copy_func)(void *),
return NULL;
}

size_t i;
for (i = 0; i < ret->num; i++) {
for (size_t i = 0; i < ret->num; i++) {
if (ret->data[i] == NULL) {
continue;
}
ret->data[i] = copy_func(ret->data[i]);
if (ret->data[i] == NULL) {
size_t j;
for (j = 0; j < i; j++) {
for (size_t j = 0; j < i; j++) {
if (ret->data[j] != NULL) {
free_func(ret->data[j]);
}


crypto/test/test_util.cc  +1 -2

@@ -20,10 +20,9 @@

void hexdump(FILE *fp, const char *msg, const void *in, size_t len) {
const uint8_t *data = reinterpret_cast<const uint8_t*>(in);
size_t i;

fputs(msg, fp);
for (i = 0; i < len; i++) {
for (size_t i = 0; i < len; i++) {
fprintf(fp, "%02x", data[i]);
}
fputs("\n", fp);


ssl/custom_extensions.c  +2 -4

@@ -32,8 +32,7 @@ void SSL_CUSTOM_EXTENSION_free(SSL_CUSTOM_EXTENSION *custom_extension) {
static const SSL_CUSTOM_EXTENSION *custom_ext_find(
STACK_OF(SSL_CUSTOM_EXTENSION) *stack,
unsigned *out_index, uint16_t value) {
size_t i;
for (i = 0; i < sk_SSL_CUSTOM_EXTENSION_num(stack); i++) {
for (size_t i = 0; i < sk_SSL_CUSTOM_EXTENSION_num(stack); i++) {
const SSL_CUSTOM_EXTENSION *ext = sk_SSL_CUSTOM_EXTENSION_value(stack, i);
if (ext->value == value) {
if (out_index != NULL) {
@@ -69,8 +68,7 @@ static int custom_ext_add_hello(SSL *ssl, CBB *extensions) {
return 1;
}

size_t i;
for (i = 0; i < sk_SSL_CUSTOM_EXTENSION_num(stack); i++) {
for (size_t i = 0; i < sk_SSL_CUSTOM_EXTENSION_num(stack); i++) {
const SSL_CUSTOM_EXTENSION *ext = sk_SSL_CUSTOM_EXTENSION_value(stack, i);

if (ssl->server &&


ssl/d1_both.c  +4 -7

@@ -217,7 +217,6 @@ static uint8_t bit_range(size_t start, size_t end) {
* and |frag->reassembly| must not be NULL. */
static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start,
size_t end) {
size_t i;
size_t msg_len = frag->msg_len;

if (frag->reassembly == NULL || start > end || end > msg_len) {
@@ -231,7 +230,7 @@ static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start,
frag->reassembly[start >> 3] |= bit_range(start & 7, end & 7);
} else {
frag->reassembly[start >> 3] |= bit_range(start & 7, 8);
for (i = (start >> 3) + 1; i < (end >> 3); i++) {
for (size_t i = (start >> 3) + 1; i < (end >> 3); i++) {
frag->reassembly[i] = 0xff;
}
if ((end & 7) != 0) {
@@ -240,7 +239,7 @@ static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start,
}

/* Check if the fragment is complete. */
for (i = 0; i < (msg_len >> 3); i++) {
for (size_t i = 0; i < (msg_len >> 3); i++) {
if (frag->reassembly[i] != 0xff) {
return;
}
@@ -681,8 +680,7 @@ err:
}

void dtls_clear_outgoing_messages(SSL *ssl) {
size_t i;
for (i = 0; i < ssl->d1->outgoing_messages_len; i++) {
for (size_t i = 0; i < ssl->d1->outgoing_messages_len; i++) {
OPENSSL_free(ssl->d1->outgoing_messages[i].data);
ssl->d1->outgoing_messages[i].data = NULL;
}
@@ -816,8 +814,7 @@ int dtls1_retransmit_outgoing_messages(SSL *ssl) {
assert(ssl_is_wbio_buffered(ssl));

int ret = -1;
size_t i;
for (i = 0; i < ssl->d1->outgoing_messages_len; i++) {
for (size_t i = 0; i < ssl->d1->outgoing_messages_len; i++) {
if (dtls1_retransmit_message(ssl, &ssl->d1->outgoing_messages[i]) <= 0) {
goto err;
}


ssl/handshake_client.c  +1 -2

@@ -594,8 +594,7 @@ static int ssl_write_client_cipher_list(SSL *ssl, CBB *out,
STACK_OF(SSL_CIPHER) *ciphers = SSL_get_ciphers(ssl);

int any_enabled = 0;
size_t i;
for (i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) {
for (size_t i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) {
const SSL_CIPHER *cipher = sk_SSL_CIPHER_value(ciphers, i);
/* Skip disabled ciphers */
if ((cipher->algorithm_mkey & ssl->cert->mask_k) ||


ssl/handshake_server.c  +3 -5

@@ -1145,8 +1145,7 @@ static int add_cert_types(SSL *ssl, CBB *cbb) {
int have_ecdsa_sign = 0;
const uint16_t *sig_algs;
size_t sig_algs_len = tls12_get_psigalgs(ssl, &sig_algs);
size_t i;
for (i = 0; i < sig_algs_len; i++) {
for (size_t i = 0; i < sig_algs_len; i++) {
switch (sig_algs[i]) {
case SSL_SIGN_RSA_PKCS1_SHA512:
case SSL_SIGN_RSA_PKCS1_SHA384:
@@ -1494,8 +1493,7 @@ static int ssl3_get_client_key_exchange(SSL *ssl) {
size_t padding_len = decrypt_len - premaster_secret_len;
uint8_t good = constant_time_eq_int_8(decrypt_buf[0], 0) &
constant_time_eq_int_8(decrypt_buf[1], 2);
size_t i;
for (i = 2; i < padding_len - 1; i++) {
for (size_t i = 2; i < padding_len - 1; i++) {
good &= ~constant_time_is_zero_8(decrypt_buf[i]);
}
good &= constant_time_is_zero_8(decrypt_buf[padding_len - 1]);
@@ -1509,7 +1507,7 @@ static int ssl3_get_client_key_exchange(SSL *ssl) {

/* Select, in constant time, either the decrypted premaster or the random
* premaster based on |good|. */
for (i = 0; i < premaster_secret_len; i++) {
for (size_t i = 0; i < premaster_secret_len; i++) {
premaster_secret[i] = constant_time_select_8(
good, decrypt_buf[padding_len + i], premaster_secret[i]);
}


ssl/s3_lib.c  +1 -2

@@ -252,7 +252,6 @@ const SSL_CIPHER *ssl3_choose_cipher(
const struct ssl_cipher_preference_list_st *server_pref) {
const SSL_CIPHER *c, *ret = NULL;
STACK_OF(SSL_CIPHER) *srvr = server_pref->ciphers, *prio, *allow;
size_t i;
int ok;
size_t cipher_index;
uint32_t alg_k, alg_a, mask_k, mask_a;
@@ -282,7 +281,7 @@ const SSL_CIPHER *ssl3_choose_cipher(

ssl_get_compatible_server_ciphers(ssl, &mask_k, &mask_a);

for (i = 0; i < sk_SSL_CIPHER_num(prio); i++) {
for (size_t i = 0; i < sk_SSL_CIPHER_num(prio); i++) {
c = sk_SSL_CIPHER_value(prio, i);

ok = 1;


ssl/ssl_aead_ctx.c  +2 -4

@@ -229,8 +229,7 @@ int SSL_AEAD_CTX_open(SSL_AEAD_CTX *aead, CBS *out, uint8_t type,
/* XOR the fixed nonce, if necessary. */
if (aead->xor_fixed_nonce) {
assert(nonce_len == aead->fixed_nonce_len);
size_t i;
for (i = 0; i < aead->fixed_nonce_len; i++) {
for (size_t i = 0; i < aead->fixed_nonce_len; i++) {
nonce[i] ^= aead->fixed_nonce[i];
}
}
@@ -316,8 +315,7 @@ int SSL_AEAD_CTX_seal(SSL_AEAD_CTX *aead, uint8_t *out, size_t *out_len,
/* XOR the fixed nonce, if necessary. */
if (aead->xor_fixed_nonce) {
assert(nonce_len == aead->fixed_nonce_len);
size_t i;
for (i = 0; i < aead->fixed_nonce_len; i++) {
for (size_t i = 0; i < aead->fixed_nonce_len; i++) {
nonce[i] ^= aead->fixed_nonce[i];
}
}


ssl/ssl_asn1.c  +1 -2

@@ -340,8 +340,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data,
OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
goto err;
}
size_t i;
for (i = 0; i < sk_X509_num(in->cert_chain); i++) {
for (size_t i = 0; i < sk_X509_num(in->cert_chain); i++) {
if (!ssl_add_cert_to_cbb(&child, sk_X509_value(in->cert_chain, i))) {
goto err;
}


ssl/ssl_cert.c  +3 -6

@@ -364,8 +364,7 @@ STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list) {
return NULL;
}

size_t i;
for (i = 0; i < sk_X509_NAME_num(list); i++) {
for (size_t i = 0; i < sk_X509_NAME_num(list); i++) {
X509_NAME *name = X509_NAME_dup(sk_X509_NAME_value(list, i));
if (name == NULL || !sk_X509_NAME_push(ret, name)) {
X509_NAME_free(name);
@@ -544,8 +543,7 @@ int ssl_add_cert_chain(SSL *ssl, CBB *cbb) {
return 0;
}

size_t i;
for (i = 0; i < sk_X509_num(chain); i++) {
for (size_t i = 0; i < sk_X509_num(chain); i++) {
x = sk_X509_value(chain, i);
if (!ssl_add_cert_with_length(&child, x)) {
return 0;
@@ -562,8 +560,7 @@ int ssl_add_cert_chain(SSL *ssl, CBB *cbb) {
/* Don't leave errors in the queue */
ERR_clear_error();

size_t i;
for (i = 0; i < sk_X509_num(xs_ctx.chain); i++) {
for (size_t i = 0; i < sk_X509_num(xs_ctx.chain); i++) {
x = sk_X509_value(xs_ctx.chain, i);
if (!ssl_add_cert_with_length(&child, x)) {
X509_STORE_CTX_cleanup(&xs_ctx);


ssl/ssl_cipher.c  +2 -3

@@ -1047,8 +1047,7 @@ static void ssl_cipher_collect_ciphers(const SSL_PROTOCOL_METHOD *ssl_method,
/* The set of ciphers is static, but some subset may be unsupported by
* |ssl_method|, so the list may be smaller. */
size_t co_list_num = 0;
size_t i;
for (i = 0; i < kCiphersLen; i++) {
for (size_t i = 0; i < kCiphersLen; i++) {
const SSL_CIPHER *cipher = &kCiphers[i];
if (ssl_method->supports_cipher(cipher)) {
co_list[co_list_num].cipher = cipher;
@@ -1067,7 +1066,7 @@ static void ssl_cipher_collect_ciphers(const SSL_PROTOCOL_METHOD *ssl_method,
if (co_list_num > 1) {
co_list[0].next = &co_list[1];

for (i = 1; i < co_list_num - 1; i++) {
for (size_t i = 1; i < co_list_num - 1; i++) {
co_list[i].prev = &co_list[i - 1];
co_list[i].next = &co_list[i + 1];
}


ssl/ssl_ecdh.c  +2 -4

@@ -504,8 +504,7 @@ static const SSL_ECDH_METHOD kMethods[] = {
};

static const SSL_ECDH_METHOD *method_from_group_id(uint16_t group_id) {
size_t i;
for (i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) {
for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) {
if (kMethods[i].group_id == group_id) {
return &kMethods[i];
}
@@ -514,8 +513,7 @@ static const SSL_ECDH_METHOD *method_from_group_id(uint16_t group_id) {
}

static const SSL_ECDH_METHOD *method_from_nid(int nid) {
size_t i;
for (i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) {
for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) {
if (kMethods[i].nid == nid) {
return &kMethods[i];
}


ssl/ssl_lib.c  +2 -4

@@ -2533,13 +2533,12 @@ void SSL_CTX_set_current_time_cb(SSL_CTX *ctx,
static int cbb_add_hex(CBB *cbb, const uint8_t *in, size_t in_len) {
static const char hextable[] = "0123456789abcdef";
uint8_t *out;
size_t i;

if (!CBB_add_space(cbb, &out, in_len * 2)) {
return 0;
}

for (i = 0; i < in_len; i++) {
for (size_t i = 0; i < in_len; i++) {
*(out++) = (uint8_t)hextable[in[i] >> 4];
*(out++) = (uint8_t)hextable[in[i] & 0xf];
}
@@ -2708,9 +2707,8 @@ int ssl_get_full_version_range(const SSL *ssl, uint16_t *out_min_version,
* as a min/max range by picking the lowest contiguous non-empty range of
* enabled protocols. Note that this means it is impossible to set a maximum
* version of the higest supported TLS version in a future-proof way. */
size_t i;
int any_enabled = 0;
for (i = 0; i < kVersionsLen; i++) {
for (size_t i = 0; i < kVersionsLen; i++) {
/* Only look at the versions already enabled. */
if (min_version > kVersions[i].version) {
continue;


ssl/t1_lib.c  +10 -19

@@ -382,14 +382,13 @@ int tls1_get_shared_group(SSL *ssl, uint16_t *out_group_id) {
int tls1_set_curves(uint16_t **out_group_ids, size_t *out_group_ids_len,
const int *curves, size_t ncurves) {
uint16_t *group_ids;
size_t i;

group_ids = OPENSSL_malloc(ncurves * sizeof(uint16_t));
if (group_ids == NULL) {
return 0;
}

for (i = 0; i < ncurves; i++) {
for (size_t i = 0; i < ncurves; i++) {
if (!ssl_nid_to_group_id(&group_ids[i], curves[i])) {
OPENSSL_free(group_ids);
return 0;
@@ -1739,8 +1738,7 @@ static int ext_srtp_add_clienthello(SSL *ssl, CBB *out) {
return 0;
}

size_t i;
for (i = 0; i < num_profiles; i++) {
for (size_t i = 0; i < num_profiles; i++) {
if (!CBB_add_u16(&profile_ids,
sk_SRTP_PROTECTION_PROFILE_value(profiles, i)->id)) {
return 0;
@@ -1787,8 +1785,7 @@ static int ext_srtp_parse_serverhello(SSL *ssl, uint8_t *out_alert,

/* Check to see if the server gave us something we support (and presumably
* offered). */
size_t i;
for (i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(profiles); i++) {
for (size_t i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(profiles); i++) {
const SRTP_PROTECTION_PROFILE *profile =
sk_SRTP_PROTECTION_PROFILE_value(profiles, i);

@@ -1823,8 +1820,7 @@ static int ext_srtp_parse_clienthello(SSL *ssl, uint8_t *out_alert,
SSL_get_srtp_profiles(ssl);

/* Pick the server's most preferred profile. */
size_t i;
for (i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(server_profiles); i++) {
for (size_t i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(server_profiles); i++) {
const SRTP_PROTECTION_PROFILE *server_profile =
sk_SRTP_PROTECTION_PROFILE_value(server_profiles, i);

@@ -1877,8 +1873,7 @@ static int ssl_any_ec_cipher_suites_enabled(const SSL *ssl) {

const STACK_OF(SSL_CIPHER) *cipher_stack = SSL_get_ciphers(ssl);

size_t i;
for (i = 0; i < sk_SSL_CIPHER_num(cipher_stack); i++) {
for (size_t i = 0; i < sk_SSL_CIPHER_num(cipher_stack); i++) {
const SSL_CIPHER *cipher = sk_SSL_CIPHER_value(cipher_stack, i);

const uint32_t alg_k = cipher->algorithm_mkey;
@@ -2279,8 +2274,7 @@ static int ext_supported_groups_add_clienthello(SSL *ssl, CBB *out) {
size_t groups_len;
tls1_get_grouplist(ssl, 0, &groups, &groups_len);

size_t i;
for (i = 0; i < groups_len; i++) {
for (size_t i = 0; i < groups_len; i++) {
if (!CBB_add_u16(&groups_bytes, groups[i])) {
return 0;
}
@@ -2318,8 +2312,7 @@ static int ext_supported_groups_parse_clienthello(SSL *ssl, uint8_t *out_alert,
}

const size_t num_groups = CBS_len(&supported_group_list) / 2;
size_t i;
for (i = 0; i < num_groups; i++) {
for (size_t i = 0; i < num_groups; i++) {
if (!CBS_get_u16(&supported_group_list,
&ssl->s3->tmp.peer_supported_group_list[i])) {
goto err;
@@ -2524,14 +2517,13 @@ int ssl_add_clienthello_tlsext(SSL *ssl, CBB *out, size_t header_len) {
ssl->s3->tmp.extensions.sent = 0;
ssl->s3->tmp.custom_extensions.sent = 0;

size_t i;
for (i = 0; i < kNumExtensions; i++) {
for (size_t i = 0; i < kNumExtensions; i++) {
if (kExtensions[i].init != NULL) {
kExtensions[i].init(ssl);
}
}

for (i = 0; i < kNumExtensions; i++) {
for (size_t i = 0; i < kNumExtensions; i++) {
const size_t len_before = CBB_len(&extensions);
if (!kExtensions[i].add_clienthello(ssl, &extensions)) {
OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_ADDING_EXTENSION);
@@ -2787,8 +2779,7 @@ static int ssl_scan_serverhello_tlsext(SSL *ssl, CBS *cbs, int *out_alert) {
}
}

size_t i;
for (i = 0; i < kNumExtensions; i++) {
for (size_t i = 0; i < kNumExtensions; i++) {
if (!(received & (1u << i))) {
/* Extension wasn't observed so call the callback with a NULL
* parameter. */


ssl/tls_record.c  +1 -2

@@ -139,8 +139,7 @@ static int ssl_needs_record_splitting(const SSL *ssl) {
}

int ssl_record_sequence_update(uint8_t *seq, size_t seq_len) {
size_t i;
for (i = seq_len - 1; i < seq_len; i--) {
for (size_t i = seq_len - 1; i < seq_len; i--) {
++seq[i];
if (seq[i] != 0) {
return 1;

