
Run the comment converter on libcrypto.

crypto/{asn1,x509,x509v3,pem} were skipped as they are still OpenSSL
style.

Change-Id: I3cd9a60e1cb483a981aca325041f3fbce294247c
Reviewed-on: https://boringssl-review.googlesource.com/19504
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
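For context, the converter rewrites OpenSSL-style /* ... */ block comments as C++-style // line comments, leaving the comment text and the code itself unchanged, as the per-file diffs below show. A minimal, hypothetical before/after sketch of the pattern (not a file from this change):

/* foo_bar sets |*out| to the result. It returns one on success and
 * zero on error. */
int foo_bar(int *out);

becomes

// foo_bar sets |*out| to the result. It returns one on success and
// zero on error.
int foo_bar(int *out);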
kris/onging/CECPQ3_patch15
David Benjamin 7 years ago
committed by CQ bot account: commit-bot@chromium.org
parent
commit
808f832917
100 changed files with 2926 additions and 2966 deletions
  1. +32 -32 crypto/base64/base64.c
  2. +3 -3 crypto/base64/base64_test.cc
  3. +13 -13 crypto/bio/bio.c
  4. +9 -9 crypto/bio/bio_mem.c
  5. +17 -17 crypto/bio/connect.c
  6. +1 -1 crypto/bio/fd.c
  7. +13 -14 crypto/bio/file.c
  8. +15 -16 crypto/bio/hexdump.c
  9. +14 -14 crypto/bio/internal.h
  10. +45 -45 crypto/bio/pair.c
  11. +6 -6 crypto/bio/printf.c
  12. +9 -9 crypto/bn_extra/bn_asn1.c
  13. +16 -16 crypto/bn_extra/convert.c
  14. +3 -3 crypto/buf/buf.c
  15. +34 -34 crypto/bytestring/ber.c
  16. +24 -24 crypto/bytestring/cbb.c
  17. +30 -31 crypto/bytestring/cbs.c
  18. +38 -38 crypto/bytestring/internal.h
  19. +7 -7 crypto/chacha/chacha.c
  20. +3 -3 crypto/cipher_extra/aead_test.cc
  21. +14 -14 crypto/cipher_extra/e_aesctrhmac.c
  22. +97 -97 crypto/cipher_extra/e_aesgcmsiv.c
  23. +25 -25 crypto/cipher_extra/e_chacha20poly1305.c
  24. +8 -8 crypto/cipher_extra/e_rc2.c
  25. +52 -52 crypto/cipher_extra/e_ssl3.c
  26. +123 -123 crypto/cipher_extra/e_tls.c
  27. +40 -40 crypto/cipher_extra/internal.h
  28. +104 -104 crypto/cipher_extra/tls_cbc.c
  29. +20 -20 crypto/cmac/cmac.c
  30. +21 -21 crypto/conf/conf.c
  31. +3 -3 crypto/conf/internal.h
  32. +5 -5 crypto/cpu-aarch64-linux.c
  33. +40 -40 crypto/cpu-arm-linux.c
  34. +51 -51 crypto/cpu-intel.c
  35. +3 -3 crypto/cpu-ppc64le.c
  36. +28 -28 crypto/crypto.c
  37. +269 -269 crypto/curve25519/curve25519.c
  38. +7 -7 crypto/curve25519/internal.h
  39. +85 -83 crypto/curve25519/spake25519.c
  40. +1 -1 crypto/curve25519/spake25519_test.cc
  41. +9 -9 crypto/curve25519/x25519-x86_64.c
  42. +13 -14 crypto/dh/check.c
  43. +32 -34 crypto/dh/dh.c
  44. +1 -1 crypto/dh/dh_asn1.c
  45. +18 -18 crypto/digest_extra/digest_extra.c
  46. +2 -2 crypto/digest_extra/internal.h
  47. +66 -66 crypto/dsa/dsa.c
  48. +1 -1 crypto/dsa/dsa_asn1.c
  49. +8 -8 crypto/dsa/dsa_test.cc
  50. +51 -51 crypto/ec_extra/ec_asn1.c
  51. +1 -1 crypto/ecdh/ecdh.c
  52. +9 -9 crypto/ecdsa_extra/ecdsa_asn1.c
  53. +6 -6 crypto/engine/engine.c
  54. +81 -81 crypto/err/err.c
  55. +3 -3 crypto/err/err_test.cc
  56. +2 -2 crypto/evp/digestsign.c
  57. +4 -4 crypto/evp/evp.c
  58. +15 -15 crypto/evp/evp_asn1.c
  59. +5 -5 crypto/evp/evp_ctx.c
  60. +46 -46 crypto/evp/internal.h
  61. +9 -9 crypto/evp/p_dsa_asn1.c
  62. +4 -4 crypto/evp/p_ec.c
  63. +11 -11 crypto/evp/p_ec_asn1.c
  64. +1 -1 crypto/evp/p_ed25519.c
  65. +11 -11 crypto/evp/p_ed25519_asn1.c
  66. +10 -10 crypto/evp/p_rsa.c
  67. +11 -11 crypto/evp/p_rsa_asn1.c
  68. +14 -14 crypto/evp/pbkdf.c
  69. +4 -4 crypto/evp/print.c
  70. +41 -41 crypto/evp/scrypt.c
  71. +12 -12 crypto/ex_data.c
  72. +61 -65 crypto/fipsmodule/aes/aes.c
  73. +7 -7 crypto/fipsmodule/aes/internal.h
  74. +3 -3 crypto/fipsmodule/aes/key_wrap.c
  75. +1 -1 crypto/fipsmodule/aes/mode_wrappers.c
  76. +23 -23 crypto/fipsmodule/bcm.c
  77. +22 -24 crypto/fipsmodule/bn/add.c
  78. +13 -16 crypto/fipsmodule/bn/asm/x86_64-gcc.c
  79. +3 -3 crypto/fipsmodule/bn/bn.c
  80. +3 -3 crypto/fipsmodule/bn/bn_test.cc
  81. +33 -33 crypto/fipsmodule/bn/bytes.c
  82. +2 -2 crypto/fipsmodule/bn/cmp.c
  83. +35 -37 crypto/fipsmodule/bn/ctx.c
  84. +94 -96 crypto/fipsmodule/bn/div.c
  85. +164 -169 crypto/fipsmodule/bn/exponentiation.c
  86. +98 -106 crypto/fipsmodule/bn/gcd.c
  87. +14 -14 crypto/fipsmodule/bn/generic.c
  88. +31 -31 crypto/fipsmodule/bn/internal.h
  89. +19 -19 crypto/fipsmodule/bn/jacobi.c
  90. +22 -22 crypto/fipsmodule/bn/montgomery.c
  91. +95 -95 crypto/fipsmodule/bn/montgomery_inv.c
  92. +104 -108 crypto/fipsmodule/bn/mul.c
  93. +58 -58 crypto/fipsmodule/bn/prime.c
  94. +19 -19 crypto/fipsmodule/bn/random.c
  95. +1 -1 crypto/fipsmodule/bn/shift.c
  96. +102 -108 crypto/fipsmodule/bn/sqrt.c
  97. +12 -12 crypto/fipsmodule/cipher/aead.c
  98. +11 -11 crypto/fipsmodule/cipher/cipher.c
  99. +35 -35 crypto/fipsmodule/cipher/e_aes.c
  100. +12 -12 crypto/fipsmodule/cipher/internal.h

crypto/base64/base64.c  +32 -32

@@ -65,29 +65,29 @@
#include "../internal.h"


/* constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t|
* arguments for a slightly simpler implementation. */
// constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t|
// arguments for a slightly simpler implementation.
static inline uint8_t constant_time_lt_args_8(uint8_t a, uint8_t b) {
crypto_word_t aw = a;
crypto_word_t bw = b;
/* |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same
* MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1. */
// |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same
// MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1.
return constant_time_msb_w(aw - bw);
}

/* constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max|
* and |CONSTTIME_FALSE_8| otherwise. */
// constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max|
// and |CONSTTIME_FALSE_8| otherwise.
static inline uint8_t constant_time_in_range_8(uint8_t a, uint8_t min,
uint8_t max) {
a -= min;
return constant_time_lt_args_8(a, max - min + 1);
}

/* Encoding. */
// Encoding.

static uint8_t conv_bin2ascii(uint8_t a) {
/* Since PEM is sometimes used to carry private keys, we encode base64 data
* itself in constant-time. */
// Since PEM is sometimes used to carry private keys, we encode base64 data
// itself in constant-time.
a &= 0x3f;
uint8_t ret = constant_time_select_8(constant_time_eq_8(a, 62), '+', '/');
ret =
@@ -183,8 +183,8 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len,
ctx->data_used = (unsigned)in_len;

if (total > INT_MAX) {
/* We cannot signal an error, but we can at least avoid making *out_len
* negative. */
// We cannot signal an error, but we can at least avoid making *out_len
// negative.
total = 0;
}
*out_len = (int)total;
@@ -201,8 +201,8 @@ void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len) {
out[encoded] = '\0';
ctx->data_used = 0;

/* ctx->data_used is bounded by sizeof(ctx->data), so this does not
* overflow. */
// ctx->data_used is bounded by sizeof(ctx->data), so this does not
// overflow.
assert(encoded <= INT_MAX);
*out_len = (int)encoded;
}
@@ -240,7 +240,7 @@ size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) {
}


/* Decoding. */
// Decoding.

int EVP_DecodedLength(size_t *out_len, size_t len) {
if (len % 4 != 0) {
@@ -256,8 +256,8 @@ void EVP_DecodeInit(EVP_ENCODE_CTX *ctx) {
}

static uint8_t base64_ascii_to_bin(uint8_t a) {
/* Since PEM is sometimes used to carry private keys, we decode base64 data
* itself in constant-time. */
// Since PEM is sometimes used to carry private keys, we decode base64 data
// itself in constant-time.
const uint8_t is_upper = constant_time_in_range_8(a, 'A', 'Z');
const uint8_t is_lower = constant_time_in_range_8(a, 'a', 'z');
const uint8_t is_digit = constant_time_in_range_8(a, '0', '9');
@@ -265,21 +265,21 @@ static uint8_t base64_ascii_to_bin(uint8_t a) {
const uint8_t is_slash = constant_time_eq_8(a, '/');
const uint8_t is_equals = constant_time_eq_8(a, '=');

uint8_t ret = 0xff; /* 0xff signals invalid. */
ret = constant_time_select_8(is_upper, a - 'A', ret); /* [0,26) */
ret = constant_time_select_8(is_lower, a - 'a' + 26, ret); /* [26,52) */
ret = constant_time_select_8(is_digit, a - '0' + 52, ret); /* [52,62) */
uint8_t ret = 0xff; // 0xff signals invalid.
ret = constant_time_select_8(is_upper, a - 'A', ret); // [0,26)
ret = constant_time_select_8(is_lower, a - 'a' + 26, ret); // [26,52)
ret = constant_time_select_8(is_digit, a - '0' + 52, ret); // [52,62)
ret = constant_time_select_8(is_plus, 62, ret);
ret = constant_time_select_8(is_slash, 63, ret);
/* Padding maps to zero, to be further handled by the caller. */
// Padding maps to zero, to be further handled by the caller.
ret = constant_time_select_8(is_equals, 0, ret);
return ret;
}

/* base64_decode_quad decodes a single “quad” (i.e. four characters) of base64
* data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the
* number of bytes written, which will be less than three if the quad ended
* with padding. It returns one on success or zero on error. */
// base64_decode_quad decodes a single “quad” (i.e. four characters) of base64
// data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the
// number of bytes written, which will be less than three if the quad ended
// with padding. It returns one on success or zero on error.
static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes,
const uint8_t *in) {
const uint8_t a = base64_ascii_to_bin(in[0]);
@@ -300,20 +300,20 @@ static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes,

switch (padding_pattern) {
case 0:
/* The common case of no padding. */
// The common case of no padding.
*out_num_bytes = 3;
out[0] = v >> 16;
out[1] = v >> 8;
out[2] = v;
break;

case 1: /* xxx= */
case 1: // xxx=
*out_num_bytes = 2;
out[0] = v >> 16;
out[1] = v >> 8;
break;

case 3: /* xx== */
case 3: // xx==
*out_num_bytes = 1;
out[0] = v >> 16;
break;
@@ -424,7 +424,7 @@ int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out,
}

int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) {
/* Trim spaces and tabs from the beginning of the input. */
// Trim spaces and tabs from the beginning of the input.
while (src_len > 0) {
if (src[0] != ' ' && src[0] != '\t') {
break;
@@ -434,7 +434,7 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) {
src_len--;
}

/* Trim newlines, spaces and tabs from the end of the line. */
// Trim newlines, spaces and tabs from the end of the line.
while (src_len > 0) {
switch (src[src_len-1]) {
case ' ':
@@ -455,8 +455,8 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) {
return -1;
}

/* EVP_DecodeBlock does not take padding into account, so put the
* NULs back in... so the caller can strip them back out. */
// EVP_DecodeBlock does not take padding into account, so put the
// NULs back in... so the caller can strip them back out.
while (dst_len % 3 != 0) {
dst[dst_len++] = '\0';
}


crypto/base64/base64_test.cc  +3 -3

@@ -280,9 +280,9 @@ TEST_P(Base64Test, DecodeUpdateStreaming) {
out_len += bytes_written;
if (i == encoded_len ||
(i + 1 == encoded_len && t.encoded[i] == '\n') ||
/* If there was an '-' in the input (which means “EOF”) then
* this loop will continue to test that |EVP_DecodeUpdate| will
* ignore the remainder of the input. */
// If there was an '-' in the input (which means “EOF”) then
// this loop will continue to test that |EVP_DecodeUpdate| will
// ignore the remainder of the input.
strchr(t.encoded, '-') != nullptr) {
break;
}


crypto/bio/bio.c  +13 -13

@@ -409,14 +409,14 @@ void ERR_print_errors(BIO *bio) {
ERR_print_errors_cb(print_bio, bio);
}

/* bio_read_all reads everything from |bio| and prepends |prefix| to it. On
* success, |*out| is set to an allocated buffer (which should be freed with
* |OPENSSL_free|), |*out_len| is set to its length and one is returned. The
* buffer will contain |prefix| followed by the contents of |bio|. On failure,
* zero is returned.
*
* The function will fail if the size of the output would equal or exceed
* |max_len|. */
// bio_read_all reads everything from |bio| and prepends |prefix| to it. On
// success, |*out| is set to an allocated buffer (which should be freed with
// |OPENSSL_free|), |*out_len| is set to its length and one is returned. The
// buffer will contain |prefix| followed by the contents of |bio|. On failure,
// zero is returned.
//
// The function will fail if the size of the output would equal or exceed
// |max_len|.
static int bio_read_all(BIO *bio, uint8_t **out, size_t *out_len,
const uint8_t *prefix, size_t prefix_len,
size_t max_len) {
@@ -480,20 +480,20 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) {
const uint8_t length_byte = header[1];

if ((tag & 0x1f) == 0x1f) {
/* Long form tags are not supported. */
// Long form tags are not supported.
return 0;
}

size_t len, header_len;
if ((length_byte & 0x80) == 0) {
/* Short form length. */
// Short form length.
len = length_byte;
header_len = kInitialHeaderLen;
} else {
const size_t num_bytes = length_byte & 0x7f;

if ((tag & 0x20 /* constructed */) != 0 && num_bytes == 0) {
/* indefinite length. */
// indefinite length.
return bio_read_all(bio, out, out_len, header, kInitialHeaderLen,
max_len);
}
@@ -516,12 +516,12 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) {
}

if (len32 < 128) {
/* Length should have used short-form encoding. */
// Length should have used short-form encoding.
return 0;
}

if ((len32 >> ((num_bytes-1)*8)) == 0) {
/* Length should have been at least one byte shorter. */
// Length should have been at least one byte shorter.
return 0;
}



crypto/bio/bio_mem.c  +9 -9

@@ -82,16 +82,16 @@ BIO *BIO_new_mem_buf(const void *buf, int len) {
}

b = (BUF_MEM *)ret->ptr;
/* BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. */
// BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to.
b->data = (void *)buf;
b->length = size;
b->max = size;

ret->flags |= BIO_FLAGS_MEM_RDONLY;

/* |num| is used to store the value that this BIO will return when it runs
* out of data. If it's negative then the retry flags will also be set. Since
* this is static data, retrying wont help */
// |num| is used to store the value that this BIO will return when it runs
// out of data. If it's negative then the retry flags will also be set. Since
// this is static data, retrying wont help
ret->num = 0;

return ret;
@@ -105,8 +105,8 @@ static int mem_new(BIO *bio) {
return 0;
}

/* |shutdown| is used to store the close flag: whether the BIO has ownership
* of the BUF_MEM. */
// |shutdown| is used to store the close flag: whether the BIO has ownership
// of the BUF_MEM.
bio->shutdown = 1;
bio->init = 1;
bio->num = -1;
@@ -214,8 +214,8 @@ static int mem_gets(BIO *bio, char *buf, int size) {
}
}

/* i is now the max num of bytes to copy, either j or up to and including the
* first newline */
// i is now the max num of bytes to copy, either j or up to and including the
// first newline

i = mem_read(bio, buf, i);
if (i > 0) {
@@ -233,7 +233,7 @@ static long mem_ctrl(BIO *bio, int cmd, long num, void *ptr) {
switch (cmd) {
case BIO_CTRL_RESET:
if (b->data != NULL) {
/* For read only case reset to the start again */
// For read only case reset to the start again
if (bio->flags & BIO_FLAGS_MEM_RDONLY) {
b->data -= b->max - b->length;
b->length = b->max;


crypto/bio/connect.c  +17 -17

@@ -98,12 +98,12 @@ typedef struct bio_connect_st {
struct sockaddr_storage them;
socklen_t them_length;

/* the file descriptor is kept in bio->num in order to match the socket
* BIO. */
// the file descriptor is kept in bio->num in order to match the socket
// BIO.

/* info_callback is called when the connection is initially made
* callback(BIO,state,ret); The callback should return 'ret', state is for
* compatibility with the SSL info_callback. */
// info_callback is called when the connection is initially made
// callback(BIO,state,ret); The callback should return 'ret', state is for
// compatibility with the SSL info_callback.
int (*info_callback)(const BIO *bio, int state, int ret);
} BIO_CONNECT;

@@ -113,9 +113,9 @@ static int closesocket(int sock) {
}
#endif

/* split_host_and_port sets |*out_host| and |*out_port| to the host and port
* parsed from |name|. It returns one on success or zero on error. Even when
* successful, |*out_port| may be NULL on return if no port was specified. */
// split_host_and_port sets |*out_host| and |*out_port| to the host and port
// parsed from |name|. It returns one on success or zero on error. Even when
// successful, |*out_port| may be NULL on return if no port was specified.
static int split_host_and_port(char **out_host, char **out_port, const char *name) {
const char *host, *port = NULL;
size_t host_len = 0;
@@ -123,24 +123,24 @@ static int split_host_and_port(char **out_host, char **out_port, const char *nam
*out_host = NULL;
*out_port = NULL;

if (name[0] == '[') { /* bracketed IPv6 address */
if (name[0] == '[') { // bracketed IPv6 address
const char *close = strchr(name, ']');
if (close == NULL) {
return 0;
}
host = name + 1;
host_len = close - host;
if (close[1] == ':') { /* [IP]:port */
if (close[1] == ':') { // [IP]:port
port = close + 2;
} else if (close[1] != 0) {
return 0;
}
} else {
const char *colon = strchr(name, ':');
if (colon == NULL || strchr(colon + 1, ':') != NULL) { /* IPv6 address */
if (colon == NULL || strchr(colon + 1, ':') != NULL) { // IPv6 address
host = name;
host_len = strlen(name);
} else { /* host:port */
} else { // host:port
host = name;
host_len = colon - name;
port = colon + 1;
@@ -175,9 +175,9 @@ static int conn_state(BIO *bio, BIO_CONNECT *c) {
for (;;) {
switch (c->state) {
case BIO_CONN_S_BEFORE:
/* If there's a hostname and a port, assume that both are
* exactly what they say. If there is only a hostname, try
* (just once) to split it into a hostname and port. */
// If there's a hostname and a port, assume that both are
// exactly what they say. If there is only a hostname, try
// (just once) to split it into a hostname and port.

if (c->param_hostname == NULL) {
OPENSSL_PUT_ERROR(BIO, BIO_R_NO_HOSTNAME_SPECIFIED);
@@ -330,7 +330,7 @@ static void conn_close_socket(BIO *bio) {
return;
}

/* Only do a shutdown if things were established */
// Only do a shutdown if things were established
if (c->state == BIO_CONN_S_OK) {
shutdown(bio->num, 2);
}
@@ -415,7 +415,7 @@ static long conn_ctrl(BIO *bio, int cmd, long num, void *ptr) {
bio->flags = 0;
break;
case BIO_C_DO_STATE_MACHINE:
/* use this one to start the connection */
// use this one to start the connection
if (data->state != BIO_CONN_S_OK) {
ret = (long)conn_state(bio, data);
} else {


crypto/bio/fd.c  +1 -1

@@ -138,7 +138,7 @@ BIO *BIO_new_fd(int fd, int close_flag) {
}

static int fd_new(BIO *bio) {
/* num is used to store the file descriptor. */
// num is used to store the file descriptor.
bio->num = -1;
return 1;
}


crypto/bio/file.c  +13 -14

@@ -55,18 +55,17 @@
* [including the GNU Public Licence.] */

#if defined(__linux) || defined(__sun) || defined(__hpux)
/* Following definition aliases fopen to fopen64 on above mentioned
* platforms. This makes it possible to open and sequentially access
* files larger than 2GB from 32-bit application. It does not allow to
* traverse them beyond 2GB with fseek/ftell, but on the other hand *no*
* 32-bit platform permits that, not with fseek/ftell. Not to mention
* that breaking 2GB limit for seeking would require surgery to *our*
* API. But sequential access suffices for practical cases when you
* can run into large files, such as fingerprinting, so we can let API
* alone. For reference, the list of 32-bit platforms which allow for
* sequential access of large files without extra "magic" comprise *BSD,
* Darwin, IRIX...
*/
// Following definition aliases fopen to fopen64 on above mentioned
// platforms. This makes it possible to open and sequentially access
// files larger than 2GB from 32-bit application. It does not allow to
// traverse them beyond 2GB with fseek/ftell, but on the other hand *no*
// 32-bit platform permits that, not with fseek/ftell. Not to mention
// that breaking 2GB limit for seeking would require surgery to *our*
// API. But sequential access suffices for practical cases when you
// can run into large files, such as fingerprinting, so we can let API
// alone. For reference, the list of 32-bit platforms which allow for
// sequential access of large files without extra "magic" comprise *BSD,
// Darwin, IRIX...
#ifndef _FILE_OFFSET_BITS
#define _FILE_OFFSET_BITS 64
#endif
@@ -157,7 +156,7 @@ static int file_read(BIO *b, char *out, int outl) {
return -1;
}

/* fread reads at most |outl| bytes, so |ret| fits in an int. */
// fread reads at most |outl| bytes, so |ret| fits in an int.
return (int)ret;
}

@@ -232,7 +231,7 @@ static long file_ctrl(BIO *b, int cmd, long num, void *ptr) {
b->init = 1;
break;
case BIO_C_GET_FILE_PTR:
/* the ptr parameter is actually a FILE ** in this case. */
// the ptr parameter is actually a FILE ** in this case.
if (ptr != NULL) {
fpp = (FILE **)ptr;
*fpp = (FILE *)b->ptr;


crypto/bio/hexdump.c  +15 -16

@@ -62,12 +62,12 @@
#include "../internal.h"


/* hexdump_ctx contains the state of a hexdump. */
// hexdump_ctx contains the state of a hexdump.
struct hexdump_ctx {
BIO *bio;
char right_chars[18]; /* the contents of the right-hand side, ASCII dump. */
unsigned used; /* number of bytes in the current line. */
size_t n; /* number of bytes total. */
char right_chars[18]; // the contents of the right-hand side, ASCII dump.
unsigned used; // number of bytes in the current line.
size_t n; // number of bytes total.
unsigned indent;
};

@@ -84,21 +84,20 @@ static char to_char(uint8_t b) {
return b;
}

/* hexdump_write adds |len| bytes of |data| to the current hex dump described by
* |ctx|. */
// hexdump_write adds |len| bytes of |data| to the current hex dump described by
// |ctx|.
static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
size_t len) {
char buf[10];
unsigned l;

/* Output lines look like:
* 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=|
* ^ offset ^ extra space ^ ASCII of line
*/
// Output lines look like:
// 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=|
// ^ offset ^ extra space ^ ASCII of line

for (size_t i = 0; i < len; i++) {
if (ctx->used == 0) {
/* The beginning of a line. */
// The beginning of a line.
BIO_indent(ctx->bio, ctx->indent, UINT_MAX);

hexbyte(&buf[0], ctx->n >> 24);
@@ -115,12 +114,12 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
buf[2] = ' ';
l = 3;
if (ctx->used == 7) {
/* There's an additional space after the 8th byte. */
// There's an additional space after the 8th byte.
buf[3] = ' ';
l = 4;
} else if (ctx->used == 15) {
/* At the end of the line there's an extra space and the bar for the
* right column. */
// At the end of the line there's an extra space and the bar for the
// right column.
buf[3] = ' ';
buf[4] = '|';
l = 5;
@@ -145,9 +144,9 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
return 1;
}

/* finish flushes any buffered data in |ctx|. */
// finish flushes any buffered data in |ctx|.
static int finish(struct hexdump_ctx *ctx) {
/* See the comments in |hexdump| for the details of this format. */
// See the comments in |hexdump| for the details of this format.
const unsigned n_bytes = ctx->used;
unsigned l;
char buf[5];


crypto/bio/internal.h  +14 -14

@@ -61,7 +61,7 @@

#if !defined(OPENSSL_WINDOWS)
#if defined(OPENSSL_PNACL)
/* newlib uses u_short in socket.h without defining it. */
// newlib uses u_short in socket.h without defining it.
typedef unsigned short u_short;
#endif
#include <sys/types.h>
@@ -78,34 +78,34 @@ extern "C" {
#endif


/* BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr|
* and |*out_addr_length| with the correct values for connecting to |hostname|
* on |port_str|. It returns one on success or zero on error. */
// BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr|
// and |*out_addr_length| with the correct values for connecting to |hostname|
// on |port_str|. It returns one on success or zero on error.
int bio_ip_and_port_to_socket_and_addr(int *out_sock,
struct sockaddr_storage *out_addr,
socklen_t *out_addr_length,
const char *hostname,
const char *port_str);

/* BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on
* success and zero otherwise. */
// BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on
// success and zero otherwise.
int bio_socket_nbio(int sock, int on);

/* BIO_clear_socket_error clears the last system socket error.
*
* TODO(fork): remove all callers of this. */
// BIO_clear_socket_error clears the last system socket error.
//
// TODO(fork): remove all callers of this.
void bio_clear_socket_error(void);

/* BIO_sock_error returns the last socket error on |sock|. */
// BIO_sock_error returns the last socket error on |sock|.
int bio_sock_error(int sock);

/* BIO_fd_should_retry returns non-zero if |return_value| indicates an error
* and |errno| indicates that it's non-fatal. */
// BIO_fd_should_retry returns non-zero if |return_value| indicates an error
// and |errno| indicates that it's non-fatal.
int bio_fd_should_retry(int return_value);


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_BIO_INTERNAL_H */
#endif // OPENSSL_HEADER_BIO_INTERNAL_H

crypto/bio/pair.c  +45 -45

@@ -63,22 +63,22 @@


struct bio_bio_st {
BIO *peer; /* NULL if buf == NULL.
* If peer != NULL, then peer->ptr is also a bio_bio_st,
* and its "peer" member points back to us.
* peer != NULL iff init != 0 in the BIO. */
/* This is for what we write (i.e. reading uses peer's struct): */
int closed; /* valid iff peer != NULL */
size_t len; /* valid iff buf != NULL; 0 if peer == NULL */
size_t offset; /* valid iff buf != NULL; 0 if len == 0 */
BIO *peer; // NULL if buf == NULL.
// If peer != NULL, then peer->ptr is also a bio_bio_st,
// and its "peer" member points back to us.
// peer != NULL iff init != 0 in the BIO.
// This is for what we write (i.e. reading uses peer's struct):
int closed; // valid iff peer != NULL
size_t len; // valid iff buf != NULL; 0 if peer == NULL
size_t offset; // valid iff buf != NULL; 0 if len == 0
size_t size;
uint8_t *buf; /* "size" elements (if != NULL) */
uint8_t *buf; // "size" elements (if != NULL)

size_t request; /* valid iff peer != NULL; 0 if len != 0,
* otherwise set by peer to number of bytes
* it (unsuccessfully) tried to read,
* never more than buffer space (size-len) warrants. */
size_t request; // valid iff peer != NULL; 0 if len != 0,
// otherwise set by peer to number of bytes
// it (unsuccessfully) tried to read,
// never more than buffer space (size-len) warrants.
};

static int bio_new(BIO *bio) {
@@ -90,7 +90,7 @@ static int bio_new(BIO *bio) {
}
OPENSSL_memset(b, 0, sizeof(struct bio_bio_st));

b->size = 17 * 1024; /* enough for one TLS record (just a default) */
b->size = 17 * 1024; // enough for one TLS record (just a default)
bio->ptr = b;
return 1;
}
@@ -165,7 +165,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
assert(peer_b != NULL);
assert(peer_b->buf != NULL);

peer_b->request = 0; /* will be set in "retry_read" situation */
peer_b->request = 0; // will be set in "retry_read" situation

if (buf == NULL || size == 0) {
return 0;
@@ -173,30 +173,30 @@ static int bio_read(BIO *bio, char *buf, int size_) {

if (peer_b->len == 0) {
if (peer_b->closed) {
return 0; /* writer has closed, and no data is left */
return 0; // writer has closed, and no data is left
} else {
BIO_set_retry_read(bio); /* buffer is empty */
BIO_set_retry_read(bio); // buffer is empty
if (size <= peer_b->size) {
peer_b->request = size;
} else {
/* don't ask for more than the peer can
* deliver in one write */
// don't ask for more than the peer can
// deliver in one write
peer_b->request = peer_b->size;
}
return -1;
}
}

/* we can read */
// we can read
if (peer_b->len < size) {
size = peer_b->len;
}

/* now read "size" bytes */
// now read "size" bytes
rest = size;

assert(rest > 0);
/* one or two iterations */
// one or two iterations
do {
size_t chunk;

@@ -204,7 +204,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
if (peer_b->offset + rest <= peer_b->size) {
chunk = rest;
} else {
/* wrap around ring buffer */
// wrap around ring buffer
chunk = peer_b->size - peer_b->offset;
}
assert(peer_b->offset + chunk <= peer_b->size);
@@ -220,7 +220,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
}
buf += chunk;
} else {
/* buffer now empty, no need to advance "buf" */
// buffer now empty, no need to advance "buf"
assert(chunk == rest);
peer_b->offset = 0;
}
@@ -248,7 +248,7 @@ static int bio_write(BIO *bio, const char *buf, int num_) {

b->request = 0;
if (b->closed) {
/* we already closed */
// we already closed
OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE);
return -1;
}
@@ -256,20 +256,20 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
assert(b->len <= b->size);

if (b->len == b->size) {
BIO_set_retry_write(bio); /* buffer is full */
BIO_set_retry_write(bio); // buffer is full
return -1;
}

/* we can write */
// we can write
if (num > b->size - b->len) {
num = b->size - b->len;
}

/* now write "num" bytes */
// now write "num" bytes
rest = num;

assert(rest > 0);
/* one or two iterations */
// one or two iterations
do {
size_t write_offset;
size_t chunk;
@@ -280,12 +280,12 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
if (write_offset >= b->size) {
write_offset -= b->size;
}
/* b->buf[write_offset] is the first byte we can write to. */
// b->buf[write_offset] is the first byte we can write to.

if (write_offset + rest <= b->size) {
chunk = rest;
} else {
/* wrap around ring buffer */
// wrap around ring buffer
chunk = b->size - write_offset;
}

@@ -363,15 +363,15 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) {
assert(b != NULL);

switch (cmd) {
/* specific CTRL codes */
// specific CTRL codes

case BIO_C_GET_WRITE_BUF_SIZE:
ret = (long)b->size;
break;

case BIO_C_GET_WRITE_GUARANTEE:
/* How many bytes can the caller feed to the next write
* without having to keep any? */
// How many bytes can the caller feed to the next write
// without having to keep any?
if (b->peer == NULL || b->closed) {
ret = 0;
} else {
@@ -380,28 +380,28 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) {
break;

case BIO_C_GET_READ_REQUEST:
/* If the peer unsuccessfully tried to read, how many bytes
* were requested? (As with BIO_CTRL_PENDING, that number
* can usually be treated as boolean.) */
// If the peer unsuccessfully tried to read, how many bytes
// were requested? (As with BIO_CTRL_PENDING, that number
// can usually be treated as boolean.)
ret = (long)b->request;
break;

case BIO_C_RESET_READ_REQUEST:
/* Reset request. (Can be useful after read attempts
* at the other side that are meant to be non-blocking,
* e.g. when probing SSL_read to see if any data is
* available.) */
// Reset request. (Can be useful after read attempts
// at the other side that are meant to be non-blocking,
// e.g. when probing SSL_read to see if any data is
// available.)
b->request = 0;
ret = 1;
break;

case BIO_C_SHUTDOWN_WR:
/* similar to shutdown(..., SHUT_WR) */
// similar to shutdown(..., SHUT_WR)
b->closed = 1;
ret = 1;
break;

/* standard CTRL codes follow */
// standard CTRL codes follow

case BIO_CTRL_GET_CLOSE:
ret = bio->shutdown;
@@ -453,7 +453,7 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) {

static const BIO_METHOD methods_biop = {
BIO_TYPE_BIO, "BIO pair", bio_write, bio_read, NULL /* puts */,
NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */
NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */,
};

static const BIO_METHOD *bio_s_bio(void) { return &methods_biop; }


crypto/bio/printf.c  +6 -6

@@ -55,7 +55,7 @@
* [including the GNU Public Licence.] */

#if !defined(_POSIX_C_SOURCE)
#define _POSIX_C_SOURCE 201410L /* for snprintf, vprintf etc */
#define _POSIX_C_SOURCE 201410L // for snprintf, vprintf etc
#endif

#include <openssl/bio.h>
@@ -77,8 +77,8 @@ int BIO_printf(BIO *bio, const char *format, ...) {
va_end(args);

#if defined(OPENSSL_WINDOWS)
/* On Windows, vsnprintf returns -1 rather than the requested length on
* truncation */
// On Windows, vsnprintf returns -1 rather than the requested length on
// truncation
if (out_len < 0) {
va_start(args, format);
out_len = _vscprintf(format, args);
@@ -93,9 +93,9 @@ int BIO_printf(BIO *bio, const char *format, ...) {

if ((size_t) out_len >= sizeof(buf)) {
const int requested_len = out_len;
/* The output was truncated. Note that vsnprintf's return value
* does not include a trailing NUL, but the buffer must be sized
* for it. */
// The output was truncated. Note that vsnprintf's return value
// does not include a trailing NUL, but the buffer must be sized
// for it.
out = OPENSSL_malloc(requested_len + 1);
out_malloced = 1;
if (out == NULL) {


crypto/bn_extra/bn_asn1.c  +9 -9

@@ -31,7 +31,7 @@ int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret) {
return 0;
}

/* INTEGERs must be minimal. */
// INTEGERs must be minimal.
if (CBS_data(&child)[0] == 0x00 &&
CBS_len(&child) > 1 &&
!(CBS_data(&child)[1] & 0x80)) {
@@ -50,16 +50,16 @@ int BN_parse_asn1_unsigned_buggy(CBS *cbs, BIGNUM *ret) {
return 0;
}

/* This function intentionally does not reject negative numbers or non-minimal
* encodings. Estonian IDs issued between September 2014 to September 2015 are
* broken. See https://crbug.com/532048 and https://crbug.com/534766.
*
* TODO(davidben): Remove this code and callers in March 2016. */
// This function intentionally does not reject negative numbers or non-minimal
// encodings. Estonian IDs issued between September 2014 to September 2015 are
// broken. See https://crbug.com/532048 and https://crbug.com/534766.
//
// TODO(davidben): Remove this code and callers in March 2016.
return BN_bin2bn(CBS_data(&child), CBS_len(&child), ret) != NULL;
}

int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) {
/* Negative numbers are unsupported. */
// Negative numbers are unsupported.
if (BN_is_negative(bn)) {
OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
return 0;
@@ -67,8 +67,8 @@ int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) {

CBB child;
if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER) ||
/* The number must be padded with a leading zero if the high bit would
* otherwise be set or if |bn| is zero. */
// The number must be padded with a leading zero if the high bit would
// otherwise be set or if |bn| is zero.
(BN_num_bits(bn) % 8 == 0 && !CBB_add_u8(&child, 0x00)) ||
!BN_bn2cbb_padded(&child, BN_num_bytes(bn), bn) ||
!CBB_flush(cbb)) {


crypto/bn_extra/convert.c  +16 -16

@@ -96,7 +96,7 @@ char *BN_bn2hex(const BIGNUM *bn) {
int z = 0;
for (int i = bn->top - 1; i >= 0; i--) {
for (int j = BN_BITS2 - 8; j >= 0; j -= 8) {
/* strip leading zeros */
// strip leading zeros
int v = ((int)(bn->d[i] >> (long)j)) & 0xff;
if (z || v != 0) {
*(p++) = hextable[v >> 4];
@@ -110,20 +110,20 @@ char *BN_bn2hex(const BIGNUM *bn) {
return buf;
}

/* decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. */
// decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|.
static int decode_hex(BIGNUM *bn, const char *in, int in_len) {
if (in_len > INT_MAX/4) {
OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
return 0;
}
/* |in_len| is the number of hex digits. */
// |in_len| is the number of hex digits.
if (!bn_expand(bn, in_len * 4)) {
return 0;
}

int i = 0;
while (in_len > 0) {
/* Decode one |BN_ULONG| at a time. */
// Decode one |BN_ULONG| at a time.
int todo = BN_BYTES * 2;
if (todo > in_len) {
todo = in_len;
@@ -143,7 +143,7 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) {
hex = c - 'A' + 10;
} else {
hex = 0;
/* This shouldn't happen. The caller checks |isxdigit|. */
// This shouldn't happen. The caller checks |isxdigit|.
assert(0);
}
word = (word << 4) | hex;
@@ -157,12 +157,12 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) {
return 1;
}

/* decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. */
// decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|.
static int decode_dec(BIGNUM *bn, const char *in, int in_len) {
int i, j;
BN_ULONG l = 0;

/* Decode |BN_DEC_NUM| digits at a time. */
// Decode |BN_DEC_NUM| digits at a time.
j = BN_DEC_NUM - (in_len % BN_DEC_NUM);
if (j == BN_DEC_NUM) {
j = 0;
@@ -207,7 +207,7 @@ static int bn_x2bn(BIGNUM **outp, const char *in, decode_func decode, char_test_
return num;
}

/* in is the start of the hex digits, and it is 'i' long */
// in is the start of the hex digits, and it is 'i' long
if (*outp == NULL) {
ret = BN_new();
if (ret == NULL) {
@@ -243,8 +243,8 @@ int BN_hex2bn(BIGNUM **outp, const char *in) {
}

char *BN_bn2dec(const BIGNUM *a) {
/* It is easier to print strings little-endian, so we assemble it in reverse
* and fix at the end. */
// It is easier to print strings little-endian, so we assemble it in reverse
// and fix at the end.
BIGNUM *copy = NULL;
CBB cbb;
if (!CBB_init(&cbb, 16) ||
@@ -290,7 +290,7 @@ char *BN_bn2dec(const BIGNUM *a) {
goto cbb_err;
}

/* Reverse the buffer. */
// Reverse the buffer.
for (size_t i = 0; i < len/2; i++) {
uint8_t tmp = data[i];
data[i] = data[len - 1 - i];
@@ -349,7 +349,7 @@ int BN_print(BIO *bp, const BIGNUM *a) {

for (i = a->top - 1; i >= 0; i--) {
for (j = BN_BITS2 - 4; j >= 0; j -= 4) {
/* strip leading zeros */
// strip leading zeros
v = ((int)(a->d[i] >> (long)j)) & 0x0f;
if (z || v != 0) {
if (BIO_write(bp, &hextable[v], 1) != 1) {
@@ -384,8 +384,8 @@ int BN_print_fp(FILE *fp, const BIGNUM *a) {
size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) {
const size_t bits = BN_num_bits(in);
const size_t bytes = (bits + 7) / 8;
/* If the number of bits is a multiple of 8, i.e. if the MSB is set,
* prefix with a zero byte. */
// If the number of bits is a multiple of 8, i.e. if the MSB is set,
// prefix with a zero byte.
int extend = 0;
if (bytes != 0 && (bits & 0x07) == 0) {
extend = 1;
@@ -395,8 +395,8 @@ size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) {
if (len < bytes ||
4 + len < len ||
(len & 0xffffffff) != len) {
/* If we cannot represent the number then we emit zero as the interface
* doesn't allow an error to be signalled. */
// If we cannot represent the number then we emit zero as the interface
// doesn't allow an error to be signalled.
if (out) {
OPENSSL_memset(out, 0, 4);
}


crypto/buf/buf.c  +3 -3

@@ -97,14 +97,14 @@ static int buf_mem_reserve(BUF_MEM *buf, size_t cap, int clean) {

size_t n = cap + 3;
if (n < cap) {
/* overflow */
// overflow
OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE);
return 0;
}
n = n / 3;
size_t alloc_size = n * 4;
if (alloc_size / 4 != n) {
/* overflow */
// overflow
OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE);
return 0;
}
@@ -185,7 +185,7 @@ char *BUF_strndup(const char *str, size_t size) {

alloc_size = size + 1;
if (alloc_size < size) {
/* overflow */
// overflow
OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE);
return NULL;
}


crypto/bytestring/ber.c  +34 -34

@@ -21,13 +21,13 @@
#include "../internal.h"


/* kMaxDepth is a just a sanity limit. The code should be such that the length
* of the input being processes always decreases. None the less, a very large
* input could otherwise cause the stack to overflow. */
// kMaxDepth is a just a sanity limit. The code should be such that the length
// of the input being processes always decreases. None the less, a very large
// input could otherwise cause the stack to overflow.
static const unsigned kMaxDepth = 2048;

/* is_string_type returns one if |tag| is a string type and zero otherwise. It
* ignores the constructed bit. */
// is_string_type returns one if |tag| is a string type and zero otherwise. It
// ignores the constructed bit.
static int is_string_type(unsigned tag) {
if ((tag & 0xc0) != 0) {
return 0;
@@ -52,10 +52,10 @@ static int is_string_type(unsigned tag) {
}
}

/* cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found|
* depending on whether an indefinite length element or constructed string was
* found. The value of |orig_in| is not changed. It returns one on success (i.e.
* |*ber_found| was set) and zero on error. */
// cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found|
// depending on whether an indefinite length element or constructed string was
// found. The value of |orig_in| is not changed. It returns one on success (i.e.
// |*ber_found| was set) and zero on error.
static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) {
CBS in;

@@ -77,13 +77,13 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) {
if (CBS_len(&contents) == header_len &&
header_len > 0 &&
CBS_data(&contents)[header_len-1] == 0x80) {
/* Found an indefinite-length element. */
// Found an indefinite-length element.
*ber_found = 1;
return 1;
}
if (tag & CBS_ASN1_CONSTRUCTED) {
if (is_string_type(tag)) {
/* Constructed strings are only legal in BER and require conversion. */
// Constructed strings are only legal in BER and require conversion.
*ber_found = 1;
return 1;
}
@@ -97,20 +97,20 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) {
return 1;
}

/* is_eoc returns true if |header_len| and |contents|, as returned by
* |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value. */
// is_eoc returns true if |header_len| and |contents|, as returned by
// |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value.
static char is_eoc(size_t header_len, CBS *contents) {
return header_len == 2 && CBS_len(contents) == 2 &&
OPENSSL_memcmp(CBS_data(contents), "\x00\x00", 2) == 0;
}

/* cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If
* |string_tag| is non-zero, then all elements must match |string_tag| up to the
* constructed bit and primitive element bodies are written to |out| without
* element headers. This is used when concatenating the fragments of a
* constructed string. If |looking_for_eoc| is set then any EOC elements found
* will cause the function to return after consuming it. It returns one on
* success and zero on error. */
// cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If
// |string_tag| is non-zero, then all elements must match |string_tag| up to the
// constructed bit and primitive element bodies are written to |out| without
// element headers. This is used when concatenating the fragments of a
// constructed string. If |looking_for_eoc| is set then any EOC elements found
// will cause the function to return after consuming it. It returns one on
// success and zero on error.
static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,
char looking_for_eoc, unsigned depth) {
assert(!(string_tag & CBS_ASN1_CONSTRUCTED));
@@ -134,9 +134,9 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,
}

if (string_tag != 0) {
/* This is part of a constructed string. All elements must match
* |string_tag| up to the constructed bit and get appended to |out|
* without a child element. */
// This is part of a constructed string. All elements must match
// |string_tag| up to the constructed bit and get appended to |out|
// without a child element.
if ((tag & ~CBS_ASN1_CONSTRUCTED) != string_tag) {
return 0;
}
@@ -144,8 +144,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,
} else {
unsigned out_tag = tag;
if ((tag & CBS_ASN1_CONSTRUCTED) && is_string_type(tag)) {
/* If a constructed string, clear the constructed bit and inform
* children to concatenate bodies. */
// If a constructed string, clear the constructed bit and inform
// children to concatenate bodies.
out_tag &= ~CBS_ASN1_CONSTRUCTED;
child_string_tag = out_tag;
}
@@ -157,7 +157,7 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,

if (CBS_len(&contents) == header_len && header_len > 0 &&
CBS_data(&contents)[header_len - 1] == 0x80) {
/* This is an indefinite length element. */
// This is an indefinite length element.
if (!cbs_convert_ber(in, out_contents, child_string_tag,
1 /* looking for eoc */, depth + 1) ||
!CBB_flush(out)) {
@@ -171,13 +171,13 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,
}

if (tag & CBS_ASN1_CONSTRUCTED) {
/* Recurse into children. */
// Recurse into children.
if (!cbs_convert_ber(&contents, out_contents, child_string_tag,
0 /* not looking for eoc */, depth + 1)) {
return 0;
}
} else {
/* Copy primitive contents as-is. */
// Copy primitive contents as-is.
if (!CBB_add_bytes(out_contents, CBS_data(&contents),
CBS_len(&contents))) {
return 0;
@@ -195,8 +195,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag,
int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len) {
CBB cbb;

/* First, do a quick walk to find any indefinite-length elements. Most of the
* time we hope that there aren't any and thus we can quickly return. */
// First, do a quick walk to find any indefinite-length elements. Most of the
// time we hope that there aren't any and thus we can quickly return.
char conversion_needed;
if (!cbs_find_ber(in, &conversion_needed, 0)) {
return 0;
@@ -225,14 +225,14 @@ int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage,
assert(is_string_type(inner_tag));

if (CBS_peek_asn1_tag(in, outer_tag)) {
/* Normal implicitly-tagged string. */
// Normal implicitly-tagged string.
*out_storage = NULL;
return CBS_get_asn1(in, out, outer_tag);
}

/* Otherwise, try to parse an implicitly-tagged constructed string.
* |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep
* of nesting. */
// Otherwise, try to parse an implicitly-tagged constructed string.
// |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep
// of nesting.
CBB result;
CBS child;
if (!CBB_init(&result, CBS_len(in)) ||


crypto/bytestring/cbb.c  +24 -24

@@ -27,7 +27,7 @@ void CBB_zero(CBB *cbb) {
}

static int cbb_init(CBB *cbb, uint8_t *buf, size_t cap) {
/* This assumes that |cbb| has already been zeroed. */
// This assumes that |cbb| has already been zeroed.
struct cbb_buffer_st *base;

base = OPENSSL_malloc(sizeof(struct cbb_buffer_st));
@@ -75,8 +75,8 @@ int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len) {

void CBB_cleanup(CBB *cbb) {
if (cbb->base) {
/* Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They
* are implicitly discarded when the parent is flushed or cleaned up. */
// Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They
// are implicitly discarded when the parent is flushed or cleaned up.
assert(cbb->is_top_level);

if (cbb->base->can_resize) {
@@ -97,7 +97,7 @@ static int cbb_buffer_reserve(struct cbb_buffer_st *base, uint8_t **out,

newlen = base->len + len;
if (newlen < base->len) {
/* Overflow */
// Overflow
goto err;
}

@@ -137,7 +137,7 @@ static int cbb_buffer_add(struct cbb_buffer_st *base, uint8_t **out,
if (!cbb_buffer_reserve(base, out, len)) {
return 0;
}
/* This will not overflow or |cbb_buffer_reserve| would have failed. */
// This will not overflow or |cbb_buffer_reserve| would have failed.
base->len += len;
return 1;
}
@@ -176,7 +176,7 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) {
}

if (cbb->base->can_resize && (out_data == NULL || out_len == NULL)) {
/* |out_data| and |out_len| can only be NULL if the CBB is fixed. */
// |out_data| and |out_len| can only be NULL if the CBB is fixed.
return 0;
}

@@ -191,15 +191,15 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) {
return 1;
}

/* CBB_flush recurses and then writes out any pending length prefix. The
* current length of the underlying base is taken to be the length of the
* length-prefixed data. */
// CBB_flush recurses and then writes out any pending length prefix. The
// current length of the underlying base is taken to be the length of the
// length-prefixed data.
int CBB_flush(CBB *cbb) {
size_t child_start, i, len;

/* If |cbb->base| has hit an error, the buffer is in an undefined state, so
* fail all following calls. In particular, |cbb->child| may point to invalid
* memory. */
// If |cbb->base| has hit an error, the buffer is in an undefined state, so
// fail all following calls. In particular, |cbb->child| may point to invalid
// memory.
if (cbb->base == NULL || cbb->base->error) {
return 0;
}
@@ -219,16 +219,16 @@ int CBB_flush(CBB *cbb) {
len = cbb->base->len - child_start;

if (cbb->child->pending_is_asn1) {
/* For ASN.1 we assume that we'll only need a single byte for the length.
* If that turned out to be incorrect, we have to move the contents along
* in order to make space. */
// For ASN.1 we assume that we'll only need a single byte for the length.
// If that turned out to be incorrect, we have to move the contents along
// in order to make space.
uint8_t len_len;
uint8_t initial_length_byte;

assert (cbb->child->pending_len_len == 1);

if (len > 0xfffffffe) {
/* Too large. */
// Too large.
goto err;
} else if (len > 0xffffff) {
len_len = 5;
@@ -249,7 +249,7 @@ int CBB_flush(CBB *cbb) {
}

if (len_len != 1) {
/* We need to move the contents along in order to make space. */
// We need to move the contents along in order to make space.
size_t extra_bytes = len_len - 1;
if (!cbb_buffer_add(cbb->base, NULL, extra_bytes)) {
goto err;
@@ -331,14 +331,14 @@ int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents) {
int CBB_add_asn1(CBB *cbb, CBB *out_contents, unsigned tag) {
if (tag > 0xff ||
(tag & 0x1f) == 0x1f) {
/* Long form identifier octets are not supported. Further, all current valid
* tag serializations are 8 bits. */
// Long form identifier octets are not supported. Further, all current valid
// tag serializations are 8 bits.
cbb->base->error = 1;
return 0;
}

if (!CBB_flush(cbb) ||
/* |tag|'s representation matches the DER encoding. */
// |tag|'s representation matches the DER encoding.
!CBB_add_u8(cbb, (uint8_t)tag)) {
return 0;
}
@@ -451,11 +451,11 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) {
uint8_t byte = (value >> 8*(7-i)) & 0xff;
if (!started) {
if (byte == 0) {
/* Don't encode leading zeros. */
// Don't encode leading zeros.
continue;
}
/* If the high bit is set, add a padding byte to make it
* unsigned. */
// If the high bit is set, add a padding byte to make it
// unsigned.
if ((byte & 0x80) && !CBB_add_u8(&child, 0)) {
return 0;
}
@@ -466,7 +466,7 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) {
}
}

/* 0 is encoded as a single 0, not the empty string. */
// 0 is encoded as a single 0, not the empty string.
if (!started && !CBB_add_u8(&child, 0)) {
return 0;
}


crypto/bytestring/cbs.c  +30 -31

@@ -190,13 +190,13 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag,
return 0;
}

/* ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag
* number no greater than 30.
*
* If the number portion is 31 (0x1f, the largest value that fits in the
* allotted bits), then the tag is more than one byte long and the
* continuation bytes contain the tag number. This parser only supports tag
* numbers less than 31 (and thus single-byte tags). */
// ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag
// number no greater than 30.
//
// If the number portion is 31 (0x1f, the largest value that fits in the
// allotted bits), then the tag is more than one byte long and the
// continuation bytes contain the tag number. This parser only supports tag
// numbers less than 31 (and thus single-byte tags).
if ((tag & 0x1f) == 0x1f) {
return 0;
}
@@ -206,52 +206,51 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag,
}

size_t len;
/* The format for the length encoding is specified in ITU-T X.690 section
* 8.1.3. */
// The format for the length encoding is specified in ITU-T X.690 section
// 8.1.3.
if ((length_byte & 0x80) == 0) {
/* Short form length. */
// Short form length.
len = ((size_t) length_byte) + 2;
if (out_header_len != NULL) {
*out_header_len = 2;
}
} else {
/* The high bit indicate that this is the long form, while the next 7 bits
* encode the number of subsequent octets used to encode the length (ITU-T
* X.690 clause 8.1.3.5.b). */
// The high bit indicate that this is the long form, while the next 7 bits
// encode the number of subsequent octets used to encode the length (ITU-T
// X.690 clause 8.1.3.5.b).
const size_t num_bytes = length_byte & 0x7f;
uint32_t len32;

if (ber_ok && (tag & CBS_ASN1_CONSTRUCTED) != 0 && num_bytes == 0) {
/* indefinite length */
// indefinite length
if (out_header_len != NULL) {
*out_header_len = 2;
}
return CBS_get_bytes(cbs, out, 2);
}

/* ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be
* used as the first byte of the length. If this parser encounters that
* value, num_bytes will be parsed as 127, which will fail the check below.
*/
// ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be
// used as the first byte of the length. If this parser encounters that
// value, num_bytes will be parsed as 127, which will fail the check below.
if (num_bytes == 0 || num_bytes > 4) {
return 0;
}
if (!cbs_get_u(&header, &len32, num_bytes)) {
return 0;
}
/* ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
* with the minimum number of octets. */
// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
// with the minimum number of octets.
if (len32 < 128) {
/* Length should have used short-form encoding. */
// Length should have used short-form encoding.
return 0;
}
if ((len32 >> ((num_bytes-1)*8)) == 0) {
/* Length should have been at least one byte shorter. */
// Length should have been at least one byte shorter.
return 0;
}
len = len32;
if (len + 2 + num_bytes < len) {
/* Overflow. */
// Overflow.
return 0;
}
len += 2 + num_bytes;
@@ -338,23 +337,23 @@ int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) {
size_t len = CBS_len(&bytes);

if (len == 0) {
/* An INTEGER is encoded with at least one octet. */
// An INTEGER is encoded with at least one octet.
return 0;
}

if ((data[0] & 0x80) != 0) {
/* Negative number. */
// Negative number.
return 0;
}

if (data[0] == 0 && len > 1 && (data[1] & 0x80) == 0) {
/* Extra leading zeros. */
// Extra leading zeros.
return 0;
}

for (size_t i = 0; i < len; i++) {
if ((*out >> 56) != 0) {
/* Too large to represent as a uint64_t. */
// Too large to represent as a uint64_t.
return 0;
}
*out <<= 8;
@@ -462,7 +461,7 @@ int CBS_is_valid_asn1_bitstring(const CBS *cbs) {
return 1;
}

/* All num_unused_bits bits must exist and be zeros. */
// All num_unused_bits bits must exist and be zeros.
uint8_t last;
if (!CBS_get_last_u8(&in, &last) ||
(last & ((1 << num_unused_bits) - 1)) != 0) {
@@ -480,9 +479,9 @@ int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit) {
const unsigned byte_num = (bit >> 3) + 1;
const unsigned bit_num = 7 - (bit & 7);

/* Unused bits are zero, and this function does not distinguish between
* missing and unset bits. Thus it is sufficient to do a byte-level length
* check. */
// Unused bits are zero, and this function does not distinguish between
// missing and unset bits. Thus it is sufficient to do a byte-level length
// check.
return byte_num < CBS_len(cbs) &&
(CBS_data(cbs)[byte_num] & (1 << bit_num)) != 0;
}

crypto/bytestring/internal.h  +38 -38

@@ -22,54 +22,54 @@ extern "C" {
#endif


/* CBS_asn1_ber_to_der reads a BER element from |in|. If it finds
* indefinite-length elements or constructed strings then it converts the BER
* data to DER and sets |*out| and |*out_length| to describe a malloced buffer
* containing the DER data. Additionally, |*in| will be advanced over the BER
* element.
*
* If it doesn't find any indefinite-length elements or constructed strings then
* it sets |*out| to NULL and |*in| is unmodified.
*
* This function should successfully process any valid BER input, however it
* will not convert all of BER's deviations from DER. BER is ambiguous between
* implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed
* strings. Implicitly-tagged strings must be parsed with
* |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller
* must also account for BER variations in the contents of a primitive.
*
* It returns one on success and zero otherwise. */
// CBS_asn1_ber_to_der reads a BER element from |in|. If it finds
// indefinite-length elements or constructed strings then it converts the BER
// data to DER and sets |*out| and |*out_length| to describe a malloced buffer
// containing the DER data. Additionally, |*in| will be advanced over the BER
// element.
//
// If it doesn't find any indefinite-length elements or constructed strings then
// it sets |*out| to NULL and |*in| is unmodified.
//
// This function should successfully process any valid BER input, however it
// will not convert all of BER's deviations from DER. BER is ambiguous between
// implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed
// strings. Implicitly-tagged strings must be parsed with
// |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller
// must also account for BER variations in the contents of a primitive.
//
// It returns one on success and zero otherwise.
OPENSSL_EXPORT int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len);

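A usage sketch for the converter (not part of this change): when no conversion is needed the output pointer stays NULL and the original buffer can be parsed directly.

// Sketch: normalize possibly-BER |data| before strict DER parsing.
static int example_parse_maybe_ber(const uint8_t *data, size_t len) {
  CBS in, der;
  CBS_init(&in, data, len);
  uint8_t *storage = NULL;
  size_t storage_len;
  if (!CBS_asn1_ber_to_der(&in, &storage, &storage_len)) {
    return 0;
  }
  if (storage != NULL) {
    CBS_init(&der, storage, storage_len);  // converted, malloced copy
  } else {
    CBS_init(&der, data, len);             // input was already DER-compatible
  }
  // ... parse |der| with CBS_get_asn1 and friends here ...
  OPENSSL_free(storage);  // OPENSSL_free(NULL) is a no-op
  return 1;
}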
/* CBS_get_asn1_implicit_string parses a BER string of primitive type
* |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the
* contents. If concatenation was needed, it sets |*out_storage| to a buffer
* which the caller must release with |OPENSSL_free|. Otherwise, it sets
* |*out_storage| to NULL.
*
* This function does not parse all of BER. It requires the string be
* definite-length. Constructed strings are allowed, but all children of the
* outermost element must be primitive. The caller should use
* |CBS_asn1_ber_to_der| before running this function.
*
* It returns one on success and zero otherwise. */
// CBS_get_asn1_implicit_string parses a BER string of primitive type
// |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the
// contents. If concatenation was needed, it sets |*out_storage| to a buffer
// which the caller must release with |OPENSSL_free|. Otherwise, it sets
// |*out_storage| to NULL.
//
// This function does not parse all of BER. It requires the string be
// definite-length. Constructed strings are allowed, but all children of the
// outermost element must be primitive. The caller should use
// |CBS_asn1_ber_to_der| before running this function.
//
// It returns one on success and zero otherwise.
OPENSSL_EXPORT int CBS_get_asn1_implicit_string(CBS *in, CBS *out,
uint8_t **out_storage,
unsigned outer_tag,
unsigned inner_tag);

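And a sketch of the companion call for an implicitly tagged [0] OCTET STRING, as the comment above describes (illustrative only).

// Sketch: read an implicitly tagged [0] OCTET STRING that may have been a
// constructed BER string before |CBS_asn1_ber_to_der| ran.
static int example_implicit_octet_string(CBS *in) {
  CBS contents;
  uint8_t *storage = NULL;
  if (!CBS_get_asn1_implicit_string(in, &contents, &storage,
                                    CBS_ASN1_CONTEXT_SPECIFIC | 0,
                                    CBS_ASN1_OCTETSTRING)) {
    return 0;
  }
  // ... use |contents| here; it may alias |storage| when concatenation was
  // needed, so only free |storage| once |contents| is no longer used ...
  OPENSSL_free(storage);
  return 1;
}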
/* CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized
* with |CBB_init|. If |outp| is not NULL then the result is written to |*outp|
* and |*outp| is advanced just past the output. It returns the number of bytes
* in the result, whether written or not, or a negative value on error. On
* error, it calls |CBB_cleanup| on |cbb|.
*
* This function may be used to help implement legacy i2d ASN.1 functions. */
// CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized
// with |CBB_init|. If |outp| is not NULL then the result is written to |*outp|
// and |*outp| is advanced just past the output. It returns the number of bytes
// in the result, whether written or not, or a negative value on error. On
// error, it calls |CBB_cleanup| on |cbb|.
//
// This function may be used to help implement legacy i2d ASN.1 functions.
int CBB_finish_i2d(CBB *cbb, uint8_t **outp);

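To illustrate the comment above (hypothetical type and field names, not from the tree): a legacy-style i2d function built on CBB and |CBB_finish_i2d|.

// Hypothetical example: i2d-style encoder for a SEQUENCE holding one INTEGER.
static int i2d_EXAMPLE_COUNTER(uint64_t value, uint8_t **outp) {
  CBB cbb, child;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_asn1(&cbb, &child, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&child, value)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  // On success this returns the encoded length and advances |*outp| if it is
  // non-NULL; on failure it returns a negative value and has already cleaned
  // up |cbb|.
  return CBB_finish_i2d(&cbb, outp);
}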

#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_BYTESTRING_INTERNAL_H */
#endif // OPENSSL_HEADER_BYTESTRING_INTERNAL_H

+ 7  - 7  crypto/chacha/chacha.c

@@ -12,7 +12,7 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

/* Adapted from the public domain, estream code by D. Bernstein. */
// Adapted from the public domain, estream code by D. Bernstein.

#include <openssl/chacha.h>

@@ -32,7 +32,7 @@
(defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))

/* ChaCha20_ctr32 is defined in asm/chacha-*.pl. */
// ChaCha20_ctr32 is defined in asm/chacha-*.pl.
void ChaCha20_ctr32(uint8_t *out, const uint8_t *in, size_t in_len,
const uint32_t key[8], const uint32_t counter[4]);

@@ -48,7 +48,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len,

const uint32_t *key_ptr = (const uint32_t *)key;
#if !defined(OPENSSL_X86) && !defined(OPENSSL_X86_64)
/* The assembly expects the key to be four-byte aligned. */
// The assembly expects the key to be four-byte aligned.
uint32_t key_u32[8];
if ((((uintptr_t)key) & 3) != 0) {
key_u32[0] = U8TO32_LITTLE(key + 0);
@@ -69,7 +69,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len,

#else

/* sigma contains the ChaCha constants, which happen to be an ASCII string. */
// sigma contains the ChaCha constants, which happen to be an ASCII string.
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
'2', '-', 'b', 'y', 't', 'e', ' ', 'k' };

@@ -83,15 +83,15 @@ static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
(p)[3] = (v >> 24) & 0xff; \
}

/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. */
// QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round.
#define QUARTERROUND(a, b, c, d) \
x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 16); \
x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 12); \
x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 8); \
x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 7);

/* chacha_core performs 20 rounds of ChaCha on the input words in
* |input| and writes the 64 output bytes to |output|. */
// chacha_core performs 20 rounds of ChaCha on the input words in
// |input| and writes the 64 output bytes to |output|.
static void chacha_core(uint8_t output[64], const uint32_t input[16]) {
uint32_t x[16];
int i;


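For reference (not part of this change), the public entry point both the assembly and C paths above implement is used roughly like this:

// Usage sketch of CRYPTO_chacha_20 from <openssl/chacha.h>: XORs |in_len|
// bytes of ChaCha20 keystream into |out|, starting at block |counter|.
static void example_chacha(uint8_t *out, const uint8_t *in, size_t in_len) {
  static const uint8_t kKey[32] = {0};    // example key only; never reuse
  static const uint8_t kNonce[12] = {0};  // (key, nonce) pairs in real code
  CRYPTO_chacha_20(out, in, in_len, kKey, kNonce, 0 /* initial counter */);
}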
+ 3  - 3  crypto/cipher_extra/aead_test.cc

@@ -393,14 +393,14 @@ TEST_P(PerAEADTest, CleanupAfterInitFailure) {
9999 /* a silly tag length to trigger an error */, NULL /* ENGINE */));
ERR_clear_error();

/* Running a second, failed _init should not cause a memory leak. */
// Running a second, failed _init should not cause a memory leak.
ASSERT_FALSE(EVP_AEAD_CTX_init(
&ctx, aead(), key, key_len,
9999 /* a silly tag length to trigger an error */, NULL /* ENGINE */));
ERR_clear_error();

/* Calling _cleanup on an |EVP_AEAD_CTX| after a failed _init should be a
* no-op. */
// Calling _cleanup on an |EVP_AEAD_CTX| after a failed _init should be a
// no-op.
EVP_AEAD_CTX_cleanup(&ctx);
}



+ 14  - 14  crypto/cipher_extra/e_aesctrhmac.c

@@ -66,13 +66,13 @@ static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key,

if (key_len < hmac_key_len) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; /* EVP_AEAD_CTX_init should catch this. */
return 0; // EVP_AEAD_CTX_init should catch this.
}

const size_t aes_key_len = key_len - hmac_key_len;
if (aes_key_len != 16 && aes_key_len != 32) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; /* EVP_AEAD_CTX_init should catch this. */
return 0; // EVP_AEAD_CTX_init should catch this.
}

if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
@@ -131,7 +131,7 @@ static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH],
SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN);
SHA256_Update(&sha256, ad, ad_len);

/* Pad with zeros to the end of the SHA-256 block. */
// Pad with zeros to the end of the SHA-256 block.
const unsigned num_padding =
(SHA256_CBLOCK - ((sizeof(uint64_t)*2 +
EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) %
@@ -154,8 +154,8 @@ static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH],
static void aead_aes_ctr_hmac_sha256_crypt(
const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out,
const uint8_t *in, size_t len, const uint8_t *nonce) {
/* Since the AEAD operation is one-shot, keeping a buffer of unused keystream
* bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. */
// Since the AEAD operation is one-shot, keeping a buffer of unused keystream
// bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it.
uint8_t partial_block_buffer[AES_BLOCK_SIZE];
unsigned partial_block_offset = 0;
OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer));
@@ -184,7 +184,7 @@ static int aead_aes_ctr_hmac_sha256_seal_scatter(
const uint64_t in_len_64 = in_len;

if (in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) {
/* This input is so large it would overflow the 32-bit block counter. */
// This input is so large it would overflow the 32-bit block counter.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
return 0;
}
@@ -242,10 +242,10 @@ static int aead_aes_ctr_hmac_sha256_open_gather(

static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = {
16 /* AES key */ + 32 /* HMAC key */,
12, /* nonce length */
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
12, // nonce length
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_ctr_hmac_sha256_init,
NULL /* init_with_direction */,
@@ -259,10 +259,10 @@ static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = {

static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = {
32 /* AES key */ + 32 /* HMAC key */,
12, /* nonce length */
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
12, // nonce length
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead
EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_ctr_hmac_sha256_init,
NULL /* init_with_direction */,


+ 97  - 97  crypto/cipher_extra/e_aesgcmsiv.c

@@ -29,20 +29,20 @@

#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM)

/* Optimised AES-GCM-SIV */
// Optimised AES-GCM-SIV

struct aead_aes_gcm_siv_asm_ctx {
alignas(16) uint8_t key[16*15];
int is_128_bit;
};

/* aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
* |out_expanded_key|. */
// aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
// |out_expanded_key|.
extern void aes128gcmsiv_aes_ks(
const uint8_t key[16], uint8_t out_expanded_key[16*15]);

/* aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
* |out_expanded_key|. */
// aes256gcmsiv_aes_ks writes an AES-256 key schedule for |key| to
// |out_expanded_key|.
extern void aes256gcmsiv_aes_ks(
const uint8_t key[16], uint8_t out_expanded_key[16*15]);

@@ -52,7 +52,7 @@ static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,

if (key_bits != 128 && key_bits != 256) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; /* EVP_AEAD_CTX_init should catch this. */
return 0; // EVP_AEAD_CTX_init should catch this.
}

if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
@@ -70,7 +70,7 @@ static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
return 0;
}

/* malloc should return a 16-byte-aligned address. */
// malloc should return a 16-byte-aligned address.
assert((((uintptr_t)gcm_siv_ctx) & 15) == 0);

if (key_bits == 128) {
@@ -92,123 +92,123 @@ static void aead_aes_gcm_siv_asm_cleanup(EVP_AEAD_CTX *ctx) {
OPENSSL_free(gcm_siv_asm_ctx);
}

/* aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to
* include a number (|in_blocks|) of 16-byte blocks of data from |in|, given
* the POLYVAL key in |key|. */
// aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to
// include a number (|in_blocks|) of 16-byte blocks of data from |in|, given
// the POLYVAL key in |key|.
extern void aesgcmsiv_polyval_horner(const uint8_t in_out_poly[16],
const uint8_t key[16], const uint8_t *in,
size_t in_blocks);

/* aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|. */
// aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable_init(uint8_t out_htable[16 * 8],
const uint8_t auth_key[16]);

/* aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|. */
// aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable6_init(uint8_t out_htable[16 * 6],
const uint8_t auth_key[16]);

/* aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to
* include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple
* of 16.) It uses the precomputed powers of the key given in |htable|. */
// aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to
// include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple
// of 16.) It uses the precomputed powers of the key given in |htable|.
extern void aesgcmsiv_htable_polyval(const uint8_t htable[16 * 8],
const uint8_t *in, size_t in_len,
uint8_t in_out_poly[16]);

/* aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to
* |in|. (The full value of |in_len| is still used to find the authentication
* tag appended to the ciphertext, however, so must not be pre-masked.)
*
* |in| and |out| may be equal, but must not otherwise overlap.
*
* While decrypting, it updates the POLYVAL value found at the beginning of
* |in_out_calculated_tag_and_scratch| and writes the updated value back before
* return. During executation, it may use the whole of this space for other
* purposes. In order to decrypt and update the POLYVAL value, it uses the
* expanded key from |key| and the table of powers in |htable|. */
// aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to
// |in|. (The full value of |in_len| is still used to find the authentication
// tag appended to the ciphertext, however, so must not be pre-masked.)
//
// |in| and |out| may be equal, but must not otherwise overlap.
//
// While decrypting, it updates the POLYVAL value found at the beginning of
// |in_out_calculated_tag_and_scratch| and writes the updated value back before
// return. During execution, it may use the whole of this space for other
// purposes. In order to decrypt and update the POLYVAL value, it uses the
// expanded key from |key| and the table of powers in |htable|.
extern void aes128gcmsiv_dec(const uint8_t *in, uint8_t *out,
uint8_t in_out_calculated_tag_and_scratch[16 * 8],
const uint8_t htable[16 * 6],
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256. */
// aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256.
extern void aes256gcmsiv_dec(const uint8_t *in, uint8_t *out,
uint8_t in_out_calculated_tag_and_scratch[16 * 8],
const uint8_t htable[16 * 6],
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from
* |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of
* the nonce are used, 16 bytes are read and so the value must be
* right-padded. */
// aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from
// |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of
// the nonce are used, 16 bytes are read and so the value must be
// right-padded.
extern void aes128gcmsiv_kdf(const uint8_t nonce[16],
uint64_t out_key_material[8],
const uint8_t *key_schedule);

/* aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256. */
// aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256.
extern void aes256gcmsiv_kdf(const uint8_t nonce[16],
uint64_t out_key_material[12],
const uint8_t *key_schedule);

/* aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in
* |key|, writes the expanded key to |out_expanded_key| and encrypts a single
* block from |in| to |out|. */
// aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in
// |key|, writes the expanded key to |out_expanded_key| and encrypts a single
// block from |in| to |out|.
extern void aes128gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
uint8_t out_expanded_key[16 * 15],
const uint64_t key[2]);

/* aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for
* AES-256. */
// aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for
// AES-256.
extern void aes256gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
uint8_t out_expanded_key[16 * 15],
const uint64_t key[4]);

/* aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using
* the expanded key in |expanded_key|. */
// aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using
// the expanded key in |expanded_key|.
extern void aes128gcmsiv_ecb_enc_block(
const uint8_t in[16], uint8_t out[16],
const struct aead_aes_gcm_siv_asm_ctx *expanded_key);

/* aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for
* AES-256. */
// aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for
// AES-256.
extern void aes256gcmsiv_ecb_enc_block(
const uint8_t in[16], uint8_t out[16],
const struct aead_aes_gcm_siv_asm_ctx *expanded_key);

/* aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the
* expanded key from |key|. (The value of |in_len| must be a multiple of 16.)
* The |in| and |out| buffers may be equal but must not otherwise overlap. The
* initial counter is constructed from the given |tag| as required by
* AES-GCM-SIV. */
// aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the
// expanded key from |key|. (The value of |in_len| must be a multiple of 16.)
// The |in| and |out| buffers may be equal but must not otherwise overlap. The
// initial counter is constructed from the given |tag| as required by
// AES-GCM-SIV.
extern void aes128gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for
* AES-256. */
// aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for
// AES-256.
extern void aes256gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is
* optimised for longer messages. */
// aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes128gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is
* optimised for longer messages. */
// aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes256gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);

/* gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext
* and AD. The result is written to |out_tag|. */
// gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext
// and AD. The result is written to |out_tag|.
static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in,
size_t in_len, const uint8_t *ad, size_t ad_len,
const uint8_t auth_key[16],
@@ -268,10 +268,10 @@ static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in,
out_tag[15] &= 0x7f;
}

/* aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption
* (same thing in CTR mode) of the final block of a plaintext/ciphertext. It
* writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter
* derived from |tag|. */
// aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption
// (same thing in CTR mode) of the final block of a plaintext/ciphertext. It
// writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter
// derived from |tag|.
static void aead_aes_gcm_siv_asm_crypt_last_block(
int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len,
const uint8_t tag[16],
@@ -299,8 +299,8 @@ static void aead_aes_gcm_siv_asm_crypt_last_block(
}
}

/* aead_aes_gcm_siv_kdf calculates the record encryption and authentication
* keys given the |nonce|. */
// aead_aes_gcm_siv_kdf calculates the record encryption and authentication
// keys given the |nonce|.
static void aead_aes_gcm_siv_kdf(
int is_128_bit, const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx,
uint64_t out_record_auth_key[2], uint64_t out_record_enc_key[4],
@@ -433,8 +433,8 @@ static int aead_aes_gcm_siv_asm_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
} else {
aes256gcmsiv_aes_ks((const uint8_t *) record_enc_key, &expanded_key.key[0]);
}
/* calculated_tag is 16*8 bytes, rather than 16 bytes, because
* aes[128|256]gcmsiv_dec uses the extra as scratch space. */
// calculated_tag is 16*8 bytes, rather than 16 bytes, because
// aes[128|256]gcmsiv_dec uses the extra as scratch space.
alignas(16) uint8_t calculated_tag[16 * 8] = {0};

OPENSSL_memset(calculated_tag, 0, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
@@ -507,11 +507,11 @@ static int aead_aes_gcm_siv_asm_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
}

static const EVP_AEAD aead_aes_128_gcm_siv_asm = {
16, /* key length */
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
16, // key length
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_gcm_siv_asm_init,
NULL /* init_with_direction */,
@@ -524,11 +524,11 @@ static const EVP_AEAD aead_aes_128_gcm_siv_asm = {
};

static const EVP_AEAD aead_aes_256_gcm_siv_asm = {
32, /* key length */
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
32, // key length
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_gcm_siv_asm_init,
NULL /* init_with_direction */,
@@ -540,7 +540,7 @@ static const EVP_AEAD aead_aes_256_gcm_siv_asm = {
NULL /* tag_len */,
};

#endif /* X86_64 && !NO_ASM */
#endif // X86_64 && !NO_ASM

struct aead_aes_gcm_siv_ctx {
union {
@@ -557,7 +557,7 @@ static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key,

if (key_bits != 128 && key_bits != 256) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; /* EVP_AEAD_CTX_init should catch this. */
return 0; // EVP_AEAD_CTX_init should catch this.
}

if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
@@ -590,13 +590,13 @@ static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) {
OPENSSL_free(gcm_siv_ctx);
}

/* gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from
* |in| to |out|, using the block function |enc_block| with |key| in counter
* mode, starting at |initial_counter|. This differs from the traditional
* counter mode code in that the counter is handled little-endian, only the
* first four bytes are used and the GCM-SIV tweak to the final byte is
* applied. The |in| and |out| pointers may be equal but otherwise must not
* alias. */
// gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from
// |in| to |out|, using the block function |enc_block| with |key| in counter
// mode, starting at |initial_counter|. This differs from the traditional
// counter mode code in that the counter is handled little-endian, only the
// first four bytes are used and the GCM-SIV tweak to the final byte is
// applied. The |in| and |out| pointers may be equal but otherwise must not
// alias.
static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len,
const uint8_t initial_counter[AES_BLOCK_SIZE],
block128_f enc_block, const AES_KEY *key) {
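As an aside (not from the diff), the counter handling described above can be pictured as: the initial counter block is the tag with the top bit of the final byte forced on, and only bytes 0..3 behave as a little-endian 32-bit counter.

// Illustrative helper mirroring the comment above (not BoringSSL API):
// advance a GCM-SIV style counter block by one, touching only bytes 0..3.
static void example_gcm_siv_counter_inc(uint8_t counter[16]) {
  uint32_t ctr = (uint32_t)counter[0] | ((uint32_t)counter[1] << 8) |
                 ((uint32_t)counter[2] << 16) | ((uint32_t)counter[3] << 24);
  ctr++;  // wraps mod 2^32; bytes 4..15 stay fixed
  counter[0] = (uint8_t)ctr;
  counter[1] = (uint8_t)(ctr >> 8);
  counter[2] = (uint8_t)(ctr >> 16);
  counter[3] = (uint8_t)(ctr >> 24);
}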
@@ -626,8 +626,8 @@ static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len,
}
}

/* gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and
* AD. The result is written to |out_tag|. */
// gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and
// AD. The result is written to |out_tag|.
static void gcm_siv_polyval(
uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad,
size_t ad_len, const uint8_t auth_key[16],
@@ -671,7 +671,7 @@ static void gcm_siv_polyval(
out_tag[15] &= 0x7f;
}

/* gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. */
// gcm_siv_record_keys contains the keys used for a specific GCM-SIV record.
struct gcm_siv_record_keys {
uint8_t auth_key[16];
union {
@@ -681,8 +681,8 @@ struct gcm_siv_record_keys {
block128_f enc_block;
};

/* gcm_siv_keys calculates the keys for a specific GCM-SIV record with the
* given nonce and writes them to |*out_keys|. */
// gcm_siv_keys calculates the keys for a specific GCM-SIV record with the
// given nonce and writes them to |*out_keys|.
static void gcm_siv_keys(
const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx,
struct gcm_siv_record_keys *out_keys,
@@ -793,11 +793,11 @@ static int aead_aes_gcm_siv_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
}

static const EVP_AEAD aead_aes_128_gcm_siv = {
16, /* key length */
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
16, // key length
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_gcm_siv_init,
NULL /* init_with_direction */,
@@ -810,11 +810,11 @@ static const EVP_AEAD aead_aes_128_gcm_siv = {
};

static const EVP_AEAD aead_aes_256_gcm_siv = {
32, /* key length */
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
0, /* seal_scatter_supports_extra_in */
32, // key length
EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead
EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length
0, // seal_scatter_supports_extra_in

aead_aes_gcm_siv_init,
NULL /* init_with_direction */,
@@ -859,4 +859,4 @@ const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) {
return &aead_aes_256_gcm_siv;
}

#endif /* X86_64 && !NO_ASM */
#endif // X86_64 && !NO_ASM
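For orientation (not part of the change), the AEAD vtables above are consumed through the generic EVP_AEAD interface, roughly as follows; error handling and nonce management are elided, and nonces must never repeat under a given key.

// Usage sketch: seal one message with AES-256-GCM-SIV.
static int example_gcm_siv_seal(uint8_t *out, size_t *out_len,
                                size_t max_out_len, const uint8_t key[32],
                                const uint8_t nonce[12], const uint8_t *in,
                                size_t in_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_256_gcm_siv(), key, 32,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL /* ENGINE */)) {
    return 0;
  }
  // |max_out_len| must allow for in_len plus the 16-byte tag overhead.
  int ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out_len, nonce, 12, in,
                             in_len, NULL, 0);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}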

+ 25  - 25  crypto/cipher_extra/e_chacha20poly1305.c

@@ -120,7 +120,7 @@ static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
}

if (key_len != sizeof(c20_ctx->key)) {
return 0; /* internal error - EVP_AEAD_CTX_init should catch this. */
return 0; // internal error - EVP_AEAD_CTX_init should catch this.
}

c20_ctx = OPENSSL_malloc(sizeof(struct aead_chacha20_poly1305_ctx));
@@ -152,7 +152,7 @@ static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) {
CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes));
}

/* calc_tag fills |tag| with the authentication tag for the given inputs. */
// calc_tag fills |tag| with the authentication tag for the given inputs.
static void calc_tag(uint8_t tag[POLY1305_TAG_LEN],
const struct aead_chacha20_poly1305_ctx *c20_ctx,
const uint8_t nonce[12], const uint8_t *ad, size_t ad_len,
@@ -164,7 +164,7 @@ static void calc_tag(uint8_t tag[POLY1305_TAG_LEN],
CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key),
c20_ctx->key, nonce, 0);

static const uint8_t padding[16] = { 0 }; /* Padding is all zeros. */
static const uint8_t padding[16] = { 0 }; // Padding is all zeros.
poly1305_state ctx;
CRYPTO_poly1305_init(&ctx, poly1305_key);
CRYPTO_poly1305_update(&ctx, ad, ad_len);
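To make the tag layout concrete (a sketch of the RFC 7539 construction, not code from the diff): the Poly1305 input is the AD, zero padding to a 16-byte boundary, the ciphertext, more zero padding, and finally both lengths as little-endian 64-bit values, keyed by the one-time Poly1305 key derived from ChaCha20 block zero as shown above.

// Illustrative sketch of the Poly1305 input layout used by calc_tag.
static void example_poly1305_tag(uint8_t tag[16], const uint8_t key[32],
                                 const uint8_t *ad, size_t ad_len,
                                 const uint8_t *ct, size_t ct_len) {
  static const uint8_t padding[16] = {0};
  uint8_t lengths[16];
  for (unsigned i = 0; i < 8; i++) {
    lengths[i] = (uint8_t)((uint64_t)ad_len >> (8 * i));
    lengths[8 + i] = (uint8_t)((uint64_t)ct_len >> (8 * i));
  }
  poly1305_state st;
  CRYPTO_poly1305_init(&st, key);
  CRYPTO_poly1305_update(&st, ad, ad_len);
  CRYPTO_poly1305_update(&st, padding, (16 - (ad_len & 15)) & 15);
  CRYPTO_poly1305_update(&st, ct, ct_len);
  CRYPTO_poly1305_update(&st, padding, (16 - (ct_len & 15)) & 15);
  CRYPTO_poly1305_update(&st, lengths, sizeof(lengths));
  CRYPTO_poly1305_finish(&st, tag);
}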
@@ -203,12 +203,12 @@ static int aead_chacha20_poly1305_seal_scatter(
return 0;
}

/* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
* individual operations that work on more than 256GB at a time.
* |in_len_64| is needed because, on 32-bit platforms, size_t is only
* 32-bits and this produces a warning because it's always false.
* Casting to uint64_t inside the conditional is not sufficient to stop
* the warning. */
// |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
// individual operations that work on more than 256GB at a time.
// |in_len_64| is needed because, on 32-bit platforms, size_t is only
// 32-bits and this produces a warning because it's always false.
// Casting to uint64_t inside the conditional is not sufficient to stop
// the warning.
const uint64_t in_len_64 = in_len;
if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
@@ -220,8 +220,8 @@ static int aead_chacha20_poly1305_seal_scatter(
return 0;
}

/* The the extra input is given, it is expected to be very short and so is
* encrypted byte-by-byte first. */
// If the extra input is given, it is expected to be very short and so is
// encrypted byte-by-byte first.
if (extra_in_len) {
static const size_t kChaChaBlockSize = 64;
uint32_t block_counter = 1 + (in_len / kChaChaBlockSize);
@@ -275,12 +275,12 @@ static int aead_chacha20_poly1305_open_gather(
return 0;
}

/* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
* individual operations that work on more than 256GB at a time.
* |in_len_64| is needed because, on 32-bit platforms, size_t is only
* 32-bits and this produces a warning because it's always false.
* Casting to uint64_t inside the conditional is not sufficient to stop
* the warning. */
// |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
// individual operations that work on more than 256GB at a time.
// |in_len_64| is needed because, on 32-bit platforms, size_t is only
// 32-bits and this produces a warning because it's always false.
// Casting to uint64_t inside the conditional is not sufficient to stop
// the warning.
const uint64_t in_len_64 = in_len;
if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
@@ -307,20 +307,20 @@ static int aead_chacha20_poly1305_open_gather(
}

static const EVP_AEAD aead_chacha20_poly1305 = {
32, /* key len */
12, /* nonce len */
POLY1305_TAG_LEN, /* overhead */
POLY1305_TAG_LEN, /* max tag length */
1, /* seal_scatter_supports_extra_in */
32, // key len
12, // nonce len
POLY1305_TAG_LEN, // overhead
POLY1305_TAG_LEN, // max tag length
1, // seal_scatter_supports_extra_in

aead_chacha20_poly1305_init,
NULL, /* init_with_direction */
NULL, // init_with_direction
aead_chacha20_poly1305_cleanup,
NULL /* open */,
aead_chacha20_poly1305_seal_scatter,
aead_chacha20_poly1305_open_gather,
NULL, /* get_iv */
NULL, /* tag_len */
NULL, // get_iv
NULL, // tag_len
};

const EVP_AEAD *EVP_aead_chacha20_poly1305(void) {


+ 8  - 8  crypto/cipher_extra/e_rc2.c

@@ -317,7 +317,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
unsigned int c, d;

k = (uint8_t *)&key->data[0];
*k = 0; /* for if there is a zero length key */
*k = 0; // for if there is a zero length key

if (len > 128) {
len = 128;
@@ -333,7 +333,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
k[i] = data[i];
}

/* expand table */
// expand table
d = k[len - 1];
j = 0;
for (i = len; i < 128; i++, j++) {
@@ -341,7 +341,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
k[i] = d;
}

/* hmm.... key reduction to 'bits' bits */
// hmm.... key reduction to 'bits' bits

j = (bits + 7) >> 3;
i = 128 - j;
@@ -354,7 +354,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
k[i] = d;
}

/* copy from bytes into uint16_t's */
// copy from bytes into uint16_t's
ki = &(key->data[63]);
for (i = 127; i >= 0; i -= 2) {
*(ki--) = ((k[i] << 8) | k[i - 1]) & 0xffff;
@@ -362,8 +362,8 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
}

typedef struct {
int key_bits; /* effective key bits */
RC2_KEY ks; /* key schedule */
int key_bits; // effective key bits
RC2_KEY ks; // key schedule
} EVP_RC2_KEY;

static int rc2_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
@@ -399,8 +399,8 @@ static int rc2_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) {
key->key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
return 1;
case EVP_CTRL_SET_RC2_KEY_BITS:
/* Should be overridden by later call to |EVP_CTRL_INIT|, but
* people call it, so it may as well work. */
// Should be overridden by later call to |EVP_CTRL_INIT|, but
// people call it, so it may as well work.
key->key_bits = arg;
return 1;



+ 52  - 52  crypto/cipher_extra/e_ssl3.c

@@ -40,8 +40,8 @@ static int ssl3_mac(AEAD_SSL3_CTX *ssl3_ctx, uint8_t *out, unsigned *out_len,
size_t md_size = EVP_MD_CTX_size(&ssl3_ctx->md_ctx);
size_t pad_len = (md_size == 20) ? 40 : 48;

/* To allow for CBC mode which changes cipher length, |ad| doesn't include the
* length for legacy ciphers. */
// To allow for CBC mode which changes cipher length, |ad| doesn't include the
// length for legacy ciphers.
uint8_t ad_extra[2];
ad_extra[0] = (uint8_t)(in_len >> 8);
ad_extra[1] = (uint8_t)(in_len & 0xff);
@@ -135,8 +135,8 @@ static size_t aead_ssl3_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
}

const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx);
/* An overflow of |in_len + digest_len| doesn't affect the result mod
* |block_size|, provided that |block_size| is a smaller power of two. */
// An overflow of |in_len + digest_len| doesn't affect the result mod
// |block_size|, provided that |block_size| is a smaller power of two.
assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
const size_t pad_len = block_size - ((in_len + digest_len) % block_size);
return digest_len + pad_len;
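A worked instance of the computation above (illustrative numbers only):

// Example: AES-128-CBC with HMAC-SHA1, so digest_len = 20, block_size = 16.
// For in_len = 10: pad_len = 16 - ((10 + 20) % 16) = 2, tag_len = 20 + 2 = 22.
// When (in_len + digest_len) is already a multiple of 16, pad_len is a full
// 16 bytes, because CBC in SSL3/TLS always appends at least one padding byte.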
@@ -153,13 +153,13 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state;

if (!ssl3_ctx->cipher_ctx.encrypt) {
/* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */
// Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
return 0;
}

if (in_len > INT_MAX) {
/* EVP_CIPHER takes int as input. */
// EVP_CIPHER takes int as input.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
return 0;
}
@@ -179,15 +179,15 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
return 0;
}

/* Compute the MAC. This must be first in case the operation is being done
* in-place. */
// Compute the MAC. This must be first in case the operation is being done
// in-place.
uint8_t mac[EVP_MAX_MD_SIZE];
unsigned mac_len;
if (!ssl3_mac(ssl3_ctx, mac, &mac_len, ad, ad_len, in, in_len)) {
return 0;
}

/* Encrypt the input. */
// Encrypt the input.
int len;
if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in,
(int)in_len)) {
@@ -196,9 +196,9 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,

const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx);

/* Feed the MAC into the cipher in two steps. First complete the final partial
* block from encrypting the input and split the result between |out| and
* |out_tag|. Then encrypt the remainder. */
// Feed the MAC into the cipher in two steps. First complete the final partial
// block from encrypting the input and split the result between |out| and
// |out_tag|. Then encrypt the remainder.

size_t early_mac_len = (block_size - (in_len % block_size)) % block_size;
if (early_mac_len != 0) {
@@ -225,7 +225,7 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
assert(block_size <= 256);
assert(EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

/* Compute padding and feed that into the cipher. */
// Compute padding and feed that into the cipher.
uint8_t padding[256];
size_t padding_len = block_size - ((in_len + mac_len) % block_size);
OPENSSL_memset(padding, 0, padding_len - 1);
@@ -255,7 +255,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state;

if (ssl3_ctx->cipher_ctx.encrypt) {
/* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */
// Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
return 0;
}
@@ -267,8 +267,8 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
}

if (max_out_len < in_len) {
/* This requires that the caller provide space for the MAC, even though it
* will always be removed on return. */
// This requires that the caller provide space for the MAC, even though it
// will always be removed on return.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
return 0;
}
@@ -284,12 +284,12 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
}

if (in_len > INT_MAX) {
/* EVP_CIPHER takes int as input. */
// EVP_CIPHER takes int as input.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
return 0;
}

/* Decrypt to get the plaintext + MAC + padding. */
// Decrypt to get the plaintext + MAC + padding.
size_t total = 0;
int len;
if (!EVP_DecryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
@@ -302,9 +302,9 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
total += len;
assert(total == in_len);

/* Remove CBC padding and MAC. This would normally be timing-sensitive, but
* SSLv3 CBC ciphers are already broken. Support will be removed eventually.
* https://www.openssl.org/~bodo/ssl-poodle.pdf */
// Remove CBC padding and MAC. This would normally be timing-sensitive, but
// SSLv3 CBC ciphers are already broken. Support will be removed eventually.
// https://www.openssl.org/~bodo/ssl-poodle.pdf
size_t data_len;
if (EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
unsigned padding_length = out[total - 1];
@@ -312,7 +312,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
return 0;
}
/* The padding must be minimal. */
// The padding must be minimal.
if (padding_length + 1 > EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx)) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
return 0;
@@ -322,7 +322,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
data_len = total - mac_len;
}

/* Compute the MAC and compare against the one in the record. */
// Compute the MAC and compare against the one in the record.
uint8_t mac[EVP_MAX_MD_SIZE];
if (!ssl3_mac(ssl3_ctx, mac, NULL, ad, ad_len, out, data_len)) {
return 0;
@@ -378,70 +378,70 @@ static int aead_null_sha1_ssl3_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
}

static const EVP_AEAD aead_aes_128_cbc_sha1_ssl3 = {
SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
0, /* nonce len */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV)
0, // nonce len
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_128_cbc_sha1_ssl3_init,
aead_ssl3_cleanup,
aead_ssl3_open,
aead_ssl3_seal_scatter,
NULL, /* open_gather */
NULL, // open_gather
aead_ssl3_get_iv,
aead_ssl3_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_ssl3 = {
SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
0, /* nonce len */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV)
0, // nonce len
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_256_cbc_sha1_ssl3_init,
aead_ssl3_cleanup,
aead_ssl3_open,
aead_ssl3_seal_scatter,
NULL, /* open_gather */
NULL, // open_gather
aead_ssl3_get_iv,
aead_ssl3_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_ssl3 = {
SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
0, /* nonce len */
8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV)
0, // nonce len
8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_des_ede3_cbc_sha1_ssl3_init,
aead_ssl3_cleanup,
aead_ssl3_open,
aead_ssl3_seal_scatter,
NULL, /* open_gather */
NULL, // open_gather
aead_ssl3_get_iv,
aead_ssl3_tag_len,
};

static const EVP_AEAD aead_null_sha1_ssl3 = {
SHA_DIGEST_LENGTH, /* key len */
0, /* nonce len */
SHA_DIGEST_LENGTH, /* overhead (SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH, // key len
0, // nonce len
SHA_DIGEST_LENGTH, // overhead (SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_null_sha1_ssl3_init,
aead_ssl3_cleanup,
aead_ssl3_open,
aead_ssl3_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_ssl3_tag_len,
};



+ 123  - 123  crypto/cipher_extra/e_tls.c

@@ -33,12 +33,12 @@
typedef struct {
EVP_CIPHER_CTX cipher_ctx;
HMAC_CTX hmac_ctx;
/* mac_key is the portion of the key used for the MAC. It is retained
* separately for the constant-time CBC code. */
// mac_key is the portion of the key used for the MAC. It is retained
// separately for the constant-time CBC code.
uint8_t mac_key[EVP_MAX_MD_SIZE];
uint8_t mac_key_len;
/* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
* IV. */
// implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
// IV.
char implicit_iv;
} AEAD_TLS_CTX;

@@ -111,8 +111,8 @@ static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
}

const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
/* An overflow of |in_len + hmac_len| doesn't affect the result mod
* |block_size|, provided that |block_size| is a smaller power of two. */
// An overflow of |in_len + hmac_len| doesn't affect the result mod
// |block_size|, provided that |block_size| is a smaller power of two.
assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
return hmac_len + pad_len;
@@ -129,13 +129,13 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

if (!tls_ctx->cipher_ctx.encrypt) {
/* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
// Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
return 0;
}

if (in_len > INT_MAX) {
/* EVP_CIPHER takes int as input. */
// EVP_CIPHER takes int as input.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
return 0;
}
@@ -155,14 +155,14 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
return 0;
}

/* To allow for CBC mode which changes cipher length, |ad| doesn't include the
* length for legacy ciphers. */
// To allow for CBC mode which changes cipher length, |ad| doesn't include the
// length for legacy ciphers.
uint8_t ad_extra[2];
ad_extra[0] = (uint8_t)(in_len >> 8);
ad_extra[1] = (uint8_t)(in_len & 0xff);

/* Compute the MAC. This must be first in case the operation is being done
* in-place. */
// Compute the MAC. This must be first in case the operation is being done
// in-place.
uint8_t mac[EVP_MAX_MD_SIZE];
unsigned mac_len;
if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
@@ -173,14 +173,14 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
return 0;
}

/* Configure the explicit IV. */
// Configure the explicit IV.
if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
!tls_ctx->implicit_iv &&
!EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
return 0;
}

/* Encrypt the input. */
// Encrypt the input.
int len;
if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
return 0;
@@ -188,9 +188,9 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,

unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);

/* Feed the MAC into the cipher in two steps. First complete the final partial
* block from encrypting the input and split the result between |out| and
* |out_tag|. Then feed the rest. */
// Feed the MAC into the cipher in two steps. First complete the final partial
// block from encrypting the input and split the result between |out| and
// |out_tag|. Then feed the rest.

const size_t early_mac_len =
(block_size - (in_len % block_size) % block_size);
@@ -218,7 +218,7 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
assert(block_size <= 256);
assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

/* Compute padding and feed that into the cipher. */
// Compute padding and feed that into the cipher.
uint8_t padding[256];
unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
OPENSSL_memset(padding, padding_len - 1, padding_len);
@@ -232,7 +232,7 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) {
return 0;
}
assert(len == 0); /* Padding is explicit. */
assert(len == 0); // Padding is explicit.
assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len));

*out_tag_len = tag_len;
@@ -246,7 +246,7 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

if (tls_ctx->cipher_ctx.encrypt) {
/* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
// Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
return 0;
}
@@ -257,8 +257,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
}

if (max_out_len < in_len) {
/* This requires that the caller provide space for the MAC, even though it
* will always be removed on return. */
// This requires that the caller provide space for the MAC, even though it
// will always be removed on return.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
return 0;
}
@@ -274,19 +274,19 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
}

if (in_len > INT_MAX) {
/* EVP_CIPHER takes int as input. */
// EVP_CIPHER takes int as input.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
return 0;
}

/* Configure the explicit IV. */
// Configure the explicit IV.
if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
!tls_ctx->implicit_iv &&
!EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
return 0;
}

/* Decrypt to get the plaintext + MAC + padding. */
// Decrypt to get the plaintext + MAC + padding.
size_t total = 0;
int len;
if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
@@ -299,8 +299,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
total += len;
assert(total == in_len);

/* Remove CBC padding. Code from here on is timing-sensitive with respect to
* |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
// Remove CBC padding. Code from here on is timing-sensitive with respect to
// |padding_ok| and |data_plus_mac_len| for CBC ciphers.
size_t data_plus_mac_len;
crypto_word_t padding_ok;
if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
@@ -308,32 +308,32 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
&padding_ok, &data_plus_mac_len, out, total,
EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
HMAC_size(&tls_ctx->hmac_ctx))) {
/* Publicly invalid. This can be rejected in non-constant time. */
// Publicly invalid. This can be rejected in non-constant time.
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
return 0;
}
} else {
padding_ok = CONSTTIME_TRUE_W;
data_plus_mac_len = total;
/* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
* already been checked against the MAC size at the top of the function. */
// |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
// already been checked against the MAC size at the top of the function.
assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
}
size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

/* At this point, if the padding is valid, the first |data_plus_mac_len| bytes
* after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
* still large enough to extract a MAC, but it will be irrelevant. */
// At this point, if the padding is valid, the first |data_plus_mac_len| bytes
// after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
// still large enough to extract a MAC, but it will be irrelevant.

/* To allow for CBC mode which changes cipher length, |ad| doesn't include the
* length for legacy ciphers. */
// To allow for CBC mode which changes cipher length, |ad| doesn't include the
// length for legacy ciphers.
uint8_t ad_fixed[13];
OPENSSL_memcpy(ad_fixed, ad, 11);
ad_fixed[11] = (uint8_t)(data_len >> 8);
ad_fixed[12] = (uint8_t)(data_len & 0xff);
ad_len += 2;

/* Compute the MAC and extract the one in the record. */
// Compute the MAC and extract the one in the record.
uint8_t mac[EVP_MAX_MD_SIZE];
size_t mac_len;
uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
@@ -351,8 +351,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
record_mac = record_mac_tmp;
EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
} else {
/* We should support the constant-time path for all CBC-mode ciphers
* implemented. */
// We should support the constant-time path for all CBC-mode ciphers
// implemented.
assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

unsigned mac_len_u;
@@ -368,10 +368,10 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
record_mac = &out[data_len];
}

/* Perform the MAC check and the padding check in constant-time. It should be
* safe to simply perform the padding check first, but it would not be under a
* different choice of MAC location on padding failure. See
* EVP_tls_cbc_remove_padding. */
// Perform the MAC check and the padding check in constant-time. It should be
// safe to simply perform the padding check first, but it would not be under a
// different choice of MAC location on padding failure. See
// EVP_tls_cbc_remove_padding.
crypto_word_t good =
constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
good &= padding_ok;
@@ -380,7 +380,7 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
return 0;
}

/* End of timing-sensitive code. */
// End of timing-sensitive code.

*out_len = data_len;
return 1;
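For background (a sketch of the general shape, not the project's exact helpers): the constant-time checks above lean on branch-free word comparisons whose result is an all-ones or all-zero mask rather than a boolean produced by a branch.

// Illustrative branch-free equality: returns all ones if a == b, else zero.
// The real helpers live in crypto/internal.h.
static crypto_word_t example_constant_time_eq(crypto_word_t a,
                                              crypto_word_t b) {
  crypto_word_t x = a ^ b;  // zero iff a == b
  // (~x & (x - 1)) has its top bit set only when x == 0.
  crypto_word_t msb = (~x & (x - 1)) >> (sizeof(x) * 8 - 1);
  return (crypto_word_t)0 - msb;  // 0 -> 0, 1 -> all ones
}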
@@ -474,172 +474,172 @@ static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
}

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
16, /* nonce len (IV) */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 16, // key len (SHA1 + AES128)
16, // nonce len (IV)
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_128_cbc_sha1_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
0, /* nonce len */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV)
0, // nonce len
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_128_cbc_sha1_tls_implicit_iv_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
aead_tls_get_iv, /* get_iv */
NULL, // open_gather
aead_tls_get_iv, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
16, /* nonce len (IV) */
16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
SHA256_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA256_DIGEST_LENGTH + 16, // key len (SHA256 + AES128)
16, // nonce len (IV)
16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256)
SHA256_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_128_cbc_sha256_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
16, /* nonce len (IV) */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 32, // key len (SHA1 + AES256)
16, // nonce len (IV)
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_256_cbc_sha1_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
0, /* nonce len */
16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV)
0, // nonce len
16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_256_cbc_sha1_tls_implicit_iv_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
aead_tls_get_iv, /* get_iv */
NULL, // open_gather
aead_tls_get_iv, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
16, /* nonce len (IV) */
16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
SHA256_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA256_DIGEST_LENGTH + 32, // key len (SHA256 + AES256)
16, // nonce len (IV)
16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256)
SHA256_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_256_cbc_sha256_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
16, /* nonce len (IV) */
16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
SHA384_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA384_DIGEST_LENGTH + 32, // key len (SHA384 + AES256)
16, // nonce len (IV)
16 + SHA384_DIGEST_LENGTH, // overhead (padding + SHA384)
SHA384_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_aes_256_cbc_sha384_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
8, /* nonce len (IV) */
8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 24, // key len (SHA1 + 3DES)
8, // nonce len (IV)
8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_des_ede3_cbc_sha1_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
0, /* nonce len */
8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV)
0, // nonce len
8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
aead_tls_get_iv, /* get_iv */
NULL, // open_gather
aead_tls_get_iv, // get_iv
aead_tls_tag_len,
};

static const EVP_AEAD aead_null_sha1_tls = {
SHA_DIGEST_LENGTH, /* key len */
0, /* nonce len */
SHA_DIGEST_LENGTH, /* overhead (SHA1) */
SHA_DIGEST_LENGTH, /* max tag length */
0, /* seal_scatter_supports_extra_in */
SHA_DIGEST_LENGTH, // key len
0, // nonce len
SHA_DIGEST_LENGTH, // overhead (SHA1)
SHA_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in

NULL, /* init */
NULL, // init
aead_null_sha1_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, /* open_gather */
NULL, /* get_iv */
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};
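
As a worked example of the arithmetic encoded in these initializers: aead_aes_256_cbc_sha384_tls takes an 80-byte EVP_AEAD key (the 48-byte SHA-384 MAC key together with the 32-byte AES-256 key), uses the 16-byte per-record IV as its nonce, and reserves at most 16 + 48 = 64 bytes of overhead (CBC padding plus the SHA-384 tag). The _implicit_iv variants fold the IV into the key instead and declare a zero-length nonce, e.g. 20 + 24 + 8 = 52 key bytes for aead_des_ede3_cbc_sha1_tls_implicit_iv.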



+ 40
- 40
crypto/cipher_extra/internal.h

@@ -66,53 +66,53 @@ extern "C" {
#endif


/* EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC
* record in |in|. This decrypted record should not include any "decrypted"
* explicit IV. If the record is publicly invalid, it returns zero. Otherwise,
* it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the
* padding is valid and zero otherwise. It then sets |*out_len| to the length
* with the padding removed or |in_len| if invalid.
*
* If the function returns one, it runs in time independent of the contents of
* |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying
* |EVP_tls_cbc_copy_mac|'s precondition. */
// EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC
// record in |in|. This decrypted record should not include any "decrypted"
// explicit IV. If the record is publicly invalid, it returns zero. Otherwise,
// it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the
// padding is valid and zero otherwise. It then sets |*out_len| to the length
// with the padding removed or |in_len| if invalid.
//
// If the function returns one, it runs in time independent of the contents of
// |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying
// |EVP_tls_cbc_copy_mac|'s precondition.
int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
const uint8_t *in, size_t in_len,
size_t block_size, size_t mac_size);
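
A minimal caller sketch using only the declaration above (the record is assumed to be already CBC-decrypted; the 16-byte block size and 20-byte MAC size are illustrative values for AES-CBC with HMAC-SHA1, and the helper name is hypothetical):

static int strip_tls_cbc_padding(crypto_word_t *out_padding_ok, size_t *out_len,
                                 const uint8_t *decrypted, size_t decrypted_len) {
  // A zero return means the record was publicly malformed. Otherwise
  // |*out_padding_ok| is all-ones or zero and must be folded into the MAC
  // check in constant time rather than branched on.
  return EVP_tls_cbc_remove_padding(out_padding_ok, out_len, decrypted,
                                    decrypted_len, /*block_size=*/16,
                                    /*mac_size=*/20);
}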

/* EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first
* |in_len| bytes of |in| to |out| in constant time (independent of the concrete
* value of |in_len|, which may vary within a 256-byte window). |in| must point
* to a buffer of |orig_len| bytes.
*
* On entry:
* orig_len >= in_len >= md_size
* md_size <= EVP_MAX_MD_SIZE */
// EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first
// |in_len| bytes of |in| to |out| in constant time (independent of the concrete
// value of |in_len|, which may vary within a 256-byte window). |in| must point
// to a buffer of |orig_len| bytes.
//
// On entry:
// orig_len >= in_len >= md_size
// md_size <= EVP_MAX_MD_SIZE
void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
size_t in_len, size_t orig_len);
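
Continuing that sketch: after a successful EVP_tls_cbc_remove_padding call, |*out_len| >= mac_size, which is exactly the precondition needed here (again assuming a 20-byte HMAC-SHA1 MAC and hypothetical variable names; the extracted MAC would then be compared against the computed MAC with a constant-time comparison such as CRYPTO_memcmp):

static void extract_tls_cbc_mac(uint8_t out_mac[20], const uint8_t *record,
                                size_t data_and_mac_len, size_t record_len) {
  // Preconditions from the comment above: record_len >= data_and_mac_len >= 20.
  EVP_tls_cbc_copy_mac(out_mac, /*md_size=*/20, record, data_and_mac_len,
                       record_len);
}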

/* EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function
* which EVP_tls_cbc_digest_record supports. */
// EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function
// which EVP_tls_cbc_digest_record supports.
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);

/* EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS
* record.
*
* md: the hash function used in the HMAC.
* EVP_tls_cbc_record_digest_supported must return true for this hash.
* md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
* md_out_size: the number of output bytes is written here.
* header: the 13-byte, TLS record header.
* data: the record data itself
* data_plus_mac_size: the secret, reported length of the data and MAC
* once the padding has been removed.
* data_plus_mac_plus_padding_size: the public length of the whole
* record, including padding.
*
* On entry: by virtue of having been through one of the remove_padding
* functions, above, we know that data_plus_mac_size is large enough to contain
* a padding byte and MAC. (If the padding was invalid, it might contain the
* padding too. ) */
// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS
// record.
//
// md: the hash function used in the HMAC.
// EVP_tls_cbc_record_digest_supported must return true for this hash.
// md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
// md_out_size: the number of output bytes is written here.
// header: the 13-byte, TLS record header.
// data: the record data itself
// data_plus_mac_size: the secret, reported length of the data and MAC
// once the padding has been removed.
// data_plus_mac_plus_padding_size: the public length of the whole
// record, including padding.
//
// On entry: by virtue of having been through one of the remove_padding
// functions, above, we know that data_plus_mac_size is large enough to contain
// a padding byte and MAC. (If the padding was invalid, it might contain the
// padding too.)
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
size_t *md_out_size, const uint8_t header[13],
const uint8_t *data, size_t data_plus_mac_size,
@@ -122,7 +122,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H */
#endif // OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H

+ 104
- 104
crypto/cipher_extra/tls_cbc.c

@@ -62,13 +62,13 @@
#include "../fipsmodule/cipher/internal.h"


/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
* field. (SHA-384/512 have 128-bit length.) */
// MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
// field. (SHA-384/512 have 128-bit length.)
#define MAX_HASH_BIT_COUNT_BYTES 16

/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
* Currently SHA-384/512 has a 128-byte block size and that's the largest
* supported by TLS.) */
// MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
// Currently SHA-384/512 has a 128-byte block size and that's the largest
// supported by TLS.
#define MAX_HASH_BLOCK_SIZE 128

int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
@@ -76,7 +76,7 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
size_t block_size, size_t mac_size) {
const size_t overhead = 1 /* padding length byte */ + mac_size;

/* These lengths are all public so we can test them in non-constant time. */
// These lengths are all public so we can test them in non-constant time.
if (overhead > in_len) {
return 0;
}
@@ -84,16 +84,16 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
size_t padding_length = in[in_len - 1];

crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length);
/* The padding consists of a length byte at the end of the record and
* then that many bytes of padding, all with the same value as the
* length byte. Thus, with the length byte included, there are i+1
* bytes of padding.
*
* We can't check just |padding_length+1| bytes because that leaks
* decrypted information. Therefore we always have to check the maximum
* amount of padding possible. (Again, the length of the record is
* public information so we can use it.) */
size_t to_check = 256; /* maximum amount of padding, inc length byte. */
// The padding consists of a length byte at the end of the record and
// then that many bytes of padding, all with the same value as the
// length byte. Thus, with the length byte included, there are i+1
// bytes of padding.
//
// We can't check just |padding_length+1| bytes because that leaks
// decrypted information. Therefore we always have to check the maximum
// amount of padding possible. (Again, the length of the record is
// public information so we can use it.)
size_t to_check = 256; // maximum amount of padding, inc length byte.
if (to_check > in_len) {
to_check = in_len;
}
@@ -101,19 +101,19 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
for (size_t i = 0; i < to_check; i++) {
uint8_t mask = constant_time_ge_8(padding_length, i);
uint8_t b = in[in_len - 1 - i];
/* The final |padding_length+1| bytes should all have the value
* |padding_length|. Therefore the XOR should be zero. */
// The final |padding_length+1| bytes should all have the value
// |padding_length|. Therefore the XOR should be zero.
good &= ~(mask & (padding_length ^ b));
}

/* If any of the final |padding_length+1| bytes had the wrong value,
* one or more of the lower eight bits of |good| will be cleared. */
// If any of the final |padding_length+1| bytes had the wrong value,
// one or more of the lower eight bits of |good| will be cleared.
good = constant_time_eq_w(0xff, good & 0xff);

/* Always treat |padding_length| as zero on error. If, assuming block size of
* 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16
* and returned -1, distinguishing good MAC and bad padding from bad MAC and
* bad padding would give POODLE's padding oracle. */
// Always treat |padding_length| as zero on error. If, assuming block size of
// 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16
// and returned -1, distinguishing good MAC and bad padding from bad MAC and
// bad padding would give POODLE's padding oracle.
padding_length = good & (padding_length + 1);
*out_len = in_len - padding_length;
*out_padding_ok = good;
@@ -126,7 +126,7 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
uint8_t *rotated_mac = rotated_mac1;
uint8_t *rotated_mac_tmp = rotated_mac2;

/* mac_end is the index of |in| just after the end of the MAC. */
// mac_end is the index of |in| just after the end of the MAC.
size_t mac_end = in_len;
size_t mac_start = mac_end - md_size;

@@ -134,10 +134,10 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
assert(in_len >= md_size);
assert(md_size <= EVP_MAX_MD_SIZE);

/* scan_start contains the number of bytes that we can ignore because
* the MAC's position can only vary by 255 bytes. */
// scan_start contains the number of bytes that we can ignore because
// the MAC's position can only vary by 255 bytes.
size_t scan_start = 0;
/* This information is public so it's safe to branch based on it. */
// This information is public so it's safe to branch based on it.
if (orig_len > md_size + 255 + 1) {
scan_start = orig_len - (md_size + 255 + 1);
}
@@ -153,15 +153,15 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
mac_started |= is_mac_start;
uint8_t mac_ended = constant_time_ge_8(i, mac_end);
rotated_mac[j] |= in[i] & mac_started & ~mac_ended;
/* Save the offset that |mac_start| is mapped to. */
// Save the offset that |mac_start| is mapped to.
rotate_offset |= j & is_mac_start;
}

/* Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
* position. */
// Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
// position.
for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) {
/* Rotate by |offset| iff the corresponding bit is set in
* |rotate_offset|, placing the result in |rotated_mac_tmp|. */
// Rotate by |offset| iff the corresponding bit is set in
// |rotate_offset|, placing the result in |rotated_mac_tmp|.
const uint8_t skip_rotate = (rotate_offset & 1) - 1;
for (size_t i = 0, j = offset; i < md_size; i++, j++) {
if (j >= md_size) {
@@ -171,9 +171,9 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]);
}

/* Swap pointers so |rotated_mac| contains the (possibly) rotated value.
* Note the number of iterations and thus the identity of these pointers is
* public information. */
// Swap pointers so |rotated_mac| contains the (possibly) rotated value.
// Note the number of iterations and thus the identity of these pointers is
// public information.
uint8_t *tmp = rotated_mac;
rotated_mac = rotated_mac_tmp;
rotated_mac_tmp = tmp;
@@ -182,8 +182,8 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
OPENSSL_memcpy(out, rotated_mac, md_size);
}

/* u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
* big-endian order. The value of p is advanced by four. */
// u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
// big-endian order. The value of p is advanced by four.
#define u32toBE(n, p) \
do { \
*((p)++) = (uint8_t)((n) >> 24); \
@@ -192,8 +192,8 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
*((p)++) = (uint8_t)((n)); \
} while (0)

/* u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in
* big-endian order. The value of p is advanced by eight. */
// u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in
// big-endian order. The value of p is advanced by eight.
#define u64toBE(n, p) \
do { \
*((p)++) = (uint8_t)((n) >> 56); \
@@ -224,9 +224,9 @@ static void tls1_sha512_transform(HASH_CTX *ctx, const uint8_t *block) {
SHA512_Transform(&ctx->sha512, block);
}

/* These functions serialize the state of a hash and thus perform the standard
* "final" operation without adding the padding and length that such a function
* typically does. */
// These functions serialize the state of a hash and thus perform the standard
// "final" operation without adding the padding and length that such a function
// typically does.
static void tls1_sha1_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
SHA_CTX *sha1 = &ctx->sha1;
u32toBE(sha1->h[0], md_out);
@@ -272,13 +272,13 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out);
void (*md_transform)(HASH_CTX *ctx, const uint8_t *block);
unsigned md_size, md_block_size = 64;
/* md_length_size is the number of bytes in the length field that terminates
* the hash. */
// md_length_size is the number of bytes in the length field that terminates
// the hash.
unsigned md_length_size = 8;

/* Bound the acceptable input so we can forget about many possible overflows
* later in this function. This is redundant with the record size limits in
* TLS. */
// Bound the acceptable input so we can forget about many possible overflows
// later in this function. This is redundant with the record size limits in
// TLS.
if (data_plus_mac_plus_padding_size >= 1024 * 1024) {
assert(0);
return 0;
@@ -309,8 +309,8 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
break;

default:
/* EVP_tls_cbc_record_digest_supported should have been called first to
* check that the hash function is supported. */
// EVP_tls_cbc_record_digest_supported should have been called first to
// check that the hash function is supported.
assert(0);
*md_out_size = 0;
return 0;
@@ -322,45 +322,45 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,

static const size_t kHeaderLength = 13;

/* kVarianceBlocks is the number of blocks of the hash that we have to
* calculate in constant time because they could be altered by the
* padding value.
*
* TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
* required to be minimal. Therefore we say that the final six blocks
* can vary based on the padding. */
// kVarianceBlocks is the number of blocks of the hash that we have to
// calculate in constant time because they could be altered by the
// padding value.
//
// TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
// required to be minimal. Therefore we say that the final six blocks
// can vary based on the padding.
static const size_t kVarianceBlocks = 6;

/* From now on we're dealing with the MAC, which conceptually has 13
* bytes of `header' before the start of the data. */
// From now on we're dealing with the MAC, which conceptually has 13
// bytes of `header' before the start of the data.
size_t len = data_plus_mac_plus_padding_size + kHeaderLength;
/* max_mac_bytes contains the maximum bytes of bytes in the MAC, including
* |header|, assuming that there's no padding. */
// max_mac_bytes contains the maximum number of bytes in the MAC, including
// |header|, assuming that there's no padding.
size_t max_mac_bytes = len - md_size - 1;
/* num_blocks is the maximum number of hash blocks. */
// num_blocks is the maximum number of hash blocks.
size_t num_blocks =
(max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
/* In order to calculate the MAC in constant time we have to handle
* the final blocks specially because the padding value could cause the
* end to appear somewhere in the final |kVarianceBlocks| blocks and we
* can't leak where. However, |num_starting_blocks| worth of data can
* be hashed right away because no padding value can affect whether
* they are plaintext. */
// In order to calculate the MAC in constant time we have to handle
// the final blocks specially because the padding value could cause the
// end to appear somewhere in the final |kVarianceBlocks| blocks and we
// can't leak where. However, |num_starting_blocks| worth of data can
// be hashed right away because no padding value can affect whether
// they are plaintext.
size_t num_starting_blocks = 0;
/* k is the starting byte offset into the conceptual header||data where
* we start processing. */
// k is the starting byte offset into the conceptual header||data where
// we start processing.
size_t k = 0;
/* mac_end_offset is the index just past the end of the data to be
* MACed. */
// mac_end_offset is the index just past the end of the data to be
// MACed.
size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
/* c is the index of the 0x80 byte in the final hash block that
* contains application data. */
// c is the index of the 0x80 byte in the final hash block that
// contains application data.
size_t c = mac_end_offset % md_block_size;
/* index_a is the hash block number that contains the 0x80 terminating
* value. */
// index_a is the hash block number that contains the 0x80 terminating
// value.
size_t index_a = mac_end_offset / md_block_size;
/* index_b is the hash block number that contains the 64-bit hash
* length, in bits. */
// index_b is the hash block number that contains the 64-bit hash
// length, in bits.
size_t index_b = (mac_end_offset + md_length_size) / md_block_size;

if (num_blocks > kVarianceBlocks) {
@@ -368,13 +368,13 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
k = md_block_size * num_starting_blocks;
}
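
As a quick numeric check of these quantities (illustrative values only: SHA-1, so md_size = 20, md_block_size = 64 and md_length_size = 8, with data_plus_mac_size = 300 and data_plus_mac_plus_padding_size = 320): len = 320 + 13 = 333, max_mac_bytes = 333 - 20 - 1 = 312, num_blocks = (312 + 1 + 8 + 63) / 64 = 6, mac_end_offset = 300 + 13 - 20 = 293, c = 293 mod 64 = 37, and index_a = index_b = 293 / 64 = 4, i.e. the 64-bit length still fits in the same block as the 0x80 terminator.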

/* bits is the hash-length in bits. It includes the additional hash
* block for the masked HMAC key. */
size_t bits = 8 * mac_end_offset; /* at most 18 bits to represent */
// bits is the hash-length in bits. It includes the additional hash
// block for the masked HMAC key.
size_t bits = 8 * mac_end_offset; // at most 18 bits to represent

/* Compute the initial HMAC block. */
// Compute the initial HMAC block.
bits += 8 * md_block_size;
/* hmac_pad is the masked HMAC key. */
// hmac_pad is the masked HMAC key.
uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
OPENSSL_memset(hmac_pad, 0, md_block_size);
assert(mac_secret_length <= sizeof(hmac_pad));
@@ -385,7 +385,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,

md_transform(&md_state, hmac_pad);

/* The length check means |bits| fits in four bytes. */
// The length check means |bits| fits in four bytes.
uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
OPENSSL_memset(length_bytes, 0, md_length_size - 4);
length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
@@ -394,7 +394,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
length_bytes[md_length_size - 1] = (uint8_t)bits;

if (k > 0) {
/* k is a multiple of md_block_size. */
// k is a multiple of md_block_size.
uint8_t first_block[MAX_HASH_BLOCK_SIZE];
OPENSSL_memcpy(first_block, header, 13);
OPENSSL_memcpy(first_block + 13, data, md_block_size - 13);
@@ -407,10 +407,10 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
uint8_t mac_out[EVP_MAX_MD_SIZE];
OPENSSL_memset(mac_out, 0, sizeof(mac_out));

/* We now process the final hash blocks. For each block, we construct
* it in constant time. If the |i==index_a| then we'll include the 0x80
* bytes and zero pad etc. For each block we selectively copy it, in
* constant time, to |mac_out|. */
// We now process the final hash blocks. For each block, we construct
// it in constant time. If the |i==index_a| then we'll include the 0x80
// bytes and zero pad etc. For each block we selectively copy it, in
// constant time, to |mac_out|.
for (size_t i = num_starting_blocks;
i <= num_starting_blocks + kVarianceBlocks; i++) {
uint8_t block[MAX_HASH_BLOCK_SIZE];
@@ -427,24 +427,24 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,

uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c);
uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
/* If this is the block containing the end of the
* application data, and we are at the offset for the
* 0x80 value, then overwrite b with 0x80. */
// If this is the block containing the end of the
// application data, and we are at the offset for the
// 0x80 value, then overwrite b with 0x80.
b = constant_time_select_8(is_past_c, 0x80, b);
/* If this the the block containing the end of the
* application data and we're past the 0x80 value then
* just write zero. */
// If this is the block containing the end of the
// application data and we're past the 0x80 value then
// just write zero.
b = b & ~is_past_cp1;
/* If this is index_b (the final block), but not
* index_a (the end of the data), then the 64-bit
* length didn't fit into index_a and we're having to
* add an extra block of zeros. */
// If this is index_b (the final block), but not
// index_a (the end of the data), then the 64-bit
// length didn't fit into index_a and we're having to
// add an extra block of zeros.
b &= ~is_block_b | is_block_a;

/* The final bytes of one of the blocks contains the
* length. */
// The final bytes of one of the blocks contain the
// length.
if (j >= md_block_size - md_length_size) {
/* If this is index_b, write a length byte. */
// If this is index_b, write a length byte.
b = constant_time_select_8(
is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
}
@@ -453,7 +453,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,

md_transform(&md_state, block);
md_final_raw(&md_state, block);
/* If this is index_b, copy the hash value to |mac_out|. */
// If this is index_b, copy the hash value to |mac_out|.
for (size_t j = 0; j < md_size; j++) {
mac_out[j] |= block[j] & is_block_b;
}
@@ -466,7 +466,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
return 0;
}

/* Complete the HMAC in the standard manner. */
// Complete the HMAC in the standard manner.
for (size_t i = 0; i < md_block_size; i++) {
hmac_pad[i] ^= 0x6a;
}


+ 20
- 20
crypto/cmac/cmac.c

@@ -60,13 +60,13 @@

struct cmac_ctx_st {
EVP_CIPHER_CTX cipher_ctx;
/* k1 and k2 are the CMAC subkeys. See
* https://tools.ietf.org/html/rfc4493#section-2.3 */
// k1 and k2 are the CMAC subkeys. See
// https://tools.ietf.org/html/rfc4493#section-2.3
uint8_t k1[AES_BLOCK_SIZE];
uint8_t k2[AES_BLOCK_SIZE];
/* Last (possibly partial) scratch */
// Last (possibly partial) scratch
uint8_t block[AES_BLOCK_SIZE];
/* block_used contains the number of valid bytes in |block|. */
// block_used contains the number of valid bytes in |block|.
unsigned block_used;
};

@@ -124,20 +124,20 @@ void CMAC_CTX_free(CMAC_CTX *ctx) {
OPENSSL_free(ctx);
}

/* binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸)
* with a hard-coded reduction polynomial and sets |out| as x times the
* input.
*
* See https://tools.ietf.org/html/rfc4493#section-2.3 */
// binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸)
// with a hard-coded reduction polynomial and sets |out| as x times the
// input.
//
// See https://tools.ietf.org/html/rfc4493#section-2.3
static void binary_field_mul_x(uint8_t out[16], const uint8_t in[16]) {
unsigned i;

/* Shift |in| to left, including carry. */
// Shift |in| to left, including carry.
for (i = 0; i < 15; i++) {
out[i] = (in[i] << 1) | (in[i+1] >> 7);
}

/* If MSB set fixup with R. */
// If MSB set fixup with R.
const uint8_t carry = in[0] >> 7;
out[i] = (in[i] << 1) ^ ((0 - carry) & 0x87);
}
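
For illustration only, a standalone re-sketch of this doubling with a tiny check of the 0x87 reduction (the constant from RFC 4493); this is not BoringSSL code, and in CMAC the subkeys come from applying the doubling once and twice to the encryption of the all-zero block:

#include <stdint.h>
#include <stdio.h>

static void mul_x(uint8_t out[16], const uint8_t in[16]) {
  const uint8_t carry = in[0] >> 7;  // MSB of the 128-bit value
  for (int i = 0; i < 15; i++) {
    out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
  }
  out[15] = (uint8_t)((in[15] << 1) ^ ((0 - carry) & 0x87));
}

int main(void) {
  const uint8_t l[16] = {0x80};  // MSB set, so the reduction fires
  uint8_t k1[16];
  mul_x(k1, l);
  printf("%02x ... %02x\n", k1[0], k1[15]);  // prints "00 ... 87"
  return 0;
}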
@@ -152,7 +152,7 @@ int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len,
EVP_CIPHER_key_length(cipher) != key_len ||
!EVP_EncryptInit_ex(&ctx->cipher_ctx, cipher, NULL, key, kZeroIV) ||
!EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, AES_BLOCK_SIZE) ||
/* Reset context again ready for first data. */
// Reset context again ready for first data.
!EVP_EncryptInit_ex(&ctx->cipher_ctx, NULL, NULL, NULL, kZeroIV)) {
return 0;
}
@@ -183,11 +183,11 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) {
in_len -= todo;
ctx->block_used += todo;

/* If |in_len| is zero then either |ctx->block_used| is less than
* |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used|
* is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the
* latter case we don't want to process this block now because it might be
* the last block and that block is treated specially. */
// If |in_len| is zero then either |ctx->block_used| is less than
// |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used|
// is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the
// latter case we don't want to process this block now because it might be
// the last block and that block is treated specially.
if (in_len == 0) {
return 1;
}
@@ -199,7 +199,7 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) {
}
}

/* Encrypt all but one of the remaining blocks. */
// Encrypt all but one of the remaining blocks.
while (in_len > AES_BLOCK_SIZE) {
if (!EVP_Cipher(&ctx->cipher_ctx, scratch, in, AES_BLOCK_SIZE)) {
return 0;
@@ -223,8 +223,8 @@ int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len) {
const uint8_t *mask = ctx->k1;

if (ctx->block_used != AES_BLOCK_SIZE) {
/* If the last block is incomplete, terminate it with a single 'one' bit
* followed by zeros. */
// If the last block is incomplete, terminate it with a single 'one' bit
// followed by zeros.
ctx->block[ctx->block_used] = 0x80;
OPENSSL_memset(ctx->block + ctx->block_used + 1, 0,
AES_BLOCK_SIZE - (ctx->block_used + 1));


+ 21
- 21
crypto/conf/conf.c

@@ -69,8 +69,8 @@
#include "../internal.h"


/* The maximum length we can grow a value to after variable expansion. 64k
* should be more than enough for all reasonable uses. */
// The maximum length we can grow a value to after variable expansion. 64k
// should be more than enough for all reasonable uses.
#define MAX_CONF_VALUE_LENGTH 65536

static uint32_t conf_value_hash(const CONF_VALUE *v) {
@@ -263,7 +263,7 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) {
} else if (IS_EOF(conf, *from)) {
break;
} else if (*from == '$') {
/* try to expand it */
// try to expand it
rrp = NULL;
s = &(from[1]);
if (*s == '{') {
@@ -303,14 +303,14 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) {
}
e++;
}
/* So at this point we have
* np which is the start of the name string which is
* '\0' terminated.
* cp which is the start of the section string which is
* '\0' terminated.
* e is the 'next point after'.
* r and rr are the chars replaced by the '\0'
* rp and rrp is where 'r' and 'rr' came from. */
// So at this point we have
// np which is the start of the name string which is
// '\0' terminated.
// cp which is the start of the section string which is
// '\0' terminated.
// e is the 'next point after'.
// r and rr are the chars replaced by the '\0'
// rp and rrp is where 'r' and 'rr' came from.
p = NCONF_get_string(conf, cp, np);
if (rrp != NULL) {
*rrp = rr;
@@ -566,25 +566,25 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) {
i--;
}
}
/* we removed some trailing stuff so there is a new
* line on the end. */
// we removed some trailing stuff so there is a new
// line on the end.
if (ii && i == ii) {
again = 1; /* long line */
again = 1; // long line
} else {
p[i] = '\0';
eline++; /* another input line */
eline++; // another input line
}

/* we now have a line with trailing \r\n removed */
// we now have a line with trailing \r\n removed

/* i is the number of bytes */
// i is the number of bytes
bufnum += i;

v = NULL;
/* check for line continuation */
// check for line continuation
if (bufnum >= 1) {
/* If we have bytes and the last char '\\' and
* second last char is not '\\' */
// If we have bytes and the last char '\\' and
// second last char is not '\\'
p = &(buff->data[bufnum - 1]);
if (IS_ESC(conf, p[0]) && ((bufnum <= 1) || !IS_ESC(conf, p[-1]))) {
bufnum--;
@@ -600,7 +600,7 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) {
clear_comments(conf, buf);
s = eat_ws(conf, buf);
if (IS_EOF(conf, *s)) {
continue; /* blank line */
continue; // blank line
}
if (*s == '[') {
char *ss;


+ 3
- 3
crypto/conf/internal.h

@@ -20,12 +20,12 @@ extern "C" {
#endif


/* CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. */
// CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|.
CONF_VALUE *CONF_VALUE_new(void);


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H */
#endif // OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H

+ 5
- 5
crypto/cpu-aarch64-linux.c

@@ -28,8 +28,8 @@ extern uint32_t OPENSSL_armcap_P;
void OPENSSL_cpuid_setup(void) {
unsigned long hwcap = getauxval(AT_HWCAP);

/* See /usr/include/asm/hwcap.h on an aarch64 installation for the source of
* these values. */
// See /usr/include/asm/hwcap.h on an aarch64 installation for the source of
// these values.
static const unsigned long kNEON = 1 << 1;
static const unsigned long kAES = 1 << 3;
static const unsigned long kPMULL = 1 << 4;
@@ -37,8 +37,8 @@ void OPENSSL_cpuid_setup(void) {
static const unsigned long kSHA256 = 1 << 6;

if ((hwcap & kNEON) == 0) {
/* Matching OpenSSL, if NEON is missing, don't report other features
* either. */
// Matching OpenSSL, if NEON is missing, don't report other features
// either.
return;
}

@@ -58,4 +58,4 @@ void OPENSSL_cpuid_setup(void) {
}
}

#endif /* OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP */
#endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP

+ 40
- 40
crypto/cpu-arm-linux.c

@@ -34,15 +34,15 @@

#define HWCAP_NEON (1 << 12)

/* See /usr/include/asm/hwcap.h on an ARM installation for the source of
* these values. */
// See /usr/include/asm/hwcap.h on an ARM installation for the source of
// these values.
#define HWCAP2_AES (1 << 0)
#define HWCAP2_PMULL (1 << 1)
#define HWCAP2_SHA1 (1 << 2)
#define HWCAP2_SHA2 (1 << 3)

/* |getauxval| is not available on Android until API level 20. Link it as a weak
* symbol and use other methods as fallback. */
// |getauxval| is not available on Android until API level 20. Link it as a weak
// symbol and use other methods as fallback.
unsigned long getauxval(unsigned long type) __attribute__((weak));

static int open_eintr(const char *path, int flags) {
@@ -61,8 +61,8 @@ static ssize_t read_eintr(int fd, void *out, size_t len) {
return ret;
}

/* read_full reads exactly |len| bytes from |fd| to |out|. On error or end of
* file, it returns zero. */
// read_full reads exactly |len| bytes from |fd| to |out|. On error or end of
// file, it returns zero.
static int read_full(int fd, void *out, size_t len) {
char *outp = out;
while (len > 0) {
@@ -76,9 +76,9 @@ static int read_full(int fd, void *out, size_t len) {
return 1;
}

/* read_file opens |path| and reads until end-of-file. On success, it returns
* one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the
* contents. Otherwise, it returns zero. */
// read_file opens |path| and reads until end-of-file. On success, it returns
// one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the
// contents. Otherwise, it returns zero.
static int read_file(char **out_ptr, size_t *out_len, const char *path) {
int fd = open_eintr(path, O_RDONLY);
if (fd < 0) {
@@ -128,7 +128,7 @@ err:
return ret;
}

/* getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv. */
// getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv.
static unsigned long getauxval_proc(unsigned long type) {
int fd = open_eintr("/proc/self/auxv", O_RDONLY);
if (fd < 0) {
@@ -164,16 +164,16 @@ static int STRING_PIECE_equals(const STRING_PIECE *a, const char *b) {
return a->len == b_len && OPENSSL_memcmp(a->data, b, b_len) == 0;
}

/* STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found,
* sets |*out_left| and |*out_right| to |in| split before and after it. It
* returns one if |sep| was found and zero otherwise. */
// STRING_PIECE_split finds the first occurrence of |sep| in |in| and, if found,
// sets |*out_left| and |*out_right| to |in| split before and after it. It
// returns one if |sep| was found and zero otherwise.
static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right,
const STRING_PIECE *in, char sep) {
const char *p = OPENSSL_memchr(in->data, sep, in->len);
if (p == NULL) {
return 0;
}
/* |out_left| or |out_right| may alias |in|, so make a copy. */
// |out_left| or |out_right| may alias |in|, so make a copy.
STRING_PIECE in_copy = *in;
out_left->data = in_copy.data;
out_left->len = p - in_copy.data;
@@ -182,7 +182,7 @@ static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right,
return 1;
}

/* STRING_PIECE_trim removes leading and trailing whitespace from |s|. */
// STRING_PIECE_trim removes leading and trailing whitespace from |s|.
static void STRING_PIECE_trim(STRING_PIECE *s) {
while (s->len != 0 && (s->data[0] == ' ' || s->data[0] == '\t')) {
s->data++;
@@ -194,12 +194,12 @@ static void STRING_PIECE_trim(STRING_PIECE *s) {
}
}

/* extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from
* |in|. If found, it sets |*out| to the value and returns one. Otherwise, it
* returns zero. */
// extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from
// |in|. If found, it sets |*out| to the value and returns one. Otherwise, it
// returns zero.
static int extract_cpuinfo_field(STRING_PIECE *out, const STRING_PIECE *in,
const char *field) {
/* Process |in| one line at a time. */
// Process |in| one line at a time.
STRING_PIECE remaining = *in, line;
while (STRING_PIECE_split(&line, &remaining, &remaining, '\n')) {
STRING_PIECE key, value;
@@ -224,8 +224,8 @@ static int cpuinfo_field_equals(const STRING_PIECE *cpuinfo, const char *field,
STRING_PIECE_equals(&extracted, value);
}

/* has_list_item treats |list| as a space-separated list of items and returns
* one if |item| is contained in |list| and zero otherwise. */
// has_list_item treats |list| as a space-separated list of items and returns
// one if |item| is contained in |list| and zero otherwise.
static int has_list_item(const STRING_PIECE *list, const char *item) {
STRING_PIECE remaining = *list, feature;
while (STRING_PIECE_split(&feature, &remaining, &remaining, ' ')) {
@@ -238,11 +238,11 @@ static int has_list_item(const STRING_PIECE *list, const char *item) {

static unsigned long get_hwcap_cpuinfo(const STRING_PIECE *cpuinfo) {
if (cpuinfo_field_equals(cpuinfo, "CPU architecture", "8")) {
/* This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always
* available on ARMv8. Linux omits required features, so reading the
* "Features" line does not work. (For simplicity, use strict equality. We
* assume everything running on future ARM architectures will have a
* working |getauxval|.) */
// This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always
// available on ARMv8. Linux omits required features, so reading the
// "Features" line does not work. (For simplicity, use strict equality. We
// assume everything running on future ARM architectures will have a
// working |getauxval|.)
return HWCAP_NEON;
}

@@ -276,8 +276,8 @@ static unsigned long get_hwcap2_cpuinfo(const STRING_PIECE *cpuinfo) {
return ret;
}

/* has_broken_neon returns one if |in| matches a CPU known to have a broken
* NEON unit. See https://crbug.com/341598. */
// has_broken_neon returns one if |in| matches a CPU known to have a broken
// NEON unit. See https://crbug.com/341598.
static int has_broken_neon(const STRING_PIECE *cpuinfo) {
return cpuinfo_field_equals(cpuinfo, "CPU implementer", "0x51") &&
cpuinfo_field_equals(cpuinfo, "CPU architecture", "7") &&
@@ -300,13 +300,13 @@ void OPENSSL_cpuid_setup(void) {
cpuinfo.data = cpuinfo_data;
cpuinfo.len = cpuinfo_len;

/* |getauxval| is not available on Android until API level 20. If it is
* unavailable, read from /proc/self/auxv as a fallback. This is unreadable
* on some versions of Android, so further fall back to /proc/cpuinfo.
*
* See
* https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2
* and b/13679666 (Google-internal) for details. */
// |getauxval| is not available on Android until API level 20. If it is
// unavailable, read from /proc/self/auxv as a fallback. This is unreadable
// on some versions of Android, so further fall back to /proc/cpuinfo.
//
// See
// https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2
// and b/13679666 (Google-internal) for details.
unsigned long hwcap = 0;
if (getauxval != NULL) {
hwcap = getauxval(AT_HWCAP);
@@ -318,18 +318,18 @@ void OPENSSL_cpuid_setup(void) {
hwcap = get_hwcap_cpuinfo(&cpuinfo);
}

/* Clear NEON support if known broken. */
// Clear NEON support if known broken.
g_has_broken_neon = has_broken_neon(&cpuinfo);
if (g_has_broken_neon) {
hwcap &= ~HWCAP_NEON;
}

/* Matching OpenSSL, only report other features if NEON is present. */
// Matching OpenSSL, only report other features if NEON is present.
if (hwcap & HWCAP_NEON) {
OPENSSL_armcap_P |= ARMV7_NEON;

/* Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to
* /proc/cpuinfo. See https://crbug.com/596156. */
// Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to
// /proc/cpuinfo. See https://crbug.com/596156.
unsigned long hwcap2 = 0;
if (getauxval != NULL) {
hwcap2 = getauxval(AT_HWCAP2);
@@ -357,4 +357,4 @@ void OPENSSL_cpuid_setup(void) {

int CRYPTO_has_broken_NEON(void) { return g_has_broken_neon; }

#endif /* OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP */
#endif // OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP

+ 51
- 51
crypto/cpu-intel.c

@@ -78,9 +78,9 @@ OPENSSL_MSVC_PRAGMA(warning(pop))
#include "internal.h"


/* OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX
* is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through
* |*out_edx|. */
// OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX
// is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through
// |*out_edx|.
static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx,
uint32_t *out_ecx, uint32_t *out_edx, uint32_t leaf) {
#if defined(_MSC_VER)
@@ -91,8 +91,8 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx,
*out_ecx = (uint32_t)tmp[2];
*out_edx = (uint32_t)tmp[3];
#elif defined(__pic__) && defined(OPENSSL_32_BIT)
/* Inline assembly may not clobber the PIC register. For 32-bit, this is EBX.
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. */
// Inline assembly may not clobber the PIC register. For 32-bit, this is EBX.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602.
__asm__ volatile (
"xor %%ecx, %%ecx\n"
"mov %%ebx, %%edi\n"
@@ -111,8 +111,8 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx,
#endif
}

/* OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR).
* Currently only XCR0 is defined by Intel so |xcr| should always be zero. */
// OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR).
// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
static uint64_t OPENSSL_xgetbv(uint32_t xcr) {
#if defined(_MSC_VER)
return (uint64_t)_xgetbv(xcr);
@@ -123,8 +123,8 @@ static uint64_t OPENSSL_xgetbv(uint32_t xcr) {
#endif
}
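
As an illustration of how these two helpers combine (a sketch, not part of this file; the bit positions are the ones named in the comments further down: OSXSAVE is ECX bit 27, AVX is ECX bit 28, and XCR0 & 6 == 6 means the OS saves XMM and YMM state):

static int avx_usable(void) {
  uint32_t eax, ebx, ecx, edx;
  OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 1);
  if ((ecx & (1u << 27)) == 0 ||  // OSXSAVE: XCR0 may not be queried otherwise
      (ecx & (1u << 28)) == 0) {  // AVX not advertised by the CPU
    return 0;
  }
  return (OPENSSL_xgetbv(0) & 6) == 6;  // XMM and YMM state enabled by the OS
}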

/* handle_cpu_env applies the value from |in| to the CPUID values in |out[0]|
* and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. */
// handle_cpu_env applies the value from |in| to the CPUID values in |out[0]|
// and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this.
static void handle_cpu_env(uint32_t *out, const char *in) {
const int invert = in[0] == '~';
uint64_t v;
@@ -143,7 +143,7 @@ static void handle_cpu_env(uint32_t *out, const char *in) {
}

void OPENSSL_cpuid_setup(void) {
/* Determine the vendor and maximum input value. */
// Determine the vendor and maximum input value.
uint32_t eax, ebx, ecx, edx;
OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0);

@@ -158,8 +158,8 @@ void OPENSSL_cpuid_setup(void) {

int has_amd_xop = 0;
if (is_amd) {
/* AMD-specific logic.
* See http://developer.amd.com/wordpress/media/2012/10/254811.pdf */
// AMD-specific logic.
// See http://developer.amd.com/wordpress/media/2012/10/254811.pdf
OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0x80000000);
uint32_t num_extended_ids = eax;
if (num_extended_ids >= 0x80000001) {
@@ -176,23 +176,23 @@ void OPENSSL_cpuid_setup(void) {
extended_features = ebx;
}

/* Determine the number of cores sharing an L1 data cache to adjust the
* hyper-threading bit. */
// Determine the number of cores sharing an L1 data cache to adjust the
// hyper-threading bit.
uint32_t cores_per_cache = 0;
if (is_amd) {
/* AMD CPUs never share an L1 data cache between threads but do set the HTT
* bit on multi-core CPUs. */
// AMD CPUs never share an L1 data cache between threads but do set the HTT
// bit on multi-core CPUs.
cores_per_cache = 1;
} else if (num_ids >= 4) {
/* TODO(davidben): The Intel manual says this CPUID leaf enumerates all
* caches using ECX and doesn't say which is first. Does this matter? */
// TODO(davidben): The Intel manual says this CPUID leaf enumerates all
// caches using ECX and doesn't say which is first. Does this matter?
OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 4);
cores_per_cache = 1 + ((eax >> 14) & 0xfff);
}

OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 1);

/* Adjust the hyper-threading bit. */
// Adjust the hyper-threading bit.
if (edx & (1 << 28)) {
uint32_t num_logical_cores = (ebx >> 16) & 0xff;
if (cores_per_cache == 1 || num_logical_cores <= 1) {
@@ -200,17 +200,17 @@ void OPENSSL_cpuid_setup(void) {
}
}

/* Reserved bit #20 was historically repurposed to control the in-memory
* representation of RC4 state. Always set it to zero. */
// Reserved bit #20 was historically repurposed to control the in-memory
// representation of RC4 state. Always set it to zero.
edx &= ~(1 << 20);

/* Reserved bit #30 is repurposed to signal an Intel CPU. */
// Reserved bit #30 is repurposed to signal an Intel CPU.
if (is_intel) {
edx |= (1 << 30);

/* Clear the XSAVE bit on Knights Landing to mimic Silvermont. This enables
* some Silvermont-specific codepaths which perform better. See OpenSSL
* commit 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. */
// Clear the XSAVE bit on Knights Landing to mimic Silvermont. This enables
// some Silvermont-specific codepaths which perform better. See OpenSSL
// commit 64d92d74985ebb3d0be58a9718f9e080a14a8e7f.
if ((eax & 0x0fff0ff0) == 0x00050670 /* Knights Landing */ ||
(eax & 0x0fff0ff0) == 0x00080650 /* Knights Mill (per SDE) */) {
ecx &= ~(1 << 26);
@@ -219,7 +219,7 @@ void OPENSSL_cpuid_setup(void) {
edx &= ~(1 << 30);
}

/* The SDBG bit is repurposed to denote AMD XOP support. */
// The SDBG bit is repurposed to denote AMD XOP support.
if (has_amd_xop) {
ecx |= (1 << 11);
} else {
@@ -228,31 +228,31 @@ void OPENSSL_cpuid_setup(void) {

uint64_t xcr0 = 0;
if (ecx & (1 << 27)) {
/* XCR0 may only be queried if the OSXSAVE bit is set. */
// XCR0 may only be queried if the OSXSAVE bit is set.
xcr0 = OPENSSL_xgetbv(0);
}
/* See Intel manual, volume 1, section 14.3. */
// See Intel manual, volume 1, section 14.3.
if ((xcr0 & 6) != 6) {
/* YMM registers cannot be used. */
ecx &= ~(1 << 28); /* AVX */
ecx &= ~(1 << 12); /* FMA */
ecx &= ~(1 << 11); /* AMD XOP */
/* Clear AVX2 and AVX512* bits.
*
* TODO(davidben): Should bits 17 and 26-28 also be cleared? Upstream
* doesn't clear those. */
// YMM registers cannot be used.
ecx &= ~(1 << 28); // AVX
ecx &= ~(1 << 12); // FMA
ecx &= ~(1 << 11); // AMD XOP
// Clear AVX2 and AVX512* bits.
//
// TODO(davidben): Should bits 17 and 26-28 also be cleared? Upstream
// doesn't clear those.
extended_features &=
~((1 << 5) | (1 << 16) | (1 << 21) | (1 << 30) | (1 << 31));
}
/* See Intel manual, volume 1, section 15.2. */
// See Intel manual, volume 1, section 15.2.
if ((xcr0 & 0xe6) != 0xe6) {
/* Clear AVX512F. Note we don't touch other AVX512 extensions because they
* can be used with YMM. */
// Clear AVX512F. Note we don't touch other AVX512 extensions because they
// can be used with YMM.
extended_features &= ~(1 << 16);
}

/* Disable ADX instructions on Knights Landing. See OpenSSL commit
* 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. */
// Disable ADX instructions on Knights Landing. See OpenSSL commit
// 64d92d74985ebb3d0be58a9718f9e080a14a8e7f.
if ((ecx & (1 << 26)) == 0) {
extended_features &= ~(1 << 19);
}
@@ -268,15 +268,15 @@ void OPENSSL_cpuid_setup(void) {
return;
}

/* OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'.
* Each value is a 64-bit, unsigned value which may start with "0x" to
* indicate a hex value. Prior to the 64-bit value, a '~' may be given.
*
* If '~' isn't present, then the value is taken as the result of the CPUID.
* Otherwise the value is inverted and ANDed with the probed CPUID result.
*
* The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2]
* and [3]. */
// OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'.
// Each value is a 64-bit, unsigned value which may start with "0x" to
// indicate a hex value. Prior to the 64-bit value, a '~' may be given.
//
// If '~' isn't present, then the value is taken as the result of the CPUID.
// Otherwise the value is inverted and ANDed with the probed CPUID result.
//
// The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2]
// and [3].

handle_cpu_env(&OPENSSL_ia32cap_P[0], env1);
env2 = strchr(env1, ':');
@@ -285,4 +285,4 @@ void OPENSSL_cpuid_setup(void) {
}
}

#endif /* !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) */
#endif // !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64)
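
The OPENSSL_ia32cap format described above boils down to "replace or mask". A minimal sketch of that rule for a single 64-bit value applied to one pair of capability words (a hypothetical helper, not the file's handle_cpu_env):

#include <stdint.h>
#include <stdlib.h>

static void apply_ia32cap_value(uint32_t out[2], const char *in) {
  const int invert = in[0] == '~';
  // strtoull with base 0 accepts both decimal and "0x"-prefixed hex values.
  const uint64_t v = strtoull(invert ? in + 1 : in, NULL, 0);
  if (invert) {
    out[0] &= (uint32_t)(~v);          // invert and AND with the probed result
    out[1] &= (uint32_t)(~v >> 32);
  } else {
    out[0] = (uint32_t)v;              // take the value as the CPUID result
    out[1] = (uint32_t)(v >> 32);
  }
}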

+ 3
- 3
crypto/cpu-ppc64le.c

@@ -22,8 +22,8 @@


#if !defined(PPC_FEATURE2_HAS_VCRYPTO)
/* PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER
* ABI for Linux Supplement”. */
// PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER
// ABI for Linux Supplement”.
#define PPC_FEATURE2_HAS_VCRYPTO 0x02000000
#endif

@@ -35,4 +35,4 @@ int CRYPTO_is_PPC64LE_vcrypto_capable(void) {
return (OPENSSL_ppc64le_hwcap2 & PPC_FEATURE2_HAS_VCRYPTO) != 0;
}

#endif /* OPENSSL_PPC64LE */
#endif // OPENSSL_PPC64LE

+ 28
- 28
crypto/crypto.c

@@ -23,14 +23,14 @@
(defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \
defined(OPENSSL_PPC64LE))
/* x86, x86_64, the ARMs and ppc64le need to record the result of a
* cpuid/getauxval call for the asm to work correctly, unless compiled without
* asm code. */
// x86, x86_64, the ARMs and ppc64le need to record the result of a
// cpuid/getauxval call for the asm to work correctly, unless compiled without
// asm code.
#define NEED_CPUID

#else

/* Otherwise, don't emit a static initialiser. */
// Otherwise, don't emit a static initialiser.

#if !defined(BORINGSSL_NO_STATIC_INITIALIZER)
#define BORINGSSL_NO_STATIC_INITIALIZER
@@ -40,23 +40,23 @@
OPENSSL_ARM || OPENSSL_AARCH64) */


/* The capability variables are defined in this file in order to work around a
* linker bug. When linking with a .a, if no symbols in a .o are referenced
* then the .o is discarded, even if it has constructor functions.
*
* This still means that any binaries that don't include some functionality
* that tests the capability values will still skip the constructor but, so
* far, the init constructor function only sets the capability variables. */
// The capability variables are defined in this file in order to work around a
// linker bug. When linking with a .a, if no symbols in a .o are referenced
// then the .o is discarded, even if it has constructor functions.
//
// This still means that any binaries that don't include some functionality
// that tests the capability values will still skip the constructor but, so
// far, the init constructor function only sets the capability variables.

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)

/* This value must be explicitly initialised to zero in order to work around a
* bug in libtool or the linker on OS X.
*
* If not initialised then it becomes a "common symbol". When put into an
* archive, linking on OS X will fail to resolve common symbols. By
* initialising it to zero, it becomes a "data symbol", which isn't so
* affected. */
// This value must be explicitly initialised to zero in order to work around a
// bug in libtool or the linker on OS X.
//
// If not initialised then it becomes a "common symbol". When put into an
// archive, linking on OS X will fail to resolve common symbols. By
// initialising it to zero, it becomes a "data symbol", which isn't so
// affected.
uint32_t OPENSSL_ia32cap_P[4] = {0};

#elif defined(OPENSSL_PPC64LE)
@@ -94,8 +94,8 @@ uint32_t OPENSSL_armcap_P = 0;
#endif

#if defined(BORINGSSL_FIPS)
/* In FIPS mode, the power-on self-test function calls |CRYPTO_library_init|
* because we have to ensure that CPUID detection occurs first. */
// In FIPS mode, the power-on self-test function calls |CRYPTO_library_init|
// because we have to ensure that CPUID detection occurs first.
#define BORINGSSL_NO_STATIC_INITIALIZER
#endif

@@ -116,21 +116,21 @@ __declspec(allocate(".CRT$XCU")) void(*library_init_constructor)(void) =
static void do_library_init(void) __attribute__ ((constructor));
#endif

/* do_library_init is the actual initialization function. If
* BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static
* initializer. Otherwise, it is called by CRYPTO_library_init. */
// do_library_init is the actual initialization function. If
// BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static
// initializer. Otherwise, it is called by CRYPTO_library_init.
static void OPENSSL_CDECL do_library_init(void) {
/* WARNING: this function may only configure the capability variables. See the
* note above about the linker bug. */
// WARNING: this function may only configure the capability variables. See the
// note above about the linker bug.
#if defined(NEED_CPUID)
OPENSSL_cpuid_setup();
#endif
}

void CRYPTO_library_init(void) {
/* TODO(davidben): It would be tidier if this build knob could be replaced
* with an internal lazy-init mechanism that would handle things correctly
* in-library. https://crbug.com/542879 */
// TODO(davidben): It would be tidier if this build knob could be replaced
// with an internal lazy-init mechanism that would handle things correctly
// in-library. https://crbug.com/542879
#if defined(BORINGSSL_NO_STATIC_INITIALIZER)
CRYPTO_once(&once, do_library_init);
#endif


+ 269
- 269
crypto/curve25519/curve25519.c

@@ -12,12 +12,12 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
* 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
* public domain but this file has the ISC license just to keep licencing
* simple.
*
* The field functions are shared by Ed25519 and X25519 where possible. */
// This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
// public domain but this file has the ISC license just to keep licencing
// simple.
//
// The field functions are shared by Ed25519 and X25519 where possible.

#include <openssl/curve25519.h>

@@ -55,7 +55,7 @@ static uint64_t load_4(const uint8_t *in) {
}

static void fe_frombytes(fe h, const uint8_t *s) {
/* Ignores top bit of h. */
// Ignores top bit of h.
int64_t h0 = load_4(s);
int64_t h1 = load_3(s + 4) << 6;
int64_t h2 = load_3(s + 7) << 5;
@@ -101,28 +101,28 @@ static void fe_frombytes(fe h, const uint8_t *s) {
h[9] = h9;
}

/* Preconditions:
* |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
*
* Write p=2^255-19; q=floor(h/p).
* Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
*
* Proof:
* Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
* Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4.
*
* Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
* Then 0<y<1.
*
* Write r=h-pq.
* Have 0<=r<=p-1=2^255-20.
* Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
*
* Write x=r+19(2^-255)r+y.
* Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
*
* Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
* so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. */
// Preconditions:
// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
// Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
// Then 0<y<1.
//
// Write r=h-pq.
// Have 0<=r<=p-1=2^255-20.
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
// Write x=r+19(2^-255)r+y.
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
static void fe_tobytes(uint8_t *s, const fe h) {
int32_t h0 = h[0];
int32_t h1 = h[1];
@@ -148,9 +148,9 @@ static void fe_tobytes(uint8_t *s, const fe h) {
q = (h8 + q) >> 26;
q = (h9 + q) >> 25;

/* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
h0 += 19 * q;
/* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.

h1 += h0 >> 26; h0 &= kBottom26Bits;
h2 += h1 >> 25; h1 &= kBottom25Bits;
@@ -162,12 +162,12 @@ static void fe_tobytes(uint8_t *s, const fe h) {
h8 += h7 >> 25; h7 &= kBottom25Bits;
h9 += h8 >> 26; h8 &= kBottom26Bits;
h9 &= kBottom25Bits;
/* h10 = carry9 */
// h10 = carry9

/* Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
* Have h0+...+2^230 h9 between 0 and 2^255-1;
* evidently 2^255 h10-2^255 q = 0.
* Goal: Output h0+...+2^230 h9. */
// Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
// Have h0+...+2^230 h9 between 0 and 2^255-1;
// evidently 2^255 h10-2^255 q = 0.
// Goal: Output h0+...+2^230 h9.

s[0] = h0 >> 0;
s[1] = h0 >> 8;
@@ -203,29 +203,29 @@ static void fe_tobytes(uint8_t *s, const fe h) {
s[31] = h9 >> 18;
}

/* h = f */
// h = f
static void fe_copy(fe h, const fe f) {
OPENSSL_memmove(h, f, sizeof(int32_t) * 10);
}

/* h = 0 */
// h = 0
static void fe_0(fe h) { OPENSSL_memset(h, 0, sizeof(int32_t) * 10); }

/* h = 1 */
// h = 1
static void fe_1(fe h) {
OPENSSL_memset(h, 0, sizeof(int32_t) * 10);
h[0] = 1;
}

/* h = f + g
* Can overlap h with f or g.
*
* Preconditions:
* |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
* |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
*
* Postconditions:
* |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
// h = f + g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
static void fe_add(fe h, const fe f, const fe g) {
unsigned i;
for (i = 0; i < 10; i++) {
@@ -233,15 +233,15 @@ static void fe_add(fe h, const fe f, const fe g) {
}
}

/* h = f - g
* Can overlap h with f or g.
*
* Preconditions:
* |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
* |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
*
* Postconditions:
* |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
// h = f - g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
static void fe_sub(fe h, const fe f, const fe g) {
unsigned i;
for (i = 0; i < 10; i++) {
@@ -249,33 +249,33 @@ static void fe_sub(fe h, const fe f, const fe g) {
}
}

/* h = f * g
* Can overlap h with f or g.
*
* Preconditions:
* |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
* |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
*
* Postconditions:
* |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
*
* Notes on implementation strategy:
*
* Using schoolbook multiplication.
* Karatsuba would save a little in some cost models.
*
* Most multiplications by 2 and 19 are 32-bit precomputations;
* cheaper than 64-bit postcomputations.
*
* There is one remaining multiplication by 19 in the carry chain;
* one *19 precomputation can be merged into this,
* but the resulting data flow is considerably less clean.
*
* There are 12 carries below.
* 10 of them are 2-way parallelizable and vectorizable.
* Can get away with 11 carries, but then data flow is much deeper.
*
* With tighter constraints on inputs can squeeze carries into int32. */
// h = f * g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
// |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
static void fe_mul(fe h, const fe f, const fe g) {
int32_t f0 = f[0];
int32_t f1 = f[1];
@@ -297,8 +297,8 @@ static void fe_mul(fe h, const fe f, const fe g) {
int32_t g7 = g[7];
int32_t g8 = g[8];
int32_t g9 = g[9];
int32_t g1_19 = 19 * g1; /* 1.959375*2^29 */
int32_t g2_19 = 19 * g2; /* 1.959375*2^30; still ok */
int32_t g1_19 = 19 * g1; // 1.959375*2^29
int32_t g2_19 = 19 * g2; // 1.959375*2^30; still ok
int32_t g3_19 = 19 * g3;
int32_t g4_19 = 19 * g4;
int32_t g5_19 = 19 * g5;
@@ -432,53 +432,53 @@ static void fe_mul(fe h, const fe f, const fe g) {
int64_t carry8;
int64_t carry9;

/* |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38))
* i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8
* |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19))
* i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 */
// |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38))
// i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8
// |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19))
// i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9

carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
/* |h0| <= 2^25 */
/* |h4| <= 2^25 */
/* |h1| <= 1.71*2^59 */
/* |h5| <= 1.71*2^59 */
// |h0| <= 2^25
// |h4| <= 2^25
// |h1| <= 1.71*2^59
// |h5| <= 1.71*2^59

carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
/* |h1| <= 2^24; from now on fits into int32 */
/* |h5| <= 2^24; from now on fits into int32 */
/* |h2| <= 1.41*2^60 */
/* |h6| <= 1.41*2^60 */
// |h1| <= 2^24; from now on fits into int32
// |h5| <= 2^24; from now on fits into int32
// |h2| <= 1.41*2^60
// |h6| <= 1.41*2^60

carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
/* |h2| <= 2^25; from now on fits into int32 unchanged */
/* |h6| <= 2^25; from now on fits into int32 unchanged */
/* |h3| <= 1.71*2^59 */
/* |h7| <= 1.71*2^59 */
// |h2| <= 2^25; from now on fits into int32 unchanged
// |h6| <= 2^25; from now on fits into int32 unchanged
// |h3| <= 1.71*2^59
// |h7| <= 1.71*2^59

carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
/* |h3| <= 2^24; from now on fits into int32 unchanged */
/* |h7| <= 2^24; from now on fits into int32 unchanged */
/* |h4| <= 1.72*2^34 */
/* |h8| <= 1.41*2^60 */
// |h3| <= 2^24; from now on fits into int32 unchanged
// |h7| <= 2^24; from now on fits into int32 unchanged
// |h4| <= 1.72*2^34
// |h8| <= 1.41*2^60

carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
/* |h4| <= 2^25; from now on fits into int32 unchanged */
/* |h8| <= 2^25; from now on fits into int32 unchanged */
/* |h5| <= 1.01*2^24 */
/* |h9| <= 1.71*2^59 */
// |h4| <= 2^25; from now on fits into int32 unchanged
// |h8| <= 2^25; from now on fits into int32 unchanged
// |h5| <= 1.01*2^24
// |h9| <= 1.71*2^59

carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
/* |h9| <= 2^24; from now on fits into int32 unchanged */
/* |h0| <= 1.1*2^39 */
// |h9| <= 2^24; from now on fits into int32 unchanged
// |h0| <= 1.1*2^39

carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
/* |h0| <= 2^25; from now on fits into int32 unchanged */
/* |h1| <= 1.01*2^24 */
// |h0| <= 2^25; from now on fits into int32 unchanged
// |h1| <= 1.01*2^24

h[0] = h0;
h[1] = h1;
@@ -492,16 +492,16 @@ static void fe_mul(fe h, const fe f, const fe g) {
h[9] = h9;
}

/* h = f * f
* Can overlap h with f.
*
* Preconditions:
* |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
*
* Postconditions:
* |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
*
* See fe_mul.c for discussion of implementation strategy. */
// h = f * f
// Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
//
// See fe_mul.c for discussion of implementation strategy.
static void fe_sq(fe h, const fe f) {
int32_t f0 = f[0];
int32_t f1 = f[1];
@@ -521,11 +521,11 @@ static void fe_sq(fe h, const fe f) {
int32_t f5_2 = 2 * f5;
int32_t f6_2 = 2 * f6;
int32_t f7_2 = 2 * f7;
int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */
int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */
int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */
int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */
int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */
int32_t f5_38 = 38 * f5; // 1.959375*2^30
int32_t f6_19 = 19 * f6; // 1.959375*2^30
int32_t f7_38 = 38 * f7; // 1.959375*2^30
int32_t f8_19 = 19 * f8; // 1.959375*2^30
int32_t f9_38 = 38 * f9; // 1.959375*2^30
int64_t f0f0 = f0 * (int64_t) f0;
int64_t f0f1_2 = f0_2 * (int64_t) f1;
int64_t f0f2_2 = f0_2 * (int64_t) f2;
@@ -691,13 +691,13 @@ static void fe_invert(fe out, const fe z) {
fe_mul(out, t1, t0);
}

/* h = -f
*
* Preconditions:
* |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
*
* Postconditions:
* |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */
// h = -f
//
// Preconditions:
// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
static void fe_neg(fe h, const fe f) {
unsigned i;
for (i = 0; i < 10; i++) {
@@ -705,10 +705,10 @@ static void fe_neg(fe h, const fe f) {
}
}

/* Replace (f,g) with (g,g) if b == 1;
* replace (f,g) with (f,g) if b == 0.
*
* Preconditions: b in {0,1}. */
// Replace (f,g) with (g,g) if b == 1;
// replace (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
static void fe_cmov(fe f, const fe g, unsigned b) {
b = 0-b;
unsigned i;
@@ -719,11 +719,11 @@ static void fe_cmov(fe f, const fe g, unsigned b) {
}
}

/* return 0 if f == 0
* return 1 if f != 0
*
* Preconditions:
* |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
// return 0 if f == 0
// return 1 if f != 0
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
static int fe_isnonzero(const fe f) {
uint8_t s[32];
fe_tobytes(s, f);
@@ -732,27 +732,27 @@ static int fe_isnonzero(const fe f) {
return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0;
}

/* return 1 if f is in {1,3,5,...,q-2}
* return 0 if f is in {0,2,4,...,q-1}
*
* Preconditions:
* |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
// return 1 if f is in {1,3,5,...,q-2}
// return 0 if f is in {0,2,4,...,q-1}
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
static int fe_isnegative(const fe f) {
uint8_t s[32];
fe_tobytes(s, f);
return s[0] & 1;
}

/* h = 2 * f * f
* Can overlap h with f.
*
* Preconditions:
* |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
*
* Postconditions:
* |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
*
* See fe_mul.c for discussion of implementation strategy. */
// h = 2 * f * f
// Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
//
// See fe_mul.c for discussion of implementation strategy.
static void fe_sq2(fe h, const fe f) {
int32_t f0 = f[0];
int32_t f1 = f[1];
@@ -772,11 +772,11 @@ static void fe_sq2(fe h, const fe f) {
int32_t f5_2 = 2 * f5;
int32_t f6_2 = 2 * f6;
int32_t f7_2 = 2 * f7;
int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */
int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */
int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */
int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */
int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */
int32_t f5_38 = 38 * f5; // 1.959375*2^30
int32_t f6_19 = 19 * f6; // 1.959375*2^30
int32_t f7_38 = 38 * f7; // 1.959375*2^30
int32_t f8_19 = 19 * f8; // 1.959375*2^30
int32_t f9_38 = 38 * f9; // 1.959375*2^30
int64_t f0f0 = f0 * (int64_t) f0;
int64_t f0f1_2 = f0_2 * (int64_t) f1;
int64_t f0f2_2 = f0_2 * (int64_t) f2;
@@ -993,24 +993,24 @@ int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s) {
fe_1(h->Z);
fe_sq(u, h->Y);
fe_mul(v, u, d);
fe_sub(u, u, h->Z); /* u = y^2-1 */
fe_add(v, v, h->Z); /* v = dy^2+1 */
fe_sub(u, u, h->Z); // u = y^2-1
fe_add(v, v, h->Z); // v = dy^2+1

fe_sq(v3, v);
fe_mul(v3, v3, v); /* v3 = v^3 */
fe_mul(v3, v3, v); // v3 = v^3
fe_sq(h->X, v3);
fe_mul(h->X, h->X, v);
fe_mul(h->X, h->X, u); /* x = uv^7 */
fe_mul(h->X, h->X, u); // x = uv^7

fe_pow22523(h->X, h->X); /* x = (uv^7)^((q-5)/8) */
fe_pow22523(h->X, h->X); // x = (uv^7)^((q-5)/8)
fe_mul(h->X, h->X, v3);
fe_mul(h->X, h->X, u); /* x = uv^3(uv^7)^((q-5)/8) */
fe_mul(h->X, h->X, u); // x = uv^3(uv^7)^((q-5)/8)

fe_sq(vxx, h->X);
fe_mul(vxx, vxx, v);
fe_sub(check, vxx, u); /* vx^2-u */
fe_sub(check, vxx, u); // vx^2-u
if (fe_isnonzero(check)) {
fe_add(check, vxx, u); /* vx^2+u */
fe_add(check, vxx, u); // vx^2+u
if (fe_isnonzero(check)) {
return -1;
}
@@ -1051,7 +1051,7 @@ static void ge_precomp_0(ge_precomp *h) {
fe_0(h->xy2d);
}

/* r = p */
// r = p
static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) {
fe_copy(r->X, p->X);
fe_copy(r->Y, p->Y);
@@ -1061,7 +1061,7 @@ static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) {
static const fe d2 = {-21827239, -5839606, -30745221, 13898782, 229458,
15978800, -12551817, -6495438, 29715968, 9444199};

/* r = p */
// r = p
void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) {
fe_add(r->YplusX, p->Y, p->X);
fe_sub(r->YminusX, p->Y, p->X);
@@ -1069,14 +1069,14 @@ void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) {
fe_mul(r->T2d, p->T, d2);
}

/* r = p */
// r = p
void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) {
fe_mul(r->X, p->X, p->T);
fe_mul(r->Y, p->Y, p->Z);
fe_mul(r->Z, p->Z, p->T);
}

/* r = p */
// r = p
void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) {
fe_mul(r->X, p->X, p->T);
fe_mul(r->Y, p->Y, p->Z);
@@ -1084,14 +1084,14 @@ void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) {
fe_mul(r->T, p->X, p->Y);
}

/* r = p */
// r = p
static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) {
ge_p3 t;
x25519_ge_p1p1_to_p3(&t, p);
x25519_ge_p3_to_cached(r, &t);
}

/* r = 2 * p */
// r = 2 * p
static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) {
fe t0;

@@ -1106,14 +1106,14 @@ static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) {
fe_sub(r->T, r->T, r->Z);
}

/* r = 2 * p */
// r = 2 * p
static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) {
ge_p2 q;
ge_p3_to_p2(&q, p);
ge_p2_dbl(r, &q);
}

/* r = p + q */
// r = p + q
static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
fe t0;

@@ -1129,7 +1129,7 @@ static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
fe_sub(r->T, t0, r->T);
}

/* r = p - q */
// r = p - q
static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
fe t0;

@@ -1145,7 +1145,7 @@ static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
fe_add(r->T, t0, r->T);
}

/* r = p + q */
// r = p + q
void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
fe t0;

@@ -1162,7 +1162,7 @@ void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
fe_sub(r->T, t0, r->T);
}

/* r = p - q */
// r = p - q
void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
fe t0;

@@ -1182,10 +1182,10 @@ void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
static uint8_t equal(signed char b, signed char c) {
uint8_t ub = b;
uint8_t uc = c;
uint8_t x = ub ^ uc; /* 0: yes; 1..255: no */
uint32_t y = x; /* 0: yes; 1..255: no */
y -= 1; /* 4294967295: yes; 0..254: no */
y >>= 31; /* 1: yes; 0: no */
uint8_t x = ub ^ uc; // 0: yes; 1..255: no
uint32_t y = x; // 0: yes; 1..255: no
y -= 1; // 4294967295: yes; 0..254: no
y >>= 31; // 1: yes; 0: no
return y;
}

@@ -1197,8 +1197,8 @@ static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) {

void x25519_ge_scalarmult_small_precomp(
ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) {
/* precomp_table is first expanded into matching |ge_precomp|
* elements. */
// precomp_table is first expanded into matching |ge_precomp|
// elements.
ge_precomp multiples[15];

unsigned i;
@@ -1215,9 +1215,9 @@ void x25519_ge_scalarmult_small_precomp(
fe_mul(out->xy2d, out->xy2d, d2);
}

/* See the comment above |k25519SmallPrecomp| about the structure of the
* precomputed elements. This loop does 64 additions and 64 doublings to
* calculate the result. */
// See the comment above |k25519SmallPrecomp| about the structure of the
// precomputed elements. This loop does 64 additions and 64 doublings to
// calculate the result.
ge_p3_0(h);

for (i = 63; i < 64; i--) {
@@ -1249,14 +1249,14 @@ void x25519_ge_scalarmult_small_precomp(

#if defined(OPENSSL_SMALL)

/* This block of code replaces the standard base-point table with a much smaller
* one. The standard table is 30,720 bytes while this one is just 960.
*
* This table contains 15 pairs of group elements, (x, y), where each field
* element is serialised with |fe_tobytes|. If |i| is the index of the group
* element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀
* is the most significant bit). The value of the group element is then:
* (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. */
// This block of code replaces the standard base-point table with a much smaller
// one. The standard table is 30,720 bytes while this one is just 960.
//
// This table contains 15 pairs of group elements, (x, y), where each field
// element is serialised with |fe_tobytes|. If |i| is the index of the group
// element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀
// is the most significant bit). The value of the group element is then:
// (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator.
static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = {
0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95,
0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0,
@@ -1346,7 +1346,7 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {

#else

/* k25519Precomp[i][j] = (j+1)*256^i*B */
// k25519Precomp[i][j] = (j+1)*256^i*B
static const ge_precomp k25519Precomp[32][8] = {
{
{
@@ -3464,7 +3464,7 @@ static const ge_precomp k25519Precomp[32][8] = {

static uint8_t negative(signed char b) {
uint32_t x = b;
x >>= 31; /* 1: yes; 0: no */
x >>= 31; // 1: yes; 0: no
return x;
}

@@ -3488,12 +3488,12 @@ static void table_select(ge_precomp *t, int pos, signed char b) {
cmov(t, &minust, bnegative);
}

/* h = a * B
* where a = a[0]+256*a[1]+...+256^31 a[31]
* B is the Ed25519 base point (x,4/5) with x positive.
*
* Preconditions:
* a[31] <= 127 */
// h = a * B
// where a = a[0]+256*a[1]+...+256^31 a[31]
// B is the Ed25519 base point (x,4/5) with x positive.
//
// Preconditions:
// a[31] <= 127
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) {
signed char e[64];
signed char carry;
@@ -3506,8 +3506,8 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) {
e[2 * i + 0] = (a[i] >> 0) & 15;
e[2 * i + 1] = (a[i] >> 4) & 15;
}
/* each e[i] is between 0 and 15 */
/* e[63] is between 0 and 7 */
// each e[i] is between 0 and 15
// e[63] is between 0 and 7

carry = 0;
for (i = 0; i < 63; ++i) {
@@ -3517,7 +3517,7 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) {
e[i] -= carry << 4;
}
e[63] += carry;
/* each e[i] is between -8 and 8 */
// each e[i] is between -8 and 8

ge_p3_0(h);
for (i = 1; i < 64; i += 2) {
@@ -3551,8 +3551,8 @@ static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) {
fe_cmov(t->T2d, u->T2d, b);
}

/* r = scalar * A.
* where a = a[0]+256*a[1]+...+256^31 a[31]. */
// r = scalar * A.
// where a = a[0]+256*a[1]+...+256^31 a[31].
void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) {
ge_p2 Ai_p2[8];
ge_cached Ai[16];
@@ -3706,15 +3706,15 @@ static const ge_precomp Bi[8] = {
},
};

/* r = a * A + b * B
* where a = a[0]+256*a[1]+...+256^31 a[31].
* and b = b[0]+256*b[1]+...+256^31 b[31].
* B is the Ed25519 base point (x,4/5) with x positive. */
// r = a * A + b * B
// where a = a[0]+256*a[1]+...+256^31 a[31].
// and b = b[0]+256*b[1]+...+256^31 b[31].
// B is the Ed25519 base point (x,4/5) with x positive.
static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a,
const ge_p3 *A, const uint8_t *b) {
signed char aslide[256];
signed char bslide[256];
ge_cached Ai[8]; /* A,3A,5A,7A,9A,11A,13A,15A */
ge_cached Ai[8]; // A,3A,5A,7A,9A,11A,13A,15A
ge_p1p1 t;
ge_p3 u;
ge_p3 A2;
@@ -3779,16 +3779,16 @@ static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a,
}
}

/* The set of scalars is \Z/l
* where l = 2^252 + 27742317777372353535851937790883648493. */
// The set of scalars is \Z/l
// where l = 2^252 + 27742317777372353535851937790883648493.

/* Input:
* s[0]+256*s[1]+...+256^63*s[63] = s
*
* Output:
* s[0]+256*s[1]+...+256^31*s[31] = s mod l
* where l = 2^252 + 27742317777372353535851937790883648493.
* Overwrites s in place. */
// Input:
// s[0]+256*s[1]+...+256^63*s[63] = s
//
// Output:
// s[0]+256*s[1]+...+256^31*s[31] = s mod l
// where l = 2^252 + 27742317777372353535851937790883648493.
// Overwrites s in place.
void x25519_sc_reduce(uint8_t *s) {
int64_t s0 = 2097151 & load_3(s);
int64_t s1 = 2097151 & (load_4(s + 2) >> 5);
@@ -4122,14 +4122,14 @@ void x25519_sc_reduce(uint8_t *s) {
s[31] = s11 >> 17;
}

/* Input:
* a[0]+256*a[1]+...+256^31*a[31] = a
* b[0]+256*b[1]+...+256^31*b[31] = b
* c[0]+256*c[1]+...+256^31*c[31] = c
*
* Output:
* s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
* where l = 2^252 + 27742317777372353535851937790883648493. */
// Input:
// a[0]+256*a[1]+...+256^31*a[31] = a
// b[0]+256*b[1]+...+256^31*b[31] = b
// c[0]+256*c[1]+...+256^31*c[31] = c
//
// Output:
// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
// where l = 2^252 + 27742317777372353535851937790883648493.
static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b,
const uint8_t *c) {
int64_t a0 = 2097151 & load_3(a);
@@ -4716,10 +4716,10 @@ static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],

#else

/* Replace (f,g) with (g,f) if b == 1;
* replace (f,g) with (f,g) if b == 0.
*
* Preconditions: b in {0,1}. */
// Replace (f,g) with (g,f) if b == 1;
// replace (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
static void fe_cswap(fe f, fe g, unsigned int b) {
b = 0-b;
unsigned i;
@@ -4731,14 +4731,14 @@ static void fe_cswap(fe f, fe g, unsigned int b) {
}
}

/* h = f * 121666
* Can overlap h with f.
*
* Preconditions:
* |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
*
* Postconditions:
* |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */
// h = f * 121666
// Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
static void fe_mul121666(fe h, fe f) {
int32_t f0 = f[0];
int32_t f1 = f[1];
@@ -4858,25 +4858,25 @@ static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],
x25519_scalar_mult_generic(out, scalar, point);
}

#endif /* BORINGSSL_X25519_X86_64 */
#endif // BORINGSSL_X25519_X86_64


void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) {
RAND_bytes(out_private_key, 32);

/* All X25519 implementations should decode scalars correctly (see
* https://tools.ietf.org/html/rfc7748#section-5). However, if an
* implementation doesn't then it might interoperate with random keys a
* fraction of the time because they'll, randomly, happen to be correctly
* formed.
*
* Thus we do the opposite of the masking here to make sure that our private
* keys are never correctly masked and so, hopefully, any incorrect
* implementations are deterministically broken.
*
* This does not affect security because, although we're throwing away
* entropy, a valid implementation of scalarmult should throw away the exact
* same bits anyway. */
// All X25519 implementations should decode scalars correctly (see
// https://tools.ietf.org/html/rfc7748#section-5). However, if an
// implementation doesn't then it might interoperate with random keys a
// fraction of the time because they'll, randomly, happen to be correctly
// formed.
//
// Thus we do the opposite of the masking here to make sure that our private
// keys are never correctly masked and so, hopefully, any incorrect
// implementations are deterministically broken.
//
// This does not affect security because, although we're throwing away
// entropy, a valid implementation of scalarmult should throw away the exact
// same bits anyway.
out_private_key[0] |= 7;
out_private_key[31] &= 63;
out_private_key[31] |= 128;
@@ -4888,15 +4888,15 @@ int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32],
const uint8_t peer_public_value[32]) {
static const uint8_t kZeros[32] = {0};
x25519_scalar_mult(out_shared_key, private_key, peer_public_value);
/* The all-zero output results when the input is a point of small order. */
// The all-zero output results when the input is a point of small order.
return CRYPTO_memcmp(kZeros, out_shared_key, 32) != 0;
}

#if defined(BORINGSSL_X25519_X86_64)

/* When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with
* the Montgomery ladder because it's faster. Otherwise it's done using the
* Ed25519 tables. */
// When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with
// the Montgomery ladder because it's faster. Otherwise it's done using the
// Ed25519 tables.

void X25519_public_from_private(uint8_t out_public_value[32],
const uint8_t private_key[32]) {
@@ -4925,8 +4925,8 @@ void X25519_public_from_private(uint8_t out_public_value[32],
ge_p3 A;
x25519_ge_scalarmult_base(&A, e);

/* We only need the u-coordinate of the curve25519 point. The map is
* u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). */
// We only need the u-coordinate of the curve25519 point. The map is
// u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y).
fe zplusy, zminusy, zminusy_inv;
fe_add(zplusy, A.Z, A.Y);
fe_sub(zminusy, A.Z, A.Y);
@@ -4935,4 +4935,4 @@ void X25519_public_from_private(uint8_t out_public_value[32],
fe_tobytes(out_public_value, zplusy);
}

#endif /* BORINGSSL_X25519_X86_64 */
#endif // BORINGSSL_X25519_X86_64
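The two comments above (the deliberate private-key masking in X25519_keypair and the all-zero check in X25519) describe behaviour that is visible through the public API. A minimal sketch of a full exchange using only <openssl/curve25519.h>; the memcmp at the end is purely for demonstration.

#include <stdio.h>
#include <string.h>
#include <openssl/curve25519.h>

int main(void) {
  // Each side generates a key pair; the private key is pre-masked as
  // described above.
  uint8_t alice_pub[32], alice_priv[32], bob_pub[32], bob_priv[32];
  X25519_keypair(alice_pub, alice_priv);
  X25519_keypair(bob_pub, bob_priv);

  // Each side combines its private key with the peer's public value.
  uint8_t alice_shared[32], bob_shared[32];
  if (!X25519(alice_shared, alice_priv, bob_pub) ||
      !X25519(bob_shared, bob_priv, alice_pub)) {
    // X25519 returns zero when the peer's point has small order.
    fprintf(stderr, "X25519 failed\n");
    return 1;
  }

  printf("shared secrets %s\n",
         memcmp(alice_shared, bob_shared, 32) == 0 ? "match" : "differ");
  return 0;
}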

+ 7
- 7
crypto/curve25519/internal.h Show file

@@ -32,15 +32,15 @@ void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32],
#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE)
#define BORINGSSL_X25519_NEON

/* x25519_NEON is defined in asm/x25519-arm.S. */
// x25519_NEON is defined in asm/x25519-arm.S.
void x25519_NEON(uint8_t out[32], const uint8_t scalar[32],
const uint8_t point[32]);
#endif

/* fe means field element. Here the field is \Z/(2^255-19). An element t,
* entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
* t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
* context. */
// fe means field element. Here the field is \Z/(2^255-19). An element t,
// entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
typedef int32_t fe[10];

/* ge means group element.
@@ -103,7 +103,7 @@ void x25519_sc_reduce(uint8_t *s);


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_CURVE25519_INTERNAL_H */
#endif // OPENSSL_HEADER_CURVE25519_INTERNAL_H
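Put as a single formula, the exponents 0, 26, 51, 77, ..., 230 listed in the comment above are just the ceilings of 25.5*i, so an fe t represents the residue class

    t[0] + 2^26 t[1] + ... + 2^230 t[9]  =  \sum_{i=0}^{9} t[i] \cdot 2^{\lceil 25.5\,i \rceil}   (mod 2^{255} - 19),

with the limbs alternating between 26 and 25 significant bits (hence "radix 2^25.5").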

+ 85
- 83
crypto/curve25519/spake25519.c Show file

@@ -25,80 +25,82 @@
#include "../internal.h"


/* The following precomputation tables are for the following
* points used in the SPAKE2 protocol.
*
* N:
* x: 49918732221787544735331783592030787422991506689877079631459872391322455579424
* y: 54629554431565467720832445949441049581317094546788069926228343916274969994000
* encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778
*
* M:
* x: 31406539342727633121250288103050113562375374900226415211311216773867585644232
* y: 21177308356423958466833845032658859666296341766942662650232962324899758529114
* encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e
*
* These points and their precomputation tables are generated with the
* following Python code. For a description of the precomputation table,
* see curve25519.c in this directory.
*
* Exact copies of the source code are kept in bug 27296743.
*
* import hashlib
* import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py
*
* SEED_N = 'edwards25519 point generation seed (N)'
* SEED_M = 'edwards25519 point generation seed (M)'
*
* def genpoint(seed):
* v = hashlib.sha256(seed).digest()
* it = 1
* while True:
* try:
* x,y = E.decodepoint(v)
* except Exception, e:
* print e
* it += 1
* v = hashlib.sha256(v).digest()
* continue
* print "Found in %d iterations:" % it
* print " x = %d" % x
* print " y = %d" % y
* print " Encoded (hex)"
* print E.encodepoint((x,y)).encode('hex')
* return (x,y)
*
* def gentable(P):
* t = []
* for i in range(1,16):
* k = (i >> 3 & 1) * (1 << 192) + \
* (i >> 2 & 1) * (1 << 128) + \
* (i >> 1 & 1) * (1 << 64) + \
* (i & 1)
* t.append(E.scalarmult(P, k))
* return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t)
*
* def printtable(table, name):
* print "static const uint8_t %s[15 * 2 * 32] = {" % name,
* for i in range(15 * 2 * 32):
* if i % 12 == 0:
* print "\n ",
* print " 0x%02x," % ord(table[i]),
* print "\n};"
*
* if __name__ == "__main__":
* print "Searching for N"
* N = genpoint(SEED_N)
* print "Generating precomputation table for N"
* Ntable = gentable(N)
* printtable(Ntable, "kSpakeNSmallPrecomp")
*
* print "Searching for M"
* M = genpoint(SEED_M)
* print "Generating precomputation table for M"
* Mtable = gentable(M)
* printtable(Mtable, "kSpakeMSmallPrecomp")
*/
// The following precomputation tables are for the following
// points used in the SPAKE2 protocol.
//
// N:
// x: 49918732221787544735331783592030787422991506689877079631459872391322455579424
// y: 54629554431565467720832445949441049581317094546788069926228343916274969994000
// encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778
//
// M:
// x: 31406539342727633121250288103050113562375374900226415211311216773867585644232
// y: 21177308356423958466833845032658859666296341766942662650232962324899758529114
// encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e
//
// These points and their precomputation tables are generated with the
// following Python code. For a description of the precomputation table,
// see curve25519.c in this directory.
//
// Exact copies of the source code are kept in bug 27296743.
//
// import hashlib
// import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py
//
// SEED_N = 'edwards25519 point generation seed (N)'
// SEED_M = 'edwards25519 point generation seed (M)'

/*
def genpoint(seed):
v = hashlib.sha256(seed).digest()
it = 1
while True:
try:
x,y = E.decodepoint(v)
except Exception, e:
print e
it += 1
v = hashlib.sha256(v).digest()
continue
print "Found in %d iterations:" % it
print " x = %d" % x
print " y = %d" % y
print " Encoded (hex)"
print E.encodepoint((x,y)).encode('hex')
return (x,y)

def gentable(P):
t = []
for i in range(1,16):
k = (i >> 3 & 1) * (1 << 192) + \
(i >> 2 & 1) * (1 << 128) + \
(i >> 1 & 1) * (1 << 64) + \
(i & 1)
t.append(E.scalarmult(P, k))
return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t)

def printtable(table, name):
print "static const uint8_t %s[15 * 2 * 32] = {" % name,
for i in range(15 * 2 * 32):
if i % 12 == 0:
print "\n ",
print " 0x%02x," % ord(table[i]),
print "\n};"

if __name__ == "__main__":
print "Searching for N"
N = genpoint(SEED_N)
print "Generating precomputation table for N"
Ntable = gentable(N)
printtable(Ntable, "kSpakeNSmallPrecomp")

print "Searching for M"
M = genpoint(SEED_M)
print "Generating precomputation table for M"
Mtable = gentable(M)
printtable(Mtable, "kSpakeMSmallPrecomp")
*/

static const uint8_t kSpakeNSmallPrecomp[15 * 2 * 32] = {
0x20, 0x1b, 0xc5, 0xb3, 0x43, 0x17, 0x71, 0x10, 0x44, 0x1e, 0x73, 0xb3,
0xae, 0x3f, 0xbf, 0x9f, 0xf5, 0x44, 0xc8, 0x13, 0x8f, 0xd1, 0x01, 0xc2,
@@ -317,8 +319,8 @@ void SPAKE2_CTX_free(SPAKE2_CTX *ctx) {
OPENSSL_free(ctx);
}

/* left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian
* order. */
// left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian
// order.
static void left_shift_3(uint8_t n[32]) {
uint8_t carry = 0;
unsigned i;
@@ -344,15 +346,15 @@ int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len,
uint8_t private_tmp[64];
RAND_bytes(private_tmp, sizeof(private_tmp));
x25519_sc_reduce(private_tmp);
/* Multiply by the cofactor (eight) so that we'll clear it when operating on
* the peer's point later in the protocol. */
// Multiply by the cofactor (eight) so that we'll clear it when operating on
// the peer's point later in the protocol.
left_shift_3(private_tmp);
OPENSSL_memcpy(ctx->private_key, private_tmp, sizeof(ctx->private_key));

ge_p3 P;
x25519_ge_scalarmult_base(&P, ctx->private_key);

/* mask = h(password) * <N or M>. */
// mask = h(password) * <N or M>.
uint8_t password_tmp[SHA512_DIGEST_LENGTH];
SHA512(password, password_len, password_tmp);
OPENSSL_memcpy(ctx->password_hash, password_tmp, sizeof(ctx->password_hash));
@@ -365,13 +367,13 @@ int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len,
? kSpakeMSmallPrecomp
: kSpakeNSmallPrecomp);

/* P* = P + mask. */
// P* = P + mask.
ge_cached mask_cached;
x25519_ge_p3_to_cached(&mask_cached, &mask);
ge_p1p1 Pstar;
x25519_ge_add(&Pstar, &P, &mask_cached);

/* Encode P* */
// Encode P*
ge_p2 Pstar_proj;
x25519_ge_p1p1_to_p2(&Pstar_proj, &Pstar);
x25519_ge_tobytes(ctx->my_msg, &Pstar_proj);
@@ -408,11 +410,11 @@ int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len,

ge_p3 Qstar;
if (0 != x25519_ge_frombytes_vartime(&Qstar, their_msg)) {
/* Point received from peer was not on the curve. */
// Point received from peer was not on the curve.
return 0;
}

/* Unmask peer's value. */
// Unmask peer's value.
ge_p3 peers_mask;
x25519_ge_scalarmult_small_precomp(&peers_mask, ctx->password_scalar,
ctx->my_role == spake2_role_alice
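The masking and unmasking shown above are normally reached through the public SPAKE2 functions. A rough sketch of Alice's half of the protocol (Bob's side is identical with spake2_role_bob and the names swapped); send_to_peer/recv_from_peer are placeholders for the transport, and the SPAKE2_CTX_new signature plus the SPAKE2_MAX_MSG_SIZE/SPAKE2_MAX_KEY_SIZE bounds are assumed from <openssl/curve25519.h>, not shown in this commit.

#include <stddef.h>
#include <openssl/curve25519.h>

// Placeholder transport functions; not part of BoringSSL.
extern void send_to_peer(const uint8_t *msg, size_t len);
extern size_t recv_from_peer(uint8_t *msg, size_t max_len);

int alice_spake2(const uint8_t *password, size_t password_len,
                 uint8_t out_key[SPAKE2_MAX_KEY_SIZE], size_t *out_key_len) {
  SPAKE2_CTX *ctx =
      SPAKE2_CTX_new(spake2_role_alice, (const uint8_t *)"alice", 5,
                     (const uint8_t *)"bob", 3);
  if (ctx == NULL) {
    return 0;
  }

  uint8_t my_msg[SPAKE2_MAX_MSG_SIZE];
  size_t my_msg_len;
  if (!SPAKE2_generate_msg(ctx, my_msg, &my_msg_len, sizeof(my_msg), password,
                           password_len)) {
    SPAKE2_CTX_free(ctx);
    return 0;
  }
  send_to_peer(my_msg, my_msg_len);

  uint8_t their_msg[SPAKE2_MAX_MSG_SIZE];
  size_t their_msg_len = recv_from_peer(their_msg, sizeof(their_msg));

  // Fails if the peer's message is not a point on the curve (see above).
  int ok = SPAKE2_process_msg(ctx, out_key, out_key_len, SPAKE2_MAX_KEY_SIZE,
                              their_msg, their_msg_len);
  SPAKE2_CTX_free(ctx);
  return ok;
}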


+ 1
- 1
crypto/curve25519/spake25519_test.cc Show file

@@ -25,7 +25,7 @@
#include "../internal.h"


/* TODO(agl): add tests with fixed vectors once SPAKE2 is nailed down. */
// TODO(agl): add tests with fixed vectors once SPAKE2 is nailed down.

struct SPAKE2Run {
bool Run() {


+ 9
- 9
crypto/curve25519/x25519-x86_64.c Show file

@@ -12,12 +12,12 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
* 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
* public domain but this file has the ISC license just to keep licencing
* simple.
*
* The field functions are shared by Ed25519 and X25519 where possible. */
// This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
// public domain but this file has the ISC license just to keep licencing
// simple.
//
// The field functions are shared by Ed25519 and X25519 where possible.

#include <openssl/curve25519.h>

@@ -31,7 +31,7 @@

typedef struct { uint64_t v[5]; } fe25519;

/* These functions are defined in asm/x25519-x86_64.S */
// These functions are defined in asm/x25519-x86_64.S
void x25519_x86_64_work_cswap(fe25519 *, uint64_t);
void x25519_x86_64_mul(fe25519 *out, const fe25519 *a, const fe25519 *b);
void x25519_x86_64_square(fe25519 *out, const fe25519 *a);
@@ -46,7 +46,7 @@ static void fe25519_setint(fe25519 *r, unsigned v) {
r->v[4] = 0;
}

/* Assumes input x being reduced below 2^255 */
// Assumes input x being reduced below 2^255
static void fe25519_pack(unsigned char r[32], const fe25519 *x) {
fe25519 t;
t = *x;
@@ -244,4 +244,4 @@ void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32],
fe25519_pack(out, &t);
}

#endif /* BORINGSSL_X25519_X86_64 */
#endif // BORINGSSL_X25519_X86_64
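For contrast with the ten-limb fe above, the 64-bit assembly path works on five 64-bit limbs. The file itself only states the sub-2^255 reduction bound, but from the uint64_t v[5] layout this is presumably the usual radix-2^51 form, i.e. an fe25519 x would represent

    x = \sum_{i=0}^{4} v[i] \cdot 2^{51 i}   (mod 2^{255} - 19).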

+ 13
- 14
crypto/dh/check.c Show file

@@ -70,7 +70,7 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) {

int ok = 0;

/* Check |pub_key| is greater than 1. */
// Check |pub_key| is greater than 1.
BIGNUM *tmp = BN_CTX_get(ctx);
if (tmp == NULL ||
!BN_set_word(tmp, 1)) {
@@ -80,7 +80,7 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) {
*out_flags |= DH_CHECK_PUBKEY_TOO_SMALL;
}

/* Check |pub_key| is less than |dh->p| - 1. */
// Check |pub_key| is less than |dh->p| - 1.
if (!BN_copy(tmp, dh->p) ||
!BN_sub_word(tmp, 1)) {
goto err;
@@ -90,9 +90,9 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) {
}

if (dh->q != NULL) {
/* Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114
* groups which are not safe primes but pick a generator on a prime-order
* subgroup of size |dh->q|. */
// Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114
// groups which are not safe primes but pick a generator on a prime-order
// subgroup of size |dh->q|.
if (!BN_mod_exp_mont(tmp, pub_key, dh->q, dh->p, ctx, NULL)) {
goto err;
}
@@ -111,13 +111,12 @@ err:


int DH_check(const DH *dh, int *out_flags) {
/* Check that p is a safe prime and if g is 2, 3 or 5, check that it is a
* suitable generator where:
* for 2, p mod 24 == 11
* for 3, p mod 12 == 5
* for 5, p mod 10 == 3 or 7
* should hold.
*/
// Check that p is a safe prime and if g is 2, 3 or 5, check that it is a
// suitable generator where:
// for 2, p mod 24 == 11
// for 3, p mod 12 == 5
// for 5, p mod 10 == 3 or 7
// should hold.
int ok = 0, r;
BN_CTX *ctx = NULL;
BN_ULONG l;
@@ -144,7 +143,7 @@ int DH_check(const DH *dh, int *out_flags) {
} else if (BN_cmp(dh->g, dh->p) >= 0) {
*out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
} else {
/* Check g^q == 1 mod p */
// Check g^q == 1 mod p
if (!BN_mod_exp_mont(t1, dh->g, dh->q, dh->p, ctx, NULL)) {
goto err;
}
@@ -159,7 +158,7 @@ int DH_check(const DH *dh, int *out_flags) {
if (!r) {
*out_flags |= DH_CHECK_Q_NOT_PRIME;
}
/* Check p == 1 mod q i.e. q divides p - 1 */
// Check p == 1 mod q i.e. q divides p - 1
if (!BN_div(t1, t2, dh->p, dh->q, ctx)) {
goto err;
}
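DH_check_pub_key reports problems through the *out_flags bitmask rather than the return value, so a caller has to look at both. A hedged sketch of rejecting a peer's public value before deriving a shared secret; DH_CHECK_PUBKEY_TOO_LARGE is assumed from <openssl/dh.h> alongside the DH_CHECK_PUBKEY_TOO_SMALL flag set above.

#include <stdio.h>
#include <openssl/bn.h>
#include <openssl/dh.h>

// Returns 1 if |peer_pub| passed every check, 0 otherwise.
int validate_peer_key(const DH *dh, const BIGNUM *peer_pub) {
  int flags = 0;
  if (!DH_check_pub_key(dh, peer_pub, &flags)) {
    return 0;  // internal error, e.g. allocation failure
  }
  if (flags & DH_CHECK_PUBKEY_TOO_SMALL) {
    fprintf(stderr, "peer key is <= 1\n");
  }
  if (flags & DH_CHECK_PUBKEY_TOO_LARGE) {
    fprintf(stderr, "peer key is >= p - 1\n");
  }
  // Any other bit (e.g. the q-subgroup check above failing) also means the
  // key must be rejected, so insist on a clean bitmask.
  return flags == 0;
}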


+ 32
- 34
crypto/dh/dh.c Show file

@@ -138,32 +138,30 @@ void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q,
}

int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb) {
/* We generate DH parameters as follows
* find a prime q which is prime_bits/2 bits long.
* p=(2*q)+1 or (p-1)/2 = q
* For this case, g is a generator if
* g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1.
* Since the factors of p-1 are q and 2, we just need to check
* g^2 mod p != 1 and g^q mod p != 1.
*
* Having said all that,
* there is another special case method for the generators 2, 3 and 5.
* for 2, p mod 24 == 11
* for 3, p mod 12 == 5 <<<<< does not work for safe primes.
* for 5, p mod 10 == 3 or 7
*
* Thanks to Phil Karn <karn@qualcomm.com> for the pointers about the
* special generators and for answering some of my questions.
*
* I've implemented the second simple method :-).
* Since DH should be using a safe prime (both p and q are prime),
* this generator function can take a very very long time to run.
*/

/* Actually there is no reason to insist that 'generator' be a generator.
* It's just as OK (and in some sense better) to use a generator of the
* order-q subgroup.
*/
// We generate DH parameters as follows
// find a prime q which is prime_bits/2 bits long.
// p=(2*q)+1 or (p-1)/2 = q
// For this case, g is a generator if
// g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1.
// Since the factors of p-1 are q and 2, we just need to check
// g^2 mod p != 1 and g^q mod p != 1.
//
// Having said all that,
// there is another special case method for the generators 2, 3 and 5.
// for 2, p mod 24 == 11
// for 3, p mod 12 == 5 <<<<< does not work for safe primes.
// for 5, p mod 10 == 3 or 7
//
// Thanks to Phil Karn <karn@qualcomm.com> for the pointers about the
// special generators and for answering some of my questions.
//
// I've implemented the second simple method :-).
// Since DH should be using a safe prime (both p and q are prime),
// this generator function can take a very very long time to run.

// Actually there is no reason to insist that 'generator' be a generator.
// It's just as OK (and in some sense better) to use a generator of the
// order-q subgroup.

BIGNUM *t1, *t2;
int g, ok = 0;
@@ -180,7 +178,7 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c
goto err;
}

/* Make sure |dh| has the necessary elements */
// Make sure |dh| has the necessary elements
if (dh->p == NULL) {
dh->p = BN_new();
if (dh->p == NULL) {
@@ -213,14 +211,14 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c
if (!BN_set_word(t2, 3)) {
goto err;
}
/* BN_set_word(t3,7); just have to miss
* out on these ones :-( */
// BN_set_word(t3,7); just have to miss
// out on these ones :-(
g = 5;
} else {
/* in the general case, don't worry if 'generator' is a
* generator or not: since we are using safe primes,
* it will generate either an order-q or an order-2q group,
* which both is OK */
// in the general case, don't worry if 'generator' is a
// generator or not: since we are using safe primes,
// it will generate either an order-q or an order-2q group,
// which both is OK
if (!BN_set_word(t1, 2)) {
goto err;
}
@@ -299,7 +297,7 @@ int DH_generate_key(DH *dh) {
goto err;
}
} else {
/* secret exponent length */
// secret exponent length
unsigned priv_bits = dh->priv_length;
if (priv_bits == 0) {
const unsigned p_bits = BN_num_bits(dh->p);
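As the comment notes, searching for a safe prime can take a very long time, so parameters are generated once (or taken from a standard group) and reused for many key agreements. A sketch of that flow with collapsed error handling; dh_demo is a made-up name, and DHparams_dup, DH_get0_key, DH_compute_key, DH_GENERATOR_2 and OPENSSL_malloc are assumed from the public headers rather than shown in this commit.

#include <openssl/bn.h>
#include <openssl/dh.h>
#include <openssl/mem.h>

// Generates throwaway parameters, then has two sides derive a shared
// secret. Real code would generate parameters offline (the safe-prime
// search above is the slow step) or use a standardized group.
int dh_demo(void) {
  int ok = 0;
  uint8_t *secret = NULL;
  DH *a = DH_new();
  DH *b = NULL;
  if (a == NULL ||
      !DH_generate_parameters_ex(a, 2048, DH_GENERATOR_2, NULL) ||
      (b = DHparams_dup(a)) == NULL ||
      !DH_generate_key(a) ||
      !DH_generate_key(b) ||
      (secret = OPENSSL_malloc(DH_size(a))) == NULL) {
    goto out;
  }

  // |a| combines its own private key with |b|'s public key.
  const BIGNUM *b_pub = NULL;
  DH_get0_key(b, &b_pub, NULL);
  ok = DH_compute_key(secret, b_pub, a) > 0;

out:
  OPENSSL_free(secret);
  DH_free(a);
  DH_free(b);
  return ok;
}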


+ 1
- 1
crypto/dh/dh_asn1.c Show file

@@ -76,7 +76,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) {

static int marshal_integer(CBB *cbb, BIGNUM *bn) {
if (bn == NULL) {
/* A DH object may be missing some components. */
// A DH object may be missing some components.
OPENSSL_PUT_ERROR(DH, ERR_R_PASSED_NULL_PARAMETER);
return 0;
}
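marshal_integer treating a NULL component as an error means only fully-populated parameters can be serialised. A small sketch of the round trip, assuming the DH_marshal_parameters/DH_parse_parameters pair from <openssl/dh.h>:

#include <openssl/bytestring.h>
#include <openssl/dh.h>
#include <openssl/mem.h>

// Round-trips a PKCS#3 DHParameter. Returns the parsed copy or NULL; a DH
// whose |p| or |g| is missing fails inside DH_marshal_parameters via the
// NULL check above.
DH *roundtrip_params(const DH *dh) {
  uint8_t *der = NULL;
  size_t der_len;
  CBB cbb;
  if (!CBB_init(&cbb, 64)) {
    return NULL;
  }
  if (!DH_marshal_parameters(&cbb, dh) ||
      !CBB_finish(&cbb, &der, &der_len)) {
    CBB_cleanup(&cbb);
    return NULL;
  }

  CBS cbs;
  CBS_init(&cbs, der, der_len);
  DH *copy = DH_parse_parameters(&cbs);
  OPENSSL_free(der);
  return copy;
}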


+ 18
- 18
crypto/digest_extra/digest_extra.c Show file

@@ -82,11 +82,11 @@ static const struct nid_to_digest nid_to_digest_mapping[] = {
{NID_sha384, EVP_sha384, SN_sha384, LN_sha384},
{NID_sha512, EVP_sha512, SN_sha512, LN_sha512},
{NID_md5_sha1, EVP_md5_sha1, SN_md5_sha1, LN_md5_sha1},
/* As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding
* hash function when given a signature OID. To avoid unintended lax parsing
* of hash OIDs, this is no longer supported for lookup by OID or NID.
* Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to
* consumers so we retain it there. */
// As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding
// hash function when given a signature OID. To avoid unintended lax parsing
// of hash OIDs, this is no longer supported for lookup by OID or NID.
// Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to
// consumers so we retain it there.
{NID_undef, EVP_sha1, SN_dsaWithSHA, LN_dsaWithSHA},
{NID_undef, EVP_sha1, SN_dsaWithSHA1, LN_dsaWithSHA1},
{NID_undef, EVP_sha1, SN_ecdsa_with_SHA1, NULL},
@@ -104,7 +104,7 @@ static const struct nid_to_digest nid_to_digest_mapping[] = {

const EVP_MD* EVP_get_digestbynid(int nid) {
if (nid == NID_undef) {
/* Skip the |NID_undef| entries in |nid_to_digest_mapping|. */
// Skip the |NID_undef| entries in |nid_to_digest_mapping|.
return NULL;
}

@@ -122,19 +122,19 @@ static const struct {
uint8_t oid_len;
const EVP_MD *(*md_func) (void);
} kMDOIDs[] = {
/* 1.2.840.113549.2.4 */
// 1.2.840.113549.2.4
{ {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04}, 8, EVP_md4 },
/* 1.2.840.113549.2.5 */
// 1.2.840.113549.2.5
{ {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05}, 8, EVP_md5 },
/* 1.3.14.3.2.26 */
// 1.3.14.3.2.26
{ {0x2b, 0x0e, 0x03, 0x02, 0x1a}, 5, EVP_sha1 },
/* 2.16.840.1.101.3.4.2.1 */
// 2.16.840.1.101.3.4.2.1
{ {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01}, 9, EVP_sha256 },
/* 2.16.840.1.101.3.4.2.2 */
// 2.16.840.1.101.3.4.2.2
{ {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02}, 9, EVP_sha384 },
/* 2.16.840.1.101.3.4.2.3 */
// 2.16.840.1.101.3.4.2.3
{ {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03}, 9, EVP_sha512 },
/* 2.16.840.1.101.3.4.2.4 */
// 2.16.840.1.101.3.4.2.4
{ {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04}, 9, EVP_sha224 },
};

@@ -151,7 +151,7 @@ static const EVP_MD *cbs_to_md(const CBS *cbs) {
}

const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) {
/* Handle objects with no corresponding OID. */
// Handle objects with no corresponding OID.
if (obj->nid != NID_undef) {
return EVP_get_digestbynid(obj->nid);
}
@@ -175,10 +175,10 @@ const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs) {
return NULL;
}

/* The parameters, if present, must be NULL. Historically, whether the NULL
* was included or omitted was not well-specified. When parsing an
* AlgorithmIdentifier, we allow both. (Note this code is not used when
* verifying RSASSA-PKCS1-v1_5 signatures.) */
// The parameters, if present, must be NULL. Historically, whether the NULL
// was included or omitted was not well-specified. When parsing an
// AlgorithmIdentifier, we allow both. (Note this code is not used when
// verifying RSASSA-PKCS1-v1_5 signatures.)
if (CBS_len(&algorithm) > 0) {
CBS param;
if (!CBS_get_asn1(&algorithm, &param, CBS_ASN1_NULL) ||
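A short sketch of the parser that the OID table and the NULL-parameter handling above feed into. Note that EVP_parse_digest_algorithm is declared in the internal header shown next, not a public one, so this assumes the snippet is compiled inside the library tree; the DER below is the AlgorithmIdentifier for SHA-256 with the optional explicit NULL parameters.

#include <assert.h>
#include <openssl/bytestring.h>
#include <openssl/digest.h>

#include "internal.h"  // crypto/digest_extra/internal.h, for EVP_parse_digest_algorithm

int main(void) {
  // SEQUENCE { OID 2.16.840.1.101.3.4.2.1, NULL }
  static const uint8_t kSHA256AlgID[] = {
      0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
      0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00,
  };

  CBS cbs;
  CBS_init(&cbs, kSHA256AlgID, sizeof(kSHA256AlgID));
  const EVP_MD *md = EVP_parse_digest_algorithm(&cbs);
  assert(md == EVP_sha256());
  assert(CBS_len(&cbs) == 0);  // the whole AlgorithmIdentifier was consumed
  return 0;
}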


+ 2
- 2
crypto/digest_extra/internal.h Show file

@@ -26,7 +26,7 @@ const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs);


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_DIGEST_EXTRA_INTERNAL */
#endif // OPENSSL_HEADER_DIGEST_EXTRA_INTERNAL

+ 66
- 66
crypto/dsa/dsa.c Show file

@@ -78,8 +78,8 @@

#define OPENSSL_DSA_MAX_MODULUS_BITS 10000

/* Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of
* Rabin-Miller */
// Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of
// Rabin-Miller
#define DSS_prime_checks 50

static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT;
@@ -186,7 +186,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
return 0;
}
if (seed_len > (size_t)qsize) {
/* Only consume as much seed as is expected. */
// Only consume as much seed as is expected.
seed_len = qsize;
}
OPENSSL_memcpy(seed, seed_in, seed_len);
@@ -217,9 +217,9 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
}

for (;;) {
/* Find q. */
// Find q.
for (;;) {
/* step 1 */
// step 1
if (!BN_GENCB_call(cb, 0, m++)) {
goto err;
}
@@ -230,12 +230,12 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
goto err;
}
} else {
/* If we come back through, use random seed next time. */
// If we come back through, use random seed next time.
seed_in = NULL;
}
OPENSSL_memcpy(buf, seed, qsize);
OPENSSL_memcpy(buf2, seed, qsize);
/* precompute "SEED + 1" for step 7: */
// precompute "SEED + 1" for step 7:
for (i = qsize - 1; i < qsize; i--) {
buf[i]++;
if (buf[i] != 0) {
@@ -243,7 +243,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
}
}

/* step 2 */
// step 2
if (!EVP_Digest(seed, qsize, md, NULL, evpmd, NULL) ||
!EVP_Digest(buf, qsize, buf2, NULL, evpmd, NULL)) {
goto err;
@@ -252,14 +252,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
md[i] ^= buf2[i];
}

/* step 3 */
// step 3
md[0] |= 0x80;
md[qsize - 1] |= 0x01;
if (!BN_bin2bn(md, qsize, q)) {
goto err;
}

/* step 4 */
// step 4
r = BN_is_prime_fasttest_ex(q, DSS_prime_checks, ctx, use_random_seed, cb);
if (r > 0) {
break;
@@ -268,17 +268,17 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
goto err;
}

/* do a callback call */
/* step 5 */
// do a callback call
// step 5
}

if (!BN_GENCB_call(cb, 2, 0) || !BN_GENCB_call(cb, 3, 0)) {
goto err;
}

/* step 6 */
// step 6
counter = 0;
/* "offset = 2" */
// "offset = 2"

n = (bits - 1) / 160;

@@ -287,11 +287,11 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
goto err;
}

/* step 7 */
// step 7
BN_zero(W);
/* now 'buf' contains "SEED + offset - 1" */
// now 'buf' contains "SEED + offset - 1"
for (k = 0; k <= n; k++) {
/* obtain "SEED + offset + k" by incrementing: */
// obtain "SEED + offset + k" by incrementing:
for (i = qsize - 1; i < qsize; i--) {
buf[i]++;
if (buf[i] != 0) {
@@ -303,7 +303,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
goto err;
}

/* step 8 */
// step 8
if (!BN_bin2bn(md, qsize, r0) ||
!BN_lshift(r0, r0, (qsize << 3) * k) ||
!BN_add(W, W, r0)) {
@@ -311,14 +311,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
}
}

/* more of step 8 */
// more of step 8
if (!BN_mask_bits(W, bits - 1) ||
!BN_copy(X, W) ||
!BN_add(X, X, test)) {
goto err;
}

/* step 9 */
// step 9
if (!BN_lshift1(r0, q) ||
!BN_mod(c, X, r0, ctx) ||
!BN_sub(r0, c, BN_value_one()) ||
@@ -326,23 +326,23 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in,
goto err;
}

/* step 10 */
// step 10
if (BN_cmp(p, test) >= 0) {
/* step 11 */
// step 11
r = BN_is_prime_fasttest_ex(p, DSS_prime_checks, ctx, 1, cb);
if (r > 0) {
goto end; /* found it */
goto end; // found it
}
if (r != 0) {
goto err;
}
}

/* step 13 */
// step 13
counter++;
/* "offset = offset + n + 1" */
// "offset = offset + n + 1"

/* step 14 */
// step 14
if (counter >= 4096) {
break;
}
@@ -353,8 +353,8 @@ end:
goto err;
}

/* We now need to generate g */
/* Set r0=(p-1)/q */
// We now need to generate g
// Set r0=(p-1)/q
if (!BN_sub(test, p, BN_value_one()) ||
!BN_div(r0, NULL, test, q, ctx)) {
goto err;
@@ -366,7 +366,7 @@ end:
}

for (;;) {
/* g=test^r0%p */
// g=test^r0%p
if (!BN_mod_exp_mont(g, test, r0, p, ctx, mont)) {
goto err;
}
@@ -544,9 +544,9 @@ redo:
}

if (digest_len > BN_num_bytes(dsa->q)) {
/* if the digest length is greater than the size of q use the
* BN_num_bits(dsa->q) leftmost bits of the digest, see
* fips 186-3, 4.2 */
// if the digest length is greater than the size of q use the
// BN_num_bits(dsa->q) leftmost bits of the digest, see
// fips 186-3, 4.2
digest_len = BN_num_bytes(dsa->q);
}

@@ -554,12 +554,12 @@ redo:
goto err;
}

/* Compute s = inv(k) (m + xr) mod q */
// Compute s = inv(k) (m + xr) mod q
if (!BN_mod_mul(&xr, dsa->priv_key, r, dsa->q, ctx)) {
goto err; /* s = xr */
goto err; // s = xr
}
if (!BN_add(s, &xr, &m)) {
goto err; /* s = m + xr */
goto err; // s = m + xr
}
if (BN_cmp(s, dsa->q) > 0) {
if (!BN_sub(s, s, dsa->q)) {
@@ -570,8 +570,8 @@ redo:
goto err;
}

/* Redo if r or s is zero as required by FIPS 186-3: this is
* very unlikely. */
// Redo if r or s is zero as required by FIPS 186-3: this is
// very unlikely.
if (BN_is_zero(r) || BN_is_zero(s)) {
if (noredo) {
reason = DSA_R_NEED_NEW_SETUP_VALUES;
@@ -624,7 +624,7 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest,
}

i = BN_num_bits(dsa->q);
/* fips 186-3 allows only different sizes for q */
// fips 186-3 allows only different sizes for q
if (i != 160 && i != 224 && i != 256) {
OPENSSL_PUT_ERROR(DSA, DSA_R_BAD_Q_VALUE);
return 0;
@@ -655,17 +655,17 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest,
goto err;
}

/* Calculate W = inv(S) mod Q
* save W in u2 */
// Calculate W = inv(S) mod Q
// save W in u2
if (BN_mod_inverse(&u2, sig->s, dsa->q, ctx) == NULL) {
goto err;
}

/* save M in u1 */
// save M in u1
if (digest_len > (i >> 3)) {
/* if the digest length is greater than the size of q use the
* BN_num_bits(dsa->q) leftmost bits of the digest, see
* fips 186-3, 4.2 */
// if the digest length is greater than the size of q use the
// BN_num_bits(dsa->q) leftmost bits of the digest, see
// fips 186-3, 4.2
digest_len = (i >> 3);
}

@@ -673,12 +673,12 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest,
goto err;
}

/* u1 = M * w mod q */
// u1 = M * w mod q
if (!BN_mod_mul(&u1, &u1, &u2, dsa->q, ctx)) {
goto err;
}

/* u2 = r * w mod q */
// u2 = r * w mod q
if (!BN_mod_mul(&u2, sig->r, &u2, dsa->q, ctx)) {
goto err;
}
@@ -694,14 +694,14 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest,
goto err;
}

/* BN_copy(&u1,&t1); */
/* let u1 = u1 mod q */
// BN_copy(&u1,&t1);
// let u1 = u1 mod q
if (!BN_mod(&u1, &t1, dsa->q, ctx)) {
goto err;
}

/* V is now in u1. If the signature is correct, it will be
* equal to R. */
// V is now in u1. If the signature is correct, it will be
// equal to R.
*out_valid = BN_ucmp(&u1, sig->r) == 0;
ret = 1;

@@ -758,7 +758,7 @@ int DSA_check_signature(int *out_valid, const uint8_t *digest,
goto err;
}

/* Ensure that the signature uses DER and doesn't have trailing garbage. */
// Ensure that the signature uses DER and doesn't have trailing garbage.
int der_len = i2d_DSA_SIG(s, &der);
if (der_len < 0 || (size_t)der_len != sig_len ||
OPENSSL_memcmp(sig, der, sig_len)) {
@@ -773,8 +773,8 @@ err:
return ret;
}

/* der_len_len returns the number of bytes needed to represent a length of |len|
* in DER. */
// der_len_len returns the number of bytes needed to represent a length of |len|
// in DER.
static size_t der_len_len(size_t len) {
if (len < 0x80) {
return 1;
@@ -789,18 +789,18 @@ static size_t der_len_len(size_t len) {

int DSA_size(const DSA *dsa) {
size_t order_len = BN_num_bytes(dsa->q);
/* Compute the maximum length of an |order_len| byte integer. Defensively
* assume that the leading 0x00 is included. */
// Compute the maximum length of an |order_len| byte integer. Defensively
// assume that the leading 0x00 is included.
size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len;
if (integer_len < order_len) {
return 0;
}
/* A DSA signature is two INTEGERs. */
// A DSA signature is two INTEGERs.
size_t value_len = 2 * integer_len;
if (value_len < integer_len) {
return 0;
}
/* Add the header. */
// Add the header.
size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len;
if (ret < value_len) {
return 0;
@@ -835,7 +835,7 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv,
goto err;
}

/* Get random k */
// Get random k
if (!BN_rand_range_ex(&k, 1, dsa->q)) {
goto err;
}
@@ -849,16 +849,16 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv,
goto err;
}

/* Compute r = (g^k mod p) mod q */
// Compute r = (g^k mod p) mod q
if (!BN_copy(&kq, &k)) {
goto err;
}

/* We do not want timing information to leak the length of k,
* so we compute g^k using an equivalent exponent of fixed length.
*
* (This is a kludge that we need because the BN_mod_exp_mont()
* does not let us specify the desired timing behaviour.) */
// We do not want timing information to leak the length of k,
// so we compute g^k using an equivalent exponent of fixed length.
//
// (This is a kludge that we need because the BN_mod_exp_mont()
// does not let us specify the desired timing behaviour.)

if (!BN_add(&kq, &kq, dsa->q)) {
goto err;
@@ -875,8 +875,8 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv,
goto err;
}

/* Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little
* Theorem. */
// Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little
// Theorem.
kinv = BN_new();
if (kinv == NULL ||
!bn_mod_inverse_prime(kinv, &k, dsa->q, ctx, dsa->method_mont_q)) {
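The generation and signing code above is textbook DSA: r = (g^k mod p) mod q and s = k^{-1}(H(m) + x r) mod q, with verification recomputing (g^{u1} y^{u2} mod p) mod q for u1 = H(m)·s^{-1} mod q and u2 = r·s^{-1} mod q. A sketch of the public entry points end to end; dsa_demo is a made-up name, DSA_do_sign, DSA_generate_key and DSA_SIG_free are assumed from <openssl/dsa.h>, and parameter generation is the slow step, as above.

#include <openssl/dsa.h>
#include <openssl/sha.h>

// Signs and verifies one SHA-256 digest with freshly generated
// parameters. Returns 1 on success.
int dsa_demo(const uint8_t *msg, size_t msg_len) {
  int ok = 0;
  DSA_SIG *sig = NULL;
  DSA *dsa = DSA_new();
  if (dsa == NULL ||
      !DSA_generate_parameters_ex(dsa, 2048, NULL, 0, NULL, NULL, NULL) ||
      !DSA_generate_key(dsa)) {
    goto out;
  }

  uint8_t digest[SHA256_DIGEST_LENGTH];
  SHA256(msg, msg_len, digest);

  sig = DSA_do_sign(digest, sizeof(digest), dsa);
  if (sig == NULL) {
    goto out;
  }

  // DSA_do_check_signature distinguishes "bad signature" from errors.
  int valid = 0;
  if (!DSA_do_check_signature(&valid, digest, sizeof(digest), sig, dsa)) {
    goto out;
  }
  ok = valid;

out:
  DSA_SIG_free(sig);
  DSA_free(dsa);
  return ok;
}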


+ 1
- 1
crypto/dsa/dsa_asn1.c Show file

@@ -75,7 +75,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) {

static int marshal_integer(CBB *cbb, BIGNUM *bn) {
if (bn == NULL) {
/* A DSA object may be missing some components. */
// A DSA object may be missing some components.
OPENSSL_PUT_ERROR(DSA, ERR_R_PASSED_NULL_PARAMETER);
return 0;
}


+ 8
- 8
crypto/dsa/dsa_test.cc Show file

@@ -71,8 +71,8 @@
#include "../internal.h"


/* The following values are taken from the updated Appendix 5 to FIPS PUB 186
* and also appear in Appendix 5 to FIPS PUB 186-1. */
// The following values are taken from the updated Appendix 5 to FIPS PUB 186
// and also appear in Appendix 5 to FIPS PUB 186-1.

static const uint8_t seed[20] = {
0xd5, 0x01, 0x4e, 0x4b, 0x60, 0xef, 0x2b, 0xa8, 0xb6, 0x21, 0x1b,
@@ -121,7 +121,7 @@ static const uint8_t fips_digest[] = {
0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d,
};

/* fips_sig is a DER-encoded version of the r and s values in FIPS PUB 186-1. */
// fips_sig is a DER-encoded version of the r and s values in FIPS PUB 186-1.
static const uint8_t fips_sig[] = {
0x30, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10,
0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92,
@@ -130,7 +130,7 @@ static const uint8_t fips_sig[] = {
0xdc, 0xd8, 0xc8,
};

/* fips_sig_negative is fips_sig with r encoded as a negative number. */
// fips_sig_negative is fips_sig with r encoded as a negative number.
static const uint8_t fips_sig_negative[] = {
0x30, 0x2c, 0x02, 0x14, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43,
0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92, 0xb3,
@@ -139,7 +139,7 @@ static const uint8_t fips_sig_negative[] = {
0xd8, 0xc8,
};

/* fip_sig_extra is fips_sig with trailing data. */
// fip_sig_extra is fips_sig with trailing data.
static const uint8_t fips_sig_extra[] = {
0x30, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10,
0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92,
@@ -148,7 +148,7 @@ static const uint8_t fips_sig_extra[] = {
0xdc, 0xd8, 0xc8, 0x00,
};

/* fips_sig_lengths is fips_sig with a non-minimally encoded length. */
// fips_sig_lengths is fips_sig with a non-minimally encoded length.
static const uint8_t fips_sig_bad_length[] = {
0x30, 0x81, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64,
0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c,
@@ -157,7 +157,7 @@ static const uint8_t fips_sig_bad_length[] = {
0xb6, 0xdc, 0xd8, 0xc8, 0x00,
};

/* fips_sig_bad_r is fips_sig with a bad r value. */
// fips_sig_bad_r is fips_sig with a bad r value.
static const uint8_t fips_sig_bad_r[] = {
0x30, 0x2d, 0x02, 0x15, 0x00, 0x8c, 0xac, 0x1a, 0xb6, 0x64, 0x10,
0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92,
@@ -299,7 +299,7 @@ static bool TestVerify(const uint8_t *sig, size_t sig_len, int expect) {
return false;
}

/* Clear any errors from a test with expected failure. */
// Clear any errors from a test with expected failure.
ERR_clear_error();
return true;
}


+ 51
- 51
crypto/ec_extra/ec_asn1.c

@@ -83,14 +83,14 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
return NULL;
}

/* Parse the optional parameters field. */
// Parse the optional parameters field.
EC_GROUP *inner_group = NULL;
EC_KEY *ret = NULL;
if (CBS_peek_asn1_tag(&ec_private_key, kParametersTag)) {
/* Per SEC 1, as an alternative to omitting it, one is allowed to specify
* this field and put in a NULL to mean inheriting this value. This was
* omitted in a previous version of this logic without problems, so leave it
* unimplemented. */
// Per SEC 1, as an alternative to omitting it, one is allowed to specify
// this field and put in a NULL to mean inheriting this value. This was
// omitted in a previous version of this logic without problems, so leave it
// unimplemented.
CBS child;
if (!CBS_get_asn1(&ec_private_key, &child, kParametersTag)) {
OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
@@ -103,7 +103,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
if (group == NULL) {
group = inner_group;
} else if (EC_GROUP_cmp(group, inner_group, NULL) != 0) {
/* If a group was supplied externally, it must match. */
// If a group was supplied externally, it must match.
OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH);
goto err;
}
@@ -123,9 +123,9 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
goto err;
}

/* Although RFC 5915 specifies the length of the key, OpenSSL historically
* got this wrong, so accept any length. See upstream's
* 30cd4ff294252c4b6a4b69cbef6a5b4117705d22. */
// Although RFC 5915 specifies the length of the key, OpenSSL historically
// got this wrong, so accept any length. See upstream's
// 30cd4ff294252c4b6a4b69cbef6a5b4117705d22.
ret->priv_key =
BN_bin2bn(CBS_data(&private_key), CBS_len(&private_key), NULL);
ret->pub_key = EC_POINT_new(group);
@@ -143,12 +143,12 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
uint8_t padding;
if (!CBS_get_asn1(&ec_private_key, &child, kPublicKeyTag) ||
!CBS_get_asn1(&child, &public_key, CBS_ASN1_BITSTRING) ||
/* As in a SubjectPublicKeyInfo, the byte-encoded public key is then
* encoded as a BIT STRING with bits ordered as in the DER encoding. */
// As in a SubjectPublicKeyInfo, the byte-encoded public key is then
// encoded as a BIT STRING with bits ordered as in the DER encoding.
!CBS_get_u8(&public_key, &padding) ||
padding != 0 ||
/* Explicitly check |public_key| is non-empty to save the conversion
* form later. */
// Explicitly check |public_key| is non-empty to save the conversion
// form later.
CBS_len(&public_key) == 0 ||
!EC_POINT_oct2point(group, ret->pub_key, CBS_data(&public_key),
CBS_len(&public_key), NULL) ||
@@ -157,17 +157,17 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
goto err;
}

/* Save the point conversion form.
* TODO(davidben): Consider removing this. */
// Save the point conversion form.
// TODO(davidben): Consider removing this.
ret->conv_form =
(point_conversion_form_t)(CBS_data(&public_key)[0] & ~0x01);
} else {
/* Compute the public key instead. */
// Compute the public key instead.
if (!EC_POINT_mul(group, ret->pub_key, ret->priv_key, NULL, NULL, NULL)) {
goto err;
}
/* Remember the original private-key-only encoding.
* TODO(davidben): Consider removing this. */
// Remember the original private-key-only encoding.
// TODO(davidben): Consider removing this.
ret->enc_flag |= EC_PKEY_NO_PUBKEY;
}

@@ -176,7 +176,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
goto err;
}

/* Ensure the resulting key is valid. */
// Ensure the resulting key is valid.
if (!EC_KEY_check_key(ret)) {
goto err;
}
@@ -218,13 +218,13 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key,
}
}

/* TODO(fork): replace this flexibility with sensible default? */
// TODO(fork): replace this flexibility with sensible default?
if (!(enc_flags & EC_PKEY_NO_PUBKEY) && key->pub_key != NULL) {
CBB child, public_key;
if (!CBB_add_asn1(&ec_private_key, &child, kPublicKeyTag) ||
!CBB_add_asn1(&child, &public_key, CBS_ASN1_BITSTRING) ||
/* As in a SubjectPublicKeyInfo, the byte-encoded public key is then
* encoded as a BIT STRING with bits ordered as in the DER encoding. */
// As in a SubjectPublicKeyInfo, the byte-encoded public key is then
// encoded as a BIT STRING with bits ordered as in the DER encoding.
!CBB_add_u8(&public_key, 0 /* padding */) ||
!EC_POINT_point2cbb(&public_key, key->group, key->pub_key,
key->conv_form, NULL) ||
@@ -242,8 +242,8 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key,
return 1;
}

/* is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and
* zero otherwise. */
// is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and
// zero otherwise.
static int is_unsigned_integer(const CBS *cbs) {
if (CBS_len(cbs) == 0) {
return 0;
@@ -251,20 +251,20 @@ static int is_unsigned_integer(const CBS *cbs) {
uint8_t byte = CBS_data(cbs)[0];
if ((byte & 0x80) ||
(byte == 0 && CBS_len(cbs) > 1 && (CBS_data(cbs)[1] & 0x80) == 0)) {
/* Negative or not minimally-encoded. */
// Negative or not minimally-encoded.
return 0;
}
return 1;
}

/* kPrimeFieldOID is the encoding of 1.2.840.10045.1.1. */
// kPrimeFieldOID is the encoding of 1.2.840.10045.1.1.
static const uint8_t kPrimeField[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x01};

static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a,
CBS *out_b, CBS *out_base_x,
CBS *out_base_y, CBS *out_order) {
/* See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an
* ECParameters while RFC 5480 calls it a SpecifiedECDomain. */
// See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an
// ECParameters while RFC 5480 calls it a SpecifiedECDomain.
CBS params, field_id, field_type, curve, base;
uint64_t version;
if (!CBS_get_asn1(in, &params, CBS_ASN1_SEQUENCE) ||
@@ -280,7 +280,7 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a,
!CBS_get_asn1(&params, &curve, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&curve, out_a, CBS_ASN1_OCTETSTRING) ||
!CBS_get_asn1(&curve, out_b, CBS_ASN1_OCTETSTRING) ||
/* |curve| has an optional BIT STRING seed which we ignore. */
// |curve| has an optional BIT STRING seed which we ignore.
!CBS_get_asn1(&params, &base, CBS_ASN1_OCTETSTRING) ||
!CBS_get_asn1(&params, out_order, CBS_ASN1_INTEGER) ||
!is_unsigned_integer(out_order)) {
@@ -288,11 +288,11 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a,
return 0;
}

/* |params| has an optional cofactor which we ignore. With the optional seed
* in |curve|, a group already has arbitrarily many encodings. Parse enough to
* uniquely determine the curve. */
// |params| has an optional cofactor which we ignore. With the optional seed
// in |curve|, a group already has arbitrarily many encodings. Parse enough to
// uniquely determine the curve.

/* Require that the base point use uncompressed form. */
// Require that the base point use uncompressed form.
uint8_t form;
if (!CBS_get_u8(&base, &form) || form != POINT_CONVERSION_UNCOMPRESSED) {
OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FORM);
@@ -310,10 +310,10 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a,
return 1;
}

/* integers_equal returns one if |a| and |b| are equal, up to leading zeros, and
* zero otherwise. */
// integers_equal returns one if |a| and |b| are equal, up to leading zeros, and
// zero otherwise.
static int integers_equal(const CBS *a, const uint8_t *b, size_t b_len) {
/* Remove leading zeros from |a| and |b|. */
// Remove leading zeros from |a| and |b|.
CBS a_copy = *a;
while (CBS_len(&a_copy) > 0 && CBS_data(&a_copy)[0] == 0) {
CBS_skip(&a_copy, 1);
@@ -332,7 +332,7 @@ EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs) {
return NULL;
}

/* Look for a matching curve. */
// Look for a matching curve.
const struct built_in_curves *const curves = OPENSSL_built_in_curves();
for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) {
const struct built_in_curve *curve = &curves->curves[i];
@@ -374,26 +374,26 @@ EC_GROUP *EC_KEY_parse_parameters(CBS *cbs) {
return EC_KEY_parse_curve_name(cbs);
}

/* OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions
* of named curves.
*
* TODO(davidben): Remove support for this. */
// OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions
// of named curves.
//
// TODO(davidben): Remove support for this.
CBS prime, a, b, base_x, base_y, order;
if (!parse_explicit_prime_curve(cbs, &prime, &a, &b, &base_x, &base_y,
&order)) {
return NULL;
}

/* Look for a matching prime curve. */
// Look for a matching prime curve.
const struct built_in_curves *const curves = OPENSSL_built_in_curves();
for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) {
const struct built_in_curve *curve = &curves->curves[i];
const unsigned param_len = curve->param_len;
/* |curve->params| is ordered p, a, b, x, y, order, each component
* zero-padded up to the field length. Although SEC 1 states that the
* Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes
* |a| and |b|, so this comparison must allow omitting leading zeros. (This
* is relevant for P-521 whose |b| has a leading 0.) */
// |curve->params| is ordered p, a, b, x, y, order, each component
// zero-padded up to the field length. Although SEC 1 states that the
// Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes
// |a| and |b|, so this comparison must allow omitting leading zeros. (This
// is relevant for P-521 whose |b| has a leading 0.)
if (integers_equal(&prime, curve->params, param_len) &&
integers_equal(&a, curve->params + param_len, param_len) &&
integers_equal(&b, curve->params + param_len * 2, param_len) &&
@@ -420,8 +420,8 @@ int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point,
}

EC_KEY *d2i_ECPrivateKey(EC_KEY **out, const uint8_t **inp, long len) {
/* This function treats its |out| parameter differently from other |d2i|
* functions. If supplied, take the group from |*out|. */
// This function treats its |out| parameter differently from other |d2i|
// functions. If supplied, take the group from |*out|.
const EC_GROUP *group = NULL;
if (out != NULL && *out != NULL) {
group = EC_KEY_get0_group(*out);
@@ -515,7 +515,7 @@ EC_KEY *o2i_ECPublicKey(EC_KEY **keyp, const uint8_t **inp, long len) {
OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB);
return NULL;
}
/* save the point conversion form */
// save the point conversion form
ret->conv_form = (point_conversion_form_t)(*inp[0] & ~0x01);
*inp += len;
return ret;
@@ -534,7 +534,7 @@ int i2o_ECPublicKey(const EC_KEY *key, uint8_t **outp) {
0, NULL);

if (outp == NULL || buf_len == 0) {
/* out == NULL => just return the length of the octet string */
// out == NULL => just return the length of the octet string
return buf_len;
}



+ 1
- 1
crypto/ecdh/ecdh.c

@@ -138,7 +138,7 @@ int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key,
goto err;
}
} else {
/* no KDF, just copy as much as we can */
// no KDF, just copy as much as we can
if (buflen < outlen) {
outlen = buflen;
}


+ 9
- 9
crypto/ecdsa_extra/ecdsa_asn1.c

@@ -120,17 +120,17 @@ int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len,
int ret = 0;
uint8_t *der = NULL;

/* Decode the ECDSA signature. */
// Decode the ECDSA signature.
s = ECDSA_SIG_from_bytes(sig, sig_len);
if (s == NULL) {
goto err;
}

/* Defend against potential laxness in the DER parser. */
// Defend against potential laxness in the DER parser.
size_t der_len;
if (!ECDSA_SIG_to_bytes(&der, &der_len, s) ||
der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len) != 0) {
/* This should never happen. crypto/bytestring is strictly DER. */
// This should never happen. crypto/bytestring is strictly DER.
OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR);
goto err;
}
@@ -219,8 +219,8 @@ int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len,
return 1;
}

/* der_len_len returns the number of bytes needed to represent a length of |len|
* in DER. */
// der_len_len returns the number of bytes needed to represent a length of |len|
// in DER.
static size_t der_len_len(size_t len) {
if (len < 0x80) {
return 1;
@@ -234,18 +234,18 @@ static size_t der_len_len(size_t len) {
}

size_t ECDSA_SIG_max_len(size_t order_len) {
/* Compute the maximum length of an |order_len| byte integer. Defensively
* assume that the leading 0x00 is included. */
// Compute the maximum length of an |order_len| byte integer. Defensively
// assume that the leading 0x00 is included.
size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len;
if (integer_len < order_len) {
return 0;
}
/* An ECDSA signature is two INTEGERs. */
// An ECDSA signature is two INTEGERs.
size_t value_len = 2 * integer_len;
if (value_len < integer_len) {
return 0;
}
/* Add the header. */
// Add the header.
size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len;
if (ret < value_len) {
return 0;
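ECDSA_verify above defends against parser laxness by re-serializing the decoded signature and requiring a byte-exact match. The following is a sketch of that pattern using the functions named in the hunk, not the function's exact body; the header names are assumed, and plain memcmp stands in for the library's constant-time wrapper.

#include <string.h>

#include <openssl/ecdsa.h>
#include <openssl/mem.h>

// Returns 1 if |sig| is exactly the strict DER encoding of its (r, s) pair,
// by round-tripping through the strict serializer and comparing bytes.
static int sig_is_strict_der(const uint8_t *sig, size_t sig_len) {
  int ok = 0;
  uint8_t *der = NULL;
  size_t der_len;
  ECDSA_SIG *s = ECDSA_SIG_from_bytes(sig, sig_len);
  if (s != NULL &&
      ECDSA_SIG_to_bytes(&der, &der_len, s) &&
      der_len == sig_len &&
      memcmp(sig, der, sig_len) == 0) {
    ok = 1;
  }
  OPENSSL_free(der);
  ECDSA_SIG_free(s);
  return ok;
}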


+ 6
- 6
crypto/engine/engine.c

@@ -42,15 +42,15 @@ ENGINE *ENGINE_new(void) {
}

void ENGINE_free(ENGINE *engine) {
/* Methods are currently required to be static so are not unref'ed. */
// Methods are currently required to be static so are not unref'ed.
OPENSSL_free(engine);
}

/* set_method takes a pointer to a method and its given size and sets
* |*out_member| to point to it. This function might want to be extended in the
* future to support making a copy of the method so that a stable ABI for
* ENGINEs can be supported. But, for the moment, all *_METHODS must be
* static. */
// set_method takes a pointer to a method and its given size and sets
// |*out_member| to point to it. This function might want to be extended in the
// future to support making a copy of the method so that a stable ABI for
// ENGINEs can be supported. But, for the moment, all *_METHODS must be
// static.
static int set_method(void **out_member, const void *method, size_t method_size,
size_t compiled_size) {
const struct openssl_method_common_st *common = method;


+ 81
- 81
crypto/err/err.c

@@ -129,7 +129,7 @@ extern const uint32_t kOpenSSLReasonValues[];
extern const size_t kOpenSSLReasonValuesLen;
extern const char kOpenSSLReasonStringData[];

/* err_clear_data frees the optional |data| member of the given error. */
// err_clear_data frees the optional |data| member of the given error.
static void err_clear_data(struct err_error_st *error) {
if ((error->flags & ERR_FLAG_MALLOCED) != 0) {
OPENSSL_free(error->data);
@@ -138,17 +138,17 @@ static void err_clear_data(struct err_error_st *error) {
error->flags &= ~ERR_FLAG_MALLOCED;
}

/* err_clear clears the given queued error. */
// err_clear clears the given queued error.
static void err_clear(struct err_error_st *error) {
err_clear_data(error);
OPENSSL_memset(error, 0, sizeof(struct err_error_st));
}

/* global_next_library contains the next custom library value to return. */
// global_next_library contains the next custom library value to return.
static int global_next_library = ERR_NUM_LIBS;

/* global_next_library_mutex protects |global_next_library| from concurrent
* updates. */
// global_next_library_mutex protects |global_next_library| from concurrent
// updates.
static struct CRYPTO_STATIC_MUTEX global_next_library_mutex =
CRYPTO_STATIC_MUTEX_INIT;

@@ -167,7 +167,7 @@ static void err_state_free(void *statep) {
OPENSSL_free(state);
}

/* err_get_state gets the ERR_STATE object for the current thread. */
// err_get_state gets the ERR_STATE object for the current thread.
static ERR_STATE *err_get_state(void) {
ERR_STATE *state = CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_ERR);
if (state == NULL) {
@@ -199,7 +199,7 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line,

if (top) {
assert(!inc);
/* last error */
// last error
i = state->top;
} else {
i = (state->bottom + 1) % ERR_NUM_ERRORS;
@@ -229,11 +229,11 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line,
if (flags != NULL) {
*flags = error->flags & ERR_FLAG_PUBLIC_MASK;
}
/* If this error is being removed, take ownership of data from
* the error. The semantics are such that the caller doesn't
* take ownership either. Instead the error system takes
* ownership and retains it until the next call that affects the
* error queue. */
// If this error is being removed, take ownership of data from
// the error. The semantics are such that the caller doesn't
// take ownership either. Instead the error system takes
// ownership and retains it until the next call that affects the
// error queue.
if (inc) {
if (error->flags & ERR_FLAG_MALLOCED) {
OPENSSL_free(state->to_free);
@@ -342,13 +342,13 @@ char *ERR_error_string(uint32_t packed_error, char *ret) {
static char buf[ERR_ERROR_STRING_BUF_LEN];

if (ret == NULL) {
/* TODO(fork): remove this. */
// TODO(fork): remove this.
ret = buf;
}

#if !defined(NDEBUG)
/* This is aimed to help catch callers who don't provide
* |ERR_ERROR_STRING_BUF_LEN| bytes of space. */
// This is aimed to help catch callers who don't provide
// |ERR_ERROR_STRING_BUF_LEN| bytes of space.
OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN);
#endif

@@ -386,15 +386,15 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) {
packed_error, lib_str, reason_str);

if (strlen(buf) == len - 1) {
/* output may be truncated; make sure we always have 5 colon-separated
* fields, i.e. 4 colons. */
// output may be truncated; make sure we always have 5 colon-separated
// fields, i.e. 4 colons.
static const unsigned num_colons = 4;
unsigned i;
char *s = buf;

if (len <= num_colons) {
/* In this situation it's not possible to ensure that the correct number
* of colons are included in the output. */
// In this situation it's not possible to ensure that the correct number
// of colons are included in the output.
return;
}

@@ -403,10 +403,10 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) {
char *last_pos = &buf[len - 1] - num_colons + i;

if (colon == NULL || colon > last_pos) {
/* set colon |i| at last possible position (buf[len-1] is the
* terminating 0). If we're setting this colon, then all whole of the
* rest of the string must be colons in order to have the correct
* number. */
// set colon |i| at last possible position (buf[len-1] is the
// terminating 0). If we're setting this colon, then all whole of the
// rest of the string must be colons in order to have the correct
// number.
OPENSSL_memset(last_pos, ':', num_colons - i);
break;
}
@@ -431,25 +431,25 @@ static int err_string_cmp(const void *a, const void *b) {
}
}

/* err_string_lookup looks up the string associated with |lib| and |key| in
* |values| and |string_data|. It returns the string or NULL if not found. */
// err_string_lookup looks up the string associated with |lib| and |key| in
// |values| and |string_data|. It returns the string or NULL if not found.
static const char *err_string_lookup(uint32_t lib, uint32_t key,
const uint32_t *values,
size_t num_values,
const char *string_data) {
/* |values| points to data in err_data.h, which is generated by
* err_data_generate.go. It's an array of uint32_t values. Each value has the
* following structure:
* | lib | key | offset |
* |6 bits| 11 bits | 15 bits |
*
* The |lib| value is a library identifier: one of the |ERR_LIB_*| values.
* The |key| is a reason code, depending on the context.
* The |offset| is the number of bytes from the start of |string_data| where
* the (NUL terminated) string for this value can be found.
*
* Values are sorted based on treating the |lib| and |key| part as an
* unsigned integer. */
// |values| points to data in err_data.h, which is generated by
// err_data_generate.go. It's an array of uint32_t values. Each value has the
// following structure:
// | lib | key | offset |
// |6 bits| 11 bits | 15 bits |
//
// The |lib| value is a library identifier: one of the |ERR_LIB_*| values.
// The |key| is a reason code, depending on the context.
// The |offset| is the number of bytes from the start of |string_data| where
// the (NUL terminated) string for this value can be found.
//
// Values are sorted based on treating the |lib| and |key| part as an
// unsigned integer.
if (lib >= (1 << 6) || key >= (1 << 11)) {
return NULL;
}
@@ -465,38 +465,38 @@ static const char *err_string_lookup(uint32_t lib, uint32_t key,

static const char *const kLibraryNames[ERR_NUM_LIBS] = {
"invalid library (0)",
"unknown library", /* ERR_LIB_NONE */
"system library", /* ERR_LIB_SYS */
"bignum routines", /* ERR_LIB_BN */
"RSA routines", /* ERR_LIB_RSA */
"Diffie-Hellman routines", /* ERR_LIB_DH */
"public key routines", /* ERR_LIB_EVP */
"memory buffer routines", /* ERR_LIB_BUF */
"object identifier routines", /* ERR_LIB_OBJ */
"PEM routines", /* ERR_LIB_PEM */
"DSA routines", /* ERR_LIB_DSA */
"X.509 certificate routines", /* ERR_LIB_X509 */
"ASN.1 encoding routines", /* ERR_LIB_ASN1 */
"configuration file routines", /* ERR_LIB_CONF */
"common libcrypto routines", /* ERR_LIB_CRYPTO */
"elliptic curve routines", /* ERR_LIB_EC */
"SSL routines", /* ERR_LIB_SSL */
"BIO routines", /* ERR_LIB_BIO */
"PKCS7 routines", /* ERR_LIB_PKCS7 */
"PKCS8 routines", /* ERR_LIB_PKCS8 */
"X509 V3 routines", /* ERR_LIB_X509V3 */
"random number generator", /* ERR_LIB_RAND */
"ENGINE routines", /* ERR_LIB_ENGINE */
"OCSP routines", /* ERR_LIB_OCSP */
"UI routines", /* ERR_LIB_UI */
"COMP routines", /* ERR_LIB_COMP */
"ECDSA routines", /* ERR_LIB_ECDSA */
"ECDH routines", /* ERR_LIB_ECDH */
"HMAC routines", /* ERR_LIB_HMAC */
"Digest functions", /* ERR_LIB_DIGEST */
"Cipher functions", /* ERR_LIB_CIPHER */
"HKDF functions", /* ERR_LIB_HKDF */
"User defined functions", /* ERR_LIB_USER */
"unknown library", // ERR_LIB_NONE
"system library", // ERR_LIB_SYS
"bignum routines", // ERR_LIB_BN
"RSA routines", // ERR_LIB_RSA
"Diffie-Hellman routines", // ERR_LIB_DH
"public key routines", // ERR_LIB_EVP
"memory buffer routines", // ERR_LIB_BUF
"object identifier routines", // ERR_LIB_OBJ
"PEM routines", // ERR_LIB_PEM
"DSA routines", // ERR_LIB_DSA
"X.509 certificate routines", // ERR_LIB_X509
"ASN.1 encoding routines", // ERR_LIB_ASN1
"configuration file routines", // ERR_LIB_CONF
"common libcrypto routines", // ERR_LIB_CRYPTO
"elliptic curve routines", // ERR_LIB_EC
"SSL routines", // ERR_LIB_SSL
"BIO routines", // ERR_LIB_BIO
"PKCS7 routines", // ERR_LIB_PKCS7
"PKCS8 routines", // ERR_LIB_PKCS8
"X509 V3 routines", // ERR_LIB_X509V3
"random number generator", // ERR_LIB_RAND
"ENGINE routines", // ERR_LIB_ENGINE
"OCSP routines", // ERR_LIB_OCSP
"UI routines", // ERR_LIB_UI
"COMP routines", // ERR_LIB_COMP
"ECDSA routines", // ERR_LIB_ECDSA
"ECDH routines", // ERR_LIB_ECDH
"HMAC routines", // ERR_LIB_HMAC
"Digest functions", // ERR_LIB_DIGEST
"Cipher functions", // ERR_LIB_CIPHER
"HKDF functions", // ERR_LIB_HKDF
"User defined functions", // ERR_LIB_USER
};

const char *ERR_lib_error_string(uint32_t packed_error) {
@@ -555,8 +555,8 @@ void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) {
int line, flags;
uint32_t packed_error;

/* thread_hash is the least-significant bits of the |ERR_STATE| pointer value
* for this thread. */
// thread_hash is the least-significant bits of the |ERR_STATE| pointer value
// for this thread.
const unsigned long thread_hash = (uintptr_t) err_get_state();

for (;;) {
@@ -585,8 +585,8 @@ void ERR_print_errors_fp(FILE *file) {
ERR_print_errors_cb(print_errors_to_file, file);
}

/* err_set_error_data sets the data on the most recent error. The |flags|
* argument is a combination of the |ERR_FLAG_*| values. */
// err_set_error_data sets the data on the most recent error. The |flags|
// argument is a combination of the |ERR_FLAG_*| values.
static void err_set_error_data(char *data, int flags) {
ERR_STATE *const state = err_get_state();
struct err_error_st *error;
@@ -634,9 +634,9 @@ void ERR_put_error(int library, int unused, int reason, const char *file,
error->packed = ERR_PACK(library, reason);
}

/* ERR_add_error_data_vdata takes a variable number of const char* pointers,
* concatenates them and sets the result as the data on the most recent
* error. */
// ERR_add_error_data_vdata takes a variable number of const char* pointers,
// concatenates them and sets the result as the data on the most recent
// error.
static void err_add_error_vdata(unsigned num, va_list args) {
size_t alloced, new_len, len = 0, substr_len;
char *buf;
@@ -661,7 +661,7 @@ static void err_add_error_vdata(unsigned num, va_list args) {
char *new_buf;

if (alloced + 20 + 1 < alloced) {
/* overflow. */
// overflow.
OPENSSL_free(buf);
return;
}
@@ -695,9 +695,9 @@ void ERR_add_error_dataf(const char *format, ...) {
char *buf;
static const unsigned buf_len = 256;

/* A fixed-size buffer is used because va_copy (which would be needed in
* order to call vsnprintf twice and measure the buffer) wasn't defined until
* C99. */
// A fixed-size buffer is used because va_copy (which would be needed in
// order to call vsnprintf twice and measure the buffer) wasn't defined until
// C99.
buf = OPENSSL_malloc(buf_len + 1);
if (buf == NULL) {
return;
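err_string_lookup above documents the packed uint32 layout generated into err_data.h. A tiny illustration of unpacking one such value under that 6/11/15-bit split, assuming (as the field order in the comment suggests) that lib sits in the most significant bits; the names are local to this sketch.

#include <stdint.h>

// | lib (6 bits) | key (11 bits) | offset (15 bits) |, as described above.
static void unpack_err_entry(uint32_t v, uint32_t *lib, uint32_t *key,
                             uint32_t *offset) {
  *offset = v & 0x7fff;        // low 15 bits: byte offset into the string data
  *key = (v >> 15) & 0x7ff;    // next 11 bits: reason code
  *lib = v >> 26;              // top 6 bits: one of the ERR_LIB_* values
}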


+ 3
- 3
crypto/err/err_test.cc

@@ -30,9 +30,9 @@ TEST(ErrTest, Overflow) {
for (unsigned i = 0; i < ERR_NUM_ERRORS - 1; i++) {
SCOPED_TRACE(i);
uint32_t err = ERR_get_error();
/* Errors are returned in order they were pushed, with the least recent ones
* removed, up to |ERR_NUM_ERRORS - 1| errors. So the errors returned are
* |ERR_NUM_ERRORS + 2| through |ERR_NUM_ERRORS * 2|, inclusive. */
// Errors are returned in order they were pushed, with the least recent ones
// removed, up to |ERR_NUM_ERRORS - 1| errors. So the errors returned are
// |ERR_NUM_ERRORS + 2| through |ERR_NUM_ERRORS * 2|, inclusive.
EXPECT_NE(0u, err);
EXPECT_EQ(static_cast<int>(i + ERR_NUM_ERRORS + 2), ERR_GET_REASON(err));
}


+ 2
- 2
crypto/evp/digestsign.c

@@ -196,8 +196,8 @@ int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig,
int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len,
const uint8_t *data, size_t data_len) {
if (uses_prehash(ctx, evp_sign)) {
/* If |out_sig| is NULL, the caller is only querying the maximum output
* length. |data| should only be incorporated in the final call. */
// If |out_sig| is NULL, the caller is only querying the maximum output
// length. |data| should only be incorporated in the final call.
if (out_sig != NULL &&
!EVP_DigestSignUpdate(ctx, data, data_len)) {
return 0;


+ 4
- 4
crypto/evp/evp.c

@@ -127,7 +127,7 @@ int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b) {

if (a->ameth) {
int ret;
/* Compare parameters if the algorithm has them */
// Compare parameters if the algorithm has them
if (a->ameth->param_cmp) {
ret = a->ameth->param_cmp(a, b);
if (ret <= 0) {
@@ -187,9 +187,9 @@ int EVP_PKEY_id(const EVP_PKEY *pkey) {
return pkey->type;
}

/* evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which
* should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is
* unknown. */
// evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which
// should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is
// unknown.
static const EVP_PKEY_ASN1_METHOD *evp_pkey_asn1_find(int nid) {
switch (nid) {
case EVP_PKEY_RSA:


+ 15
- 15
crypto/evp/evp_asn1.c

@@ -94,7 +94,7 @@ static int parse_key_type(CBS *cbs, int *out_type) {
}

EVP_PKEY *EVP_parse_public_key(CBS *cbs) {
/* Parse the SubjectPublicKeyInfo. */
// Parse the SubjectPublicKeyInfo.
CBS spki, algorithm, key;
int type;
uint8_t padding;
@@ -103,22 +103,22 @@ EVP_PKEY *EVP_parse_public_key(CBS *cbs) {
!parse_key_type(&algorithm, &type) ||
!CBS_get_asn1(&spki, &key, CBS_ASN1_BITSTRING) ||
CBS_len(&spki) != 0 ||
/* Every key type defined encodes the key as a byte string with the same
* conversion to BIT STRING. */
// Every key type defined encodes the key as a byte string with the same
// conversion to BIT STRING.
!CBS_get_u8(&key, &padding) ||
padding != 0) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
return NULL;
}

/* Set up an |EVP_PKEY| of the appropriate type. */
// Set up an |EVP_PKEY| of the appropriate type.
EVP_PKEY *ret = EVP_PKEY_new();
if (ret == NULL ||
!EVP_PKEY_set_type(ret, type)) {
goto err;
}

/* Call into the type-specific SPKI decoding function. */
// Call into the type-specific SPKI decoding function.
if (ret->ameth->pub_decode == NULL) {
OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM);
goto err;
@@ -144,7 +144,7 @@ int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key) {
}

EVP_PKEY *EVP_parse_private_key(CBS *cbs) {
/* Parse the PrivateKeyInfo. */
// Parse the PrivateKeyInfo.
CBS pkcs8, algorithm, key;
uint64_t version;
int type;
@@ -158,16 +158,16 @@ EVP_PKEY *EVP_parse_private_key(CBS *cbs) {
return NULL;
}

/* A PrivateKeyInfo ends with a SET of Attributes which we ignore. */
// A PrivateKeyInfo ends with a SET of Attributes which we ignore.

/* Set up an |EVP_PKEY| of the appropriate type. */
// Set up an |EVP_PKEY| of the appropriate type.
EVP_PKEY *ret = EVP_PKEY_new();
if (ret == NULL ||
!EVP_PKEY_set_type(ret, type)) {
goto err;
}

/* Call into the type-specific PrivateKeyInfo decoding function. */
// Call into the type-specific PrivateKeyInfo decoding function.
if (ret->ameth->priv_decode == NULL) {
OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM);
goto err;
@@ -240,12 +240,12 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp,
return NULL;
}

/* Parse with the legacy format. */
// Parse with the legacy format.
CBS cbs;
CBS_init(&cbs, *inp, (size_t)len);
EVP_PKEY *ret = old_priv_decode(&cbs, type);
if (ret == NULL) {
/* Try again with PKCS#8. */
// Try again with PKCS#8.
ERR_clear_error();
CBS_init(&cbs, *inp, (size_t)len);
ret = EVP_parse_private_key(&cbs);
@@ -267,8 +267,8 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp,
return ret;
}

/* num_elements parses one SEQUENCE from |in| and returns the number of elements
* in it. On parse error, it returns zero. */
// num_elements parses one SEQUENCE from |in| and returns the number of elements
// in it. On parse error, it returns zero.
static size_t num_elements(const uint8_t *in, size_t in_len) {
CBS cbs, sequence;
CBS_init(&cbs, in, (size_t)in_len);
@@ -295,7 +295,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) {
return NULL;
}

/* Parse the input as a PKCS#8 PrivateKeyInfo. */
// Parse the input as a PKCS#8 PrivateKeyInfo.
CBS cbs;
CBS_init(&cbs, *inp, (size_t)len);
EVP_PKEY *ret = EVP_parse_private_key(&cbs);
@@ -309,7 +309,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) {
}
ERR_clear_error();

/* Count the elements to determine the legacy key format. */
// Count the elements to determine the legacy key format.
switch (num_elements(*inp, (size_t)len)) {
case 4:
return d2i_PrivateKey(EVP_PKEY_EC, out, inp, len);
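The PKCS#8 path above is also the one new callers use directly. A minimal usage sketch of EVP_parse_private_key with a CBS, assuming |in|/|in_len| hold a DER PrivateKeyInfo:

#include <openssl/bytestring.h>
#include <openssl/evp.h>

// Parses a DER-encoded PKCS#8 PrivateKeyInfo; returns NULL on error or if
// trailing data follows the structure.
static EVP_PKEY *parse_pkcs8(const uint8_t *in, size_t in_len) {
  CBS cbs;
  CBS_init(&cbs, in, in_len);
  EVP_PKEY *pkey = EVP_parse_private_key(&cbs);
  if (pkey != NULL && CBS_len(&cbs) != 0) {
    EVP_PKEY_free(pkey);  // reject trailing garbage
    return NULL;
  }
  return pkey;
}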


+ 5
- 5
crypto/evp/evp_ctx.c

@@ -369,11 +369,11 @@ int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer) {
return 0;
}

/* ran@cryptocom.ru: For clarity. The error is if parameters in peer are
* present (!missing) but don't match. EVP_PKEY_cmp_parameters may return
* 1 (match), 0 (don't match) and -2 (comparison is not defined). -1
* (different key types) is impossible here because it is checked earlier.
* -2 is OK for us here, as well as 1, so we can check for 0 only. */
// ran@cryptocom.ru: For clarity. The error is if parameters in peer are
// present (!missing) but don't match. EVP_PKEY_cmp_parameters may return
// 1 (match), 0 (don't match) and -2 (comparison is not defined). -1
// (different key types) is impossible here because it is checked earlier.
// -2 is OK for us here, as well as 1, so we can check for 0 only.
if (!EVP_PKEY_missing_parameters(peer) &&
!EVP_PKEY_cmp_parameters(ctx->pkey, peer)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_PARAMETERS);


+ 46
- 46
crypto/evp/internal.h

@@ -71,33 +71,33 @@ struct evp_pkey_asn1_method_st {
uint8_t oid[9];
uint8_t oid_len;

/* pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo
* and writes the result into |out|. It returns one on success and zero on
* error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER
* type field, and |key| is the contents of the subjectPublicKey with the
* leading padding byte checked and removed. Although X.509 uses BIT STRINGs
* to represent SubjectPublicKeyInfo, every key type defined encodes the key
* as a byte string with the same conversion to BIT STRING. */
// pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo
// and writes the result into |out|. It returns one on success and zero on
// error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER
// type field, and |key| is the contents of the subjectPublicKey with the
// leading padding byte checked and removed. Although X.509 uses BIT STRINGs
// to represent SubjectPublicKeyInfo, every key type defined encodes the key
// as a byte string with the same conversion to BIT STRING.
int (*pub_decode)(EVP_PKEY *out, CBS *params, CBS *key);

/* pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result
* to |out|. It returns one on success and zero on error. */
// pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result
// to |out|. It returns one on success and zero on error.
int (*pub_encode)(CBB *out, const EVP_PKEY *key);

int (*pub_cmp)(const EVP_PKEY *a, const EVP_PKEY *b);

/* priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the
* result into |out|. It returns one on success and zero on error. |params| is
* the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key|
* is the contents of the OCTET STRING privateKey field. */
// priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the
// result into |out|. It returns one on success and zero on error. |params| is
// the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key|
// is the contents of the OCTET STRING privateKey field.
int (*priv_decode)(EVP_PKEY *out, CBS *params, CBS *key);

/* priv_encode encodes |key| as a PrivateKeyInfo and appends the result to
* |out|. It returns one on success and zero on error. */
// priv_encode encodes |key| as a PrivateKeyInfo and appends the result to
// |out|. It returns one on success and zero on error.
int (*priv_encode)(CBB *out, const EVP_PKEY *key);

/* pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by
* custom implementations which do not expose key material and parameters.*/
// pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by
// custom implementations which do not expose key material and parameters.
int (*pkey_opaque)(const EVP_PKEY *pk);

int (*pkey_size)(const EVP_PKEY *pk);
@@ -130,33 +130,33 @@ struct evp_pkey_asn1_method_st {

#define EVP_PKEY_OP_TYPE_GEN EVP_PKEY_OP_KEYGEN

/* EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype|
* arguments can be -1 to specify that any type and operation are acceptable,
* otherwise |keytype| must match the type of |ctx| and the bits of |optype|
* must intersect the operation flags set on |ctx|.
*
* The |p1| and |p2| arguments depend on the value of |cmd|.
*
* It returns one on success and zero on error. */
// EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype|
// arguments can be -1 to specify that any type and operation are acceptable,
// otherwise |keytype| must match the type of |ctx| and the bits of |optype|
// must intersect the operation flags set on |ctx|.
//
// The |p1| and |p2| arguments depend on the value of |cmd|.
//
// It returns one on success and zero on error.
OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype,
int cmd, int p1, void *p2);

#define EVP_PKEY_CTRL_MD 1
#define EVP_PKEY_CTRL_GET_MD 2

/* EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|:
* 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key.
* If the return value is <= 0, the key is rejected.
* 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a
* peer key. If the return value is <= 0, the key is rejected.
* 2: Is called with |p2| == NULL to test whether the peer's key was used.
* (EC)DH always return one in this case.
* 3: Is called with |p2| == NULL to set whether the peer's key was used.
* (EC)DH always return one in this case. This was only used for GOST. */
// EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|:
// 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key.
// If the return value is <= 0, the key is rejected.
// 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a
// peer key. If the return value is <= 0, the key is rejected.
// 2: Is called with |p2| == NULL to test whether the peer's key was used.
// (EC)DH always return one in this case.
// 3: Is called with |p2| == NULL to set whether the peer's key was used.
// (EC)DH always return one in this case. This was only used for GOST.
#define EVP_PKEY_CTRL_PEER_KEY 3

/* EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl
* commands are numbered. */
// EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl
// commands are numbered.
#define EVP_PKEY_ALG_CTRL 0x1000

#define EVP_PKEY_CTRL_RSA_PADDING (EVP_PKEY_ALG_CTRL + 1)
@@ -173,17 +173,17 @@ OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype,
#define EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 12)

struct evp_pkey_ctx_st {
/* Method associated with this operation */
// Method associated with this operation
const EVP_PKEY_METHOD *pmeth;
/* Engine that implements this method or NULL if builtin */
// Engine that implements this method or NULL if builtin
ENGINE *engine;
/* Key: may be NULL */
// Key: may be NULL
EVP_PKEY *pkey;
/* Peer key for key agreement, may be NULL */
// Peer key for key agreement, may be NULL
EVP_PKEY *peerkey;
/* operation contains one of the |EVP_PKEY_OP_*| values. */
// operation contains one of the |EVP_PKEY_OP_*| values.
int operation;
/* Algorithm specific data */
// Algorithm specific data
void *data;
} /* EVP_PKEY_CTX */;

@@ -226,8 +226,8 @@ typedef struct {
union {
uint8_t priv[64];
struct {
/* Shift the location of the public key to align with where it is in the
* private key representation. */
// Shift the location of the public key to align with where it is in the
// private key representation.
uint8_t pad[32];
uint8_t value[32];
} pub;
@@ -246,7 +246,7 @@ extern const EVP_PKEY_METHOD ed25519_pkey_meth;


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_EVP_INTERNAL_H */
#endif // OPENSSL_HEADER_EVP_INTERNAL_H
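The ED25519_KEY union near the end of this header overlays a 32-byte pad so that pub.value aliases the second half of priv, i.e. the public half of the 64-byte seed-plus-public-key representation. A quick check of that layout, mirrored locally for the sketch:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Mirror of the union shown above, local to this sketch.
typedef union {
  uint8_t priv[64];
  struct {
    uint8_t pad[32];
    uint8_t value[32];
  } pub;
} ed25519_key_layout;

// pub.value must begin exactly 32 bytes in, where the public key lives within
// the 64-byte private representation, and the whole union stays 64 bytes.
static void check_layout(void) {
  assert(offsetof(ed25519_key_layout, pub.value) == 32);
  assert(sizeof(ed25519_key_layout) == 64);
}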

+ 9
- 9
crypto/evp/p_dsa_asn1.c

@@ -65,9 +65,9 @@


static int dsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See RFC 3279, section 2.3.2. */
// See RFC 3279, section 2.3.2.

/* Parameters may or may not be present. */
// Parameters may or may not be present.
DSA *dsa;
if (CBS_len(params) == 0) {
dsa = DSA_new();
@@ -105,7 +105,7 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) {
const DSA *dsa = key->pkey.dsa;
const int has_params = dsa->p != NULL && dsa->q != NULL && dsa->g != NULL;

/* See RFC 5480, section 2. */
// See RFC 5480, section 2.
CBB spki, algorithm, oid, key_bitstring;
if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
@@ -125,9 +125,9 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) {
}

static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See PKCS#11, v2.40, section 2.5. */
// See PKCS#11, v2.40, section 2.5.

/* Decode parameters. */
// Decode parameters.
BN_CTX *ctx = NULL;
DSA *dsa = DSA_parse_parameters(params);
if (dsa == NULL || CBS_len(params) != 0) {
@@ -141,14 +141,14 @@ static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
goto err;
}

/* Decode the key. */
// Decode the key.
if (!BN_parse_asn1_unsigned(key, dsa->priv_key) ||
CBS_len(key) != 0) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
goto err;
}

/* Calculate the public key. */
// Calculate the public key.
ctx = BN_CTX_new();
if (ctx == NULL ||
!BN_mod_exp_mont(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, ctx,
@@ -173,7 +173,7 @@ static int dsa_priv_encode(CBB *out, const EVP_PKEY *key) {
return 0;
}

/* See PKCS#11, v2.40, section 2.5. */
// See PKCS#11, v2.40, section 2.5.
CBB pkcs8, algorithm, oid, private_key;
if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1_uint64(&pkcs8, 0 /* version */) ||
@@ -245,7 +245,7 @@ static void int_dsa_free(EVP_PKEY *pkey) { DSA_free(pkey->pkey.dsa); }

const EVP_PKEY_ASN1_METHOD dsa_asn1_meth = {
EVP_PKEY_DSA,
/* 1.2.840.10040.4.1 */
// 1.2.840.10040.4.1
{0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x01}, 7,

dsa_pub_decode,


+ 4
- 4
crypto/evp/p_ec.c

@@ -74,7 +74,7 @@


typedef struct {
/* message digest */
// message digest
const EVP_MD *md;
} EC_PKEY_CTX;

@@ -161,8 +161,8 @@ static int pkey_ec_derive(EVP_PKEY_CTX *ctx, uint8_t *key,
}
pubkey = EC_KEY_get0_public_key(ctx->peerkey->pkey.ec);

/* NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is
* not an error, the result is truncated. */
// NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is
// not an error, the result is truncated.

outlen = *keylen;

@@ -196,7 +196,7 @@ static int pkey_ec_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) {
return 1;

case EVP_PKEY_CTRL_PEER_KEY:
/* Default behaviour is OK */
// Default behaviour is OK
return 1;

default:


+ 11
- 11
crypto/evp/p_ec_asn1.c

@@ -70,7 +70,7 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) {
const EC_GROUP *group = EC_KEY_get0_group(ec_key);
const EC_POINT *public_key = EC_KEY_get0_public_key(ec_key);

/* See RFC 5480, section 2. */
// See RFC 5480, section 2.
CBB spki, algorithm, oid, key_bitstring;
if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
@@ -90,9 +90,9 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) {
}

static int eckey_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See RFC 5480, section 2. */
// See RFC 5480, section 2.

/* The parameters are a named curve. */
// The parameters are a named curve.
EC_POINT *point = NULL;
EC_KEY *eckey = NULL;
EC_GROUP *group = EC_KEY_parse_curve_name(params);
@@ -141,7 +141,7 @@ static int eckey_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) {
}

static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See RFC 5915. */
// See RFC 5915.
EC_GROUP *group = EC_KEY_parse_parameters(params);
if (group == NULL || CBS_len(params) != 0) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
@@ -164,13 +164,13 @@ static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
static int eckey_priv_encode(CBB *out, const EVP_PKEY *key) {
const EC_KEY *ec_key = key->pkey.ec;

/* Omit the redundant copy of the curve name. This contradicts RFC 5915 but
* aligns with PKCS #11. SEC 1 only says they may be omitted if known by other
* means. Both OpenSSL and NSS omit the redundant parameters, so we omit them
* as well. */
// Omit the redundant copy of the curve name. This contradicts RFC 5915 but
// aligns with PKCS #11. SEC 1 only says they may be omitted if known by other
// means. Both OpenSSL and NSS omit the redundant parameters, so we omit them
// as well.
unsigned enc_flags = EC_KEY_get_enc_flags(ec_key) | EC_PKEY_NO_PARAMETERS;

/* See RFC 5915. */
// See RFC 5915.
CBB pkcs8, algorithm, oid, private_key;
if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1_uint64(&pkcs8, 0 /* version */) ||
@@ -219,7 +219,7 @@ static int ec_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) {
const EC_GROUP *group_a = EC_KEY_get0_group(a->pkey.ec),
*group_b = EC_KEY_get0_group(b->pkey.ec);
if (EC_GROUP_cmp(group_a, group_b, NULL) != 0) {
/* mismatch */
// mismatch
return 0;
}
return 1;
@@ -233,7 +233,7 @@ static int eckey_opaque(const EVP_PKEY *pkey) {

const EVP_PKEY_ASN1_METHOD ec_asn1_meth = {
EVP_PKEY_EC,
/* 1.2.840.10045.2.1 */
// 1.2.840.10045.2.1
{0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01}, 7,

eckey_pub_decode,


+ 1
- 1
crypto/evp/p_ed25519.c

@@ -20,7 +20,7 @@
#include "internal.h"


/* Ed25519 has no parameters to copy. */
// Ed25519 has no parameters to copy.
static int pkey_ed25519_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { return 1; }

static int pkey_ed25519_sign_message(EVP_PKEY_CTX *ctx, uint8_t *sig,


+ 11
- 11
crypto/evp/p_ed25519_asn1.c

@@ -61,9 +61,9 @@ static int set_privkey(EVP_PKEY *pkey, const uint8_t privkey[64]) {
}

static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See draft-ietf-curdle-pkix-04, section 4. */
// See draft-ietf-curdle-pkix-04, section 4.

/* The parameters must be omitted. Public keys have length 32. */
// The parameters must be omitted. Public keys have length 32.
if (CBS_len(params) != 0 ||
CBS_len(key) != 32) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
@@ -76,7 +76,7 @@ static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
static int ed25519_pub_encode(CBB *out, const EVP_PKEY *pkey) {
const ED25519_KEY *key = pkey->pkey.ptr;

/* See draft-ietf-curdle-pkix-04, section 4. */
// See draft-ietf-curdle-pkix-04, section 4.
CBB spki, algorithm, oid, key_bitstring;
if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
@@ -100,10 +100,10 @@ static int ed25519_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) {
}

static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* See draft-ietf-curdle-pkix-04, section 7. */
// See draft-ietf-curdle-pkix-04, section 7.

/* Parameters must be empty. The key is a 32-byte value wrapped in an extra
* OCTET STRING layer. */
// Parameters must be empty. The key is a 32-byte value wrapped in an extra
// OCTET STRING layer.
CBS inner;
if (CBS_len(params) != 0 ||
!CBS_get_asn1(key, &inner, CBS_ASN1_OCTETSTRING) ||
@@ -113,8 +113,8 @@ static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
return 0;
}

/* The PKCS#8 encoding stores only the 32-byte seed, so we must recover the
* full representation which we use from it. */
// The PKCS#8 encoding stores only the 32-byte seed, so we must recover the
// full representation which we use from it.
uint8_t pubkey[32], privkey[64];
ED25519_keypair_from_seed(pubkey, privkey, CBS_data(&inner));
return set_privkey(out, privkey);
@@ -127,7 +127,7 @@ static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) {
return 0;
}

/* See draft-ietf-curdle-pkix-04, section 7. */
// See draft-ietf-curdle-pkix-04, section 7.
CBB pkcs8, algorithm, oid, private_key, inner;
if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1_uint64(&pkcs8, 0 /* version */) ||
@@ -136,8 +136,8 @@ static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) {
!CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) ||
!CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) ||
!CBB_add_asn1(&private_key, &inner, CBS_ASN1_OCTETSTRING) ||
/* The PKCS#8 encoding stores only the 32-byte seed which is the first 32
* bytes of the private key. */
// The PKCS#8 encoding stores only the 32-byte seed which is the first 32
// bytes of the private key.
!CBB_add_bytes(&inner, key->key.priv, 32) ||
!CBB_flush(out)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR);


+ 10
- 10
crypto/evp/p_rsa.c

@@ -73,21 +73,21 @@


typedef struct {
/* Key gen parameters */
// Key gen parameters
int nbits;
BIGNUM *pub_exp;
/* RSA padding mode */
// RSA padding mode
int pad_mode;
/* message digest */
// message digest
const EVP_MD *md;
/* message digest for MGF1 */
// message digest for MGF1
const EVP_MD *mgf1md;
/* PSS salt length */
// PSS salt length
int saltlen;
/* tbuf is a buffer which is either NULL, or is the size of the RSA modulus.
* It's used to store the output of RSA operations. */
// tbuf is a buffer which is either NULL, or is the size of the RSA modulus.
// It's used to store the output of RSA operations.
uint8_t *tbuf;
/* OAEP label */
// OAEP label
uint8_t *oaep_label;
size_t oaep_labellen;
} RSA_PKEY_CTX;
@@ -260,7 +260,7 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out,
return 0;
}

/* Assemble the encoded hash, using a placeholder hash value. */
// Assemble the encoded hash, using a placeholder hash value.
static const uint8_t kDummyHash[EVP_MAX_MD_SIZE] = {0};
const size_t hash_len = EVP_MD_size(rctx->md);
uint8_t *asn1_prefix;
@@ -278,7 +278,7 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out,
if (!RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, sig_len,
RSA_PKCS1_PADDING) ||
rslen != asn1_prefix_len ||
/* Compare all but the hash suffix. */
// Compare all but the hash suffix.
CRYPTO_memcmp(rctx->tbuf, asn1_prefix, asn1_prefix_len - hash_len) != 0) {
ok = 0;
}


+ 11
- 11
crypto/evp/p_rsa_asn1.c

@@ -77,7 +77,7 @@ void EVP_set_buggy_rsa_parser(int buggy) {
}

static int rsa_pub_encode(CBB *out, const EVP_PKEY *key) {
/* See RFC 3279, section 2.3.1. */
// See RFC 3279, section 2.3.1.
CBB spki, algorithm, oid, null, key_bitstring;
if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
!CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
@@ -101,9 +101,9 @@ static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
buggy = g_buggy;
CRYPTO_STATIC_MUTEX_unlock_read(&g_buggy_lock);

/* See RFC 3279, section 2.3.1. */
// See RFC 3279, section 2.3.1.

/* The parameters must be NULL. */
// The parameters must be NULL.
CBS null;
if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) ||
CBS_len(&null) != 0 ||
@@ -112,12 +112,12 @@ static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
return 0;
}

/* Estonian IDs issued between September 2014 to September 2015 are
* broken. See https://crbug.com/532048 and https://crbug.com/534766.
*
* TODO(davidben): Switch this to the strict version in March 2016 or when
* Chromium can force client certificates down a different codepath, whichever
* comes first. */
// Estonian IDs issued between September 2014 to September 2015 are
// broken. See https://crbug.com/532048 and https://crbug.com/534766.
//
// TODO(davidben): Switch this to the strict version in March 2016 or when
// Chromium can force client certificates down a different codepath, whichever
// comes first.
RSA *rsa = buggy ? RSA_parse_public_key_buggy(key) : RSA_parse_public_key(key);
if (rsa == NULL || CBS_len(key) != 0) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
@@ -153,7 +153,7 @@ static int rsa_priv_encode(CBB *out, const EVP_PKEY *key) {
}

static int rsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
/* Per RFC 3447, A.1, the parameters have type NULL. */
// Per RFC 3447, A.1, the parameters have type NULL.
CBS null;
if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) ||
CBS_len(&null) != 0 ||
@@ -189,7 +189,7 @@ static void int_rsa_free(EVP_PKEY *pkey) { RSA_free(pkey->pkey.rsa); }

const EVP_PKEY_ASN1_METHOD rsa_asn1_meth = {
EVP_PKEY_RSA,
/* 1.2.840.113549.1.1.1 */
// 1.2.840.113549.1.1.1
{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01}, 9,

rsa_pub_decode,


+ 14
- 14
crypto/evp/pbkdf.c

@@ -65,7 +65,7 @@
int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len,
const uint8_t *salt, size_t salt_len, unsigned iterations,
const EVP_MD *digest, size_t key_len, uint8_t *out_key) {
/* See RFC 8018, section 5.2. */
// See RFC 8018, section 5.2.
int ret = 0;
size_t md_len = EVP_MD_size(digest);
uint32_t i = 1;
@@ -88,7 +88,7 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len,
i_buf[2] = (uint8_t)((i >> 8) & 0xff);
i_buf[3] = (uint8_t)(i & 0xff);

/* Compute U_1. */
// Compute U_1.
uint8_t digest_tmp[EVP_MAX_MD_SIZE];
if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) ||
!HMAC_Update(&hctx, salt, salt_len) ||
@@ -99,7 +99,7 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len,

OPENSSL_memcpy(out_key, digest_tmp, todo);
for (unsigned j = 1; j < iterations; j++) {
/* Compute the remaining U_* values and XOR. */
// Compute the remaining U_* values and XOR.
if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) ||
!HMAC_Update(&hctx, digest_tmp, md_len) ||
!HMAC_Final(&hctx, digest_tmp, NULL)) {
@@ -115,17 +115,17 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len,
i++;
}

/* RFC 8018 describes iterations (c) as being a "positive integer", so a
* value of 0 is an error.
*
* Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return
* value, expecting it to succeed and unconditionally using |out_key|. As a
* precaution for such callsites in external code, the old behavior of
* iterations < 1 being treated as iterations == 1 is preserved, but
* additionally an error result is returned.
*
* TODO(eroman): Figure out how to remove this compatibility hack, or change
* the default to something more sensible like 2048. */
// RFC 8018 describes iterations (c) as being a "positive integer", so a
// value of 0 is an error.
//
// Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return
// value, expecting it to succeed and unconditionally using |out_key|. As a
// precaution for such callsites in external code, the old behavior of
// iterations < 1 being treated as iterations == 1 is preserved, but
// additionally an error result is returned.
//
// TODO(eroman): Figure out how to remove this compatibility hack, or change
// the default to something more sensible like 2048.
if (iterations == 0) {
goto err;
}
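Given the PKCS5_PBKDF2_HMAC signature at the top of this hunk, a typical call looks like the sketch below, deriving a 32-byte key with SHA-256; the iteration count is an illustrative value, not a recommendation drawn from the code.

#include <openssl/digest.h>
#include <openssl/evp.h>

// Derives a 32-byte key from |password| and |salt|. Returns 1 on success.
static int derive_key(const char *password, size_t password_len,
                      const uint8_t *salt, size_t salt_len,
                      uint8_t out_key[32]) {
  // 100000 iterations is an arbitrary example value.
  return PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len,
                           100000 /* iterations */, EVP_sha256(), 32, out_key);
}

Callers should still check the return value: as the comment above explains, iterations == 0 fills out_key for compatibility but is reported as an error.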


+ 4
- 4
crypto/evp/print.c

@@ -131,7 +131,7 @@ static void update_buflen(const BIGNUM *b, size_t *pbuflen) {
}
}

/* RSA keys. */
// RSA keys.

static int do_rsa_print(BIO *out, const RSA *rsa, int off,
int include_private) {
@@ -212,7 +212,7 @@ static int rsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent,
}


/* DSA keys. */
// DSA keys.

static int do_dsa_print(BIO *bp, const DSA *x, int off, int ptype) {
uint8_t *m = NULL;
@@ -288,7 +288,7 @@ static int dsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent,
}


/* EC keys. */
// EC keys.

static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) {
uint8_t *buffer = NULL;
@@ -379,7 +379,7 @@ static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) {
if (pub_key_bytes != NULL) {
BIO_hexdump(bp, pub_key_bytes, pub_key_bytes_len, off);
}
/* TODO(fork): implement */
// TODO(fork): implement
/*
if (!ECPKParameters_print(bp, group, off))
goto err; */


+ 41
- 41
crypto/evp/scrypt.c

@@ -18,25 +18,25 @@
#include "../internal.h"


/* This file implements scrypt, described in RFC 7914.
*
* Note scrypt refers to both "blocks" and a "block size" parameter, r. These
* are two different notions of blocks. A Salsa20 block is 64 bytes long,
* represented in this implementation by 16 |uint32_t|s. |r| determines the
* number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r|
* Salsa20 blocks. This implementation refers to them as Salsa20 blocks and
* scrypt blocks, respectively. */
/* A block_t is a Salsa20 block. */
// This file implements scrypt, described in RFC 7914.
//
// Note scrypt refers to both "blocks" and a "block size" parameter, r. These
// are two different notions of blocks. A Salsa20 block is 64 bytes long,
// represented in this implementation by 16 |uint32_t|s. |r| determines the
// number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r|
// Salsa20 blocks. This implementation refers to them as Salsa20 blocks and
// scrypt blocks, respectively.
// A block_t is a Salsa20 block.
typedef struct { uint32_t words[16]; } block_t;

OPENSSL_COMPILE_ASSERT(sizeof(block_t) == 64, block_t_has_padding);

#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b))))

/* salsa208_word_specification implements the Salsa20/8 core function, also
* described in RFC 7914, section 3. It modifies the block at |inout|
* in-place. */
// salsa208_word_specification implements the Salsa20/8 core function, also
// described in RFC 7914, section 3. It modifies the block at |inout|
// in-place.
static void salsa208_word_specification(block_t *inout) {
block_t x;
OPENSSL_memcpy(&x, inout, sizeof(x));
@@ -81,16 +81,16 @@ static void salsa208_word_specification(block_t *inout) {
}
}

/* xor_block sets |*out| to be |*a| XOR |*b|. */
// xor_block sets |*out| to be |*a| XOR |*b|.
static void xor_block(block_t *out, const block_t *a, const block_t *b) {
for (size_t i = 0; i < 16; i++) {
out->words[i] = a->words[i] ^ b->words[i];
}
}

/* scryptBlockMix implements the function described in RFC 7914, section 4. B'
* is written to |out|. |out| and |B| may not alias and must be each one scrypt
* block (2 * |r| Salsa20 blocks) long. */
// scryptBlockMix implements the function described in RFC 7914, section 4. B'
// is written to |out|. |out| and |B| may not alias and must be each one scrypt
// block (2 * |r| Salsa20 blocks) long.
static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) {
assert(out != B);

@@ -100,19 +100,19 @@ static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) {
xor_block(&X, &X, &B[i]);
salsa208_word_specification(&X);

/* This implements the permutation in step 3. */
// This implements the permutation in step 3.
OPENSSL_memcpy(&out[i / 2 + (i & 1) * r], &X, sizeof(X));
}
}
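
The index expression out[i / 2 + (i & 1) * r] is the interleave from RFC 7914, section 4, step 3: even-numbered Salsa20 blocks fill the first half of the output and odd-numbered ones the second half. A tiny standalone sketch (r is an arbitrary illustrative value) that prints the mapping:

    #include <stdio.h>

    int main(void) {
      unsigned r = 2;  // illustrative scrypt block-size parameter
      for (unsigned i = 0; i < 2 * r; i++) {
        // Y_i lands at B'[i/2] when i is even and at B'[r + i/2] when i is odd.
        printf("Y_%u -> out[%u]\n", i, i / 2 + (i & 1) * r);
      }
      return 0;
    }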

/* scryptROMix implements the function described in RFC 7914, section 5. |B| is
* an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and
* |V| are scratch space allocated by the caller. |T| must have space for one
* scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt
* blocks (2 * |r| * |N| Salsa20 blocks). */
// scryptROMix implements the function described in RFC 7914, section 5. |B| is
// an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and
// |V| are scratch space allocated by the caller. |T| must have space for one
// scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt
// blocks (2 * |r| * |N| Salsa20 blocks).
static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T,
block_t *V) {
/* Steps 1 and 2. */
// Steps 1 and 2.
OPENSSL_memcpy(V, B, 2 * r * sizeof(block_t));
for (uint64_t i = 1; i < N; i++) {
scryptBlockMix(&V[2 * r * i /* scrypt block i */],
@@ -120,9 +120,9 @@ static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T,
}
scryptBlockMix(B, &V[2 * r * (N - 1) /* scrypt block N-1 */], r);

/* Step 3. */
// Step 3.
for (uint64_t i = 0; i < N; i++) {
/* Note this assumes |N| <= 2^32 and is a power of 2. */
// Note this assumes |N| <= 2^32 and is a power of 2.
uint32_t j = B[2 * r - 1].words[0] & (N - 1);
for (size_t k = 0; k < 2 * r; k++) {
xor_block(&T[k], &B[k], &V[2 * r * j + k]);
@@ -131,16 +131,16 @@ static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T,
}
}

/* SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the
* bounds on p in section 6:
*
* p <= ((2^32-1) * hLen) / MFLen iff
* p <= ((2^32-1) * 32) / (128 * r) iff
* p * r <= (2^30-1) */
// SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the
// bounds on p in section 6:
//
// p <= ((2^32-1) * hLen) / MFLen iff
// p <= ((2^32-1) * 32) / (128 * r) iff
// p * r <= (2^30-1)
#define SCRYPT_PR_MAX ((1 << 30) - 1)

/* SCRYPT_MAX_MEM is the default maximum memory that may be allocated by
* |EVP_PBE_scrypt|. */
// SCRYPT_MAX_MEM is the default maximum memory that may be allocated by
// |EVP_PBE_scrypt|.
#define SCRYPT_MAX_MEM (1024 * 1024 * 32)

int EVP_PBE_scrypt(const char *password, size_t password_len,
@@ -148,18 +148,18 @@ int EVP_PBE_scrypt(const char *password, size_t password_len,
uint64_t p, size_t max_mem, uint8_t *out_key,
size_t key_len) {
if (r == 0 || p == 0 || p > SCRYPT_PR_MAX / r ||
/* |N| must be a power of two. */
// |N| must be a power of two.
N < 2 || (N & (N - 1)) ||
/* We only support |N| <= 2^32 in |scryptROMix|. */
// We only support |N| <= 2^32 in |scryptROMix|.
N > UINT64_C(1) << 32 ||
/* Check that |N| < 2^(128×r / 8). */
// Check that |N| < 2^(128×r / 8).
(16 * r <= 63 && N >= UINT64_C(1) << (16 * r))) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PARAMETERS);
return 0;
}
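
As a usage sketch of the checks above, the call below uses the cost parameters from one of the RFC 7914 test vectors (N = 16384, r = 8, p = 1) and the default memory cap; the salt, N, and r arguments are assumed to sit in their usual positions in the public prototype, which the hunk header truncates.

    #include <openssl/evp.h>

    static int scrypt_example(void) {
      uint8_t key[64];
      // RFC 7914 test-vector parameters; the password and salt lengths match
      // the literal strings.
      return EVP_PBE_scrypt("pleaseletmein", 13,
                            (const uint8_t *)"SodiumChloride", 14,
                            16384 /* N */, 8 /* r */, 1 /* p */,
                            0 /* max_mem: use SCRYPT_MAX_MEM */,
                            key, sizeof(key));
    }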

/* Determine the amount of memory needed. B, T, and V are |p|, 1, and |N|
* scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s. */
// Determine the amount of memory needed. B, T, and V are |p|, 1, and |N|
// scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s.
if (max_mem == 0) {
max_mem = SCRYPT_MAX_MEM;
}
@@ -171,8 +171,8 @@ int EVP_PBE_scrypt(const char *password, size_t password_len,
return 0;
}

/* Allocate and divide up the scratch space. |max_mem| fits in a size_t, which
* is no bigger than uint64_t, so none of these operations may overflow. */
// Allocate and divide up the scratch space. |max_mem| fits in a size_t, which
// is no bigger than uint64_t, so none of these operations may overflow.
OPENSSL_COMPILE_ASSERT(UINT64_MAX >= ((size_t)-1), size_t_exceeds_u64);
size_t B_blocks = p * 2 * r;
size_t B_bytes = B_blocks * sizeof(block_t);


+ 12
- 12
crypto/ex_data.c

@@ -124,8 +124,8 @@
DEFINE_STACK_OF(CRYPTO_EX_DATA_FUNCS)

struct crypto_ex_data_func_st {
long argl; /* Arbitary long */
void *argp; /* Arbitary void pointer */
long argl; // Arbitrary long
void *argp; // Arbitrary void pointer
CRYPTO_EX_free *free_func;
};

@@ -179,7 +179,7 @@ int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val) {

n = sk_void_num(ad->sk);

/* Add NULL values until the stack is long enough. */
// Add NULL values until the stack is long enough.
for (i = n; i <= index; i++) {
if (!sk_void_push(ad->sk, NULL)) {
OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE);
@@ -198,19 +198,19 @@ void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx) {
return sk_void_value(ad->sk, idx);
}

/* get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any,
* for the given class. If there are some pointers, it sets |*out| to point to
* a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on
* success or zero on error. */
// get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any,
// for the given class. If there are some pointers, it sets |*out| to point to
// a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on
// success or zero on error.
static int get_func_pointers(STACK_OF(CRYPTO_EX_DATA_FUNCS) **out,
CRYPTO_EX_DATA_CLASS *ex_data_class) {
size_t n;

*out = NULL;

/* CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a
* shallow copy of the list under lock and then use the structures without
* the lock held. */
// CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a
// shallow copy of the list under lock and then use the structures without
// the lock held.
CRYPTO_STATIC_MUTEX_lock_read(&ex_data_class->lock);
n = sk_CRYPTO_EX_DATA_FUNCS_num(ex_data_class->meth);
if (n > 0) {
@@ -233,13 +233,13 @@ void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad) {
void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj,
CRYPTO_EX_DATA *ad) {
if (ad->sk == NULL) {
/* Nothing to do. */
// Nothing to do.
return;
}

STACK_OF(CRYPTO_EX_DATA_FUNCS) *func_pointers;
if (!get_func_pointers(&func_pointers, ex_data_class)) {
/* TODO(davidben): This leaks memory on malloc error. */
// TODO(davidben): This leaks memory on malloc error.
return;
}
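
Applications normally reach this machinery through per-type wrappers rather than calling CRYPTO_set_ex_data directly. A hedged sketch using the RSA wrappers, assuming their usual OpenSSL-style signatures; the helper names are made up for illustration.

    #include <openssl/rsa.h>

    static int g_app_index = -1;

    // Reserve an index once, e.g. from a CRYPTO_once callback. argl/argp are the
    // "arbitrary" values stored in crypto_ex_data_func_st above.
    static void init_app_index(void) {
      g_app_index = RSA_get_ex_new_index(0 /* argl */, NULL /* argp */, NULL,
                                         NULL, NULL /* free_func */);
    }

    static int set_app_state(RSA *rsa, void *state) {
      return RSA_set_ex_data(rsa, g_app_index, state);
    }

    static void *get_app_state(const RSA *rsa) {
      return RSA_get_ex_data(rsa, g_app_index);
    }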



+ 61
- 65
crypto/fipsmodule/aes/aes.c

@@ -59,16 +59,16 @@
#if defined(OPENSSL_NO_ASM) || \
(!defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) && !defined(OPENSSL_ARM))

/* Te0[x] = S [x].[02, 01, 01, 03];
* Te1[x] = S [x].[03, 02, 01, 01];
* Te2[x] = S [x].[01, 03, 02, 01];
* Te3[x] = S [x].[01, 01, 03, 02];
*
* Td0[x] = Si[x].[0e, 09, 0d, 0b];
* Td1[x] = Si[x].[0b, 0e, 09, 0d];
* Td2[x] = Si[x].[0d, 0b, 0e, 09];
* Td3[x] = Si[x].[09, 0d, 0b, 0e];
* Td4[x] = Si[x].[01]; */
// Te0[x] = S [x].[02, 01, 01, 03];
// Te1[x] = S [x].[03, 02, 01, 01];
// Te2[x] = S [x].[01, 03, 02, 01];
// Te3[x] = S [x].[01, 01, 03, 02];
//
// Td0[x] = Si[x].[0e, 09, 0d, 0b];
// Td1[x] = Si[x].[0b, 0e, 09, 0d];
// Td2[x] = Si[x].[0d, 0b, 0e, 09];
// Td3[x] = Si[x].[09, 0d, 0b, 0e];
// Td4[x] = Si[x].[01];

static const uint32_t Te0[256] = {
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU,
@@ -531,7 +531,7 @@ static const uint8_t Td4[256] = {
static const uint32_t rcon[] = {
0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000,
/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
// for 128-bit blocks, Rijndael never uses more than 10 rcon values
};

int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
@@ -634,7 +634,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
int i, j, status;
uint32_t temp;

/* first, start with an encryption schedule */
// first, start with an encryption schedule
status = AES_set_encrypt_key(key, bits, aeskey);
if (status < 0) {
return status;
@@ -642,7 +642,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {

rk = aeskey->rd_key;

/* invert the order of the round keys: */
// invert the order of the round keys:
for (i = 0, j = 4 * aeskey->rounds; i < j; i += 4, j -= 4) {
temp = rk[i];
rk[i] = rk[j];
@@ -657,8 +657,8 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
rk[i + 3] = rk[j + 3];
rk[j + 3] = temp;
}
/* apply the inverse MixColumn transform to all round keys but the first and
* the last: */
// apply the inverse MixColumn transform to all round keys but the first and
// the last:
for (i = 1; i < (int)aeskey->rounds; i++) {
rk += 4;
rk[0] =
@@ -682,19 +682,19 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
#endif /* ?FULL_UNROLL */
#endif // ?FULL_UNROLL

assert(in && out && key);
rk = key->rd_key;

/* map byte array block to cipher state
* and add initial round key: */
// map byte array block to cipher state
// and add initial round key:
s0 = GETU32(in) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
/* round 1: */
// round 1:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[4];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -703,7 +703,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[s1 & 0xff] ^ rk[6];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[7];
/* round 2: */
// round 2:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[8];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -712,7 +712,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[10];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[11];
/* round 3: */
// round 3:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[12];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -721,7 +721,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[s1 & 0xff] ^ rk[14];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[15];
/* round 4: */
// round 4:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[16];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -730,7 +730,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[18];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[19];
/* round 5: */
// round 5:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[20];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -739,7 +739,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[s1 & 0xff] ^ rk[22];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[23];
/* round 6: */
// round 6:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[24];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -748,7 +748,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[26];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[27];
/* round 7: */
// round 7:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[28];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -757,7 +757,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[s1 & 0xff] ^ rk[30];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[31];
/* round 8: */
// round 8:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[32];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -766,7 +766,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[34];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[35];
/* round 9: */
// round 9:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[36];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -776,7 +776,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[39];
if (key->rounds > 10) {
/* round 10: */
// round 10:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[40];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -785,7 +785,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[42];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[43];
/* round 11: */
// round 11:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[44];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -795,7 +795,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[47];
if (key->rounds > 12) {
/* round 12: */
// round 12:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[48];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -804,7 +804,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Te3[t1 & 0xff] ^ rk[50];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[51];
/* round 13: */
// round 13:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[52];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -816,10 +816,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
}
}
rk += key->rounds << 2;
#else /* !FULL_UNROLL */
/*
* Nr - 1 full rounds:
*/
#else // !FULL_UNROLL
// Nr - 1 full rounds:
r = key->rounds >> 1;
for (;;) {
t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
@@ -845,8 +843,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[(t2) & 0xff] ^ rk[3];
}
#endif /* ?FULL_UNROLL */
/* apply last round and map cipher state to byte array block: */
#endif // ?FULL_UNROLL
// apply last round and map cipher state to byte array block:
s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^
rk[0];
@@ -870,19 +868,19 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
#endif /* ?FULL_UNROLL */
#endif // ?FULL_UNROLL

assert(in && out && key);
rk = key->rd_key;

/* map byte array block to cipher state
* and add initial round key: */
// map byte array block to cipher state
// and add initial round key:
s0 = GETU32(in) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
/* round 1: */
// round 1:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[4];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -891,7 +889,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[s3 & 0xff] ^ rk[6];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[7];
/* round 2: */
// round 2:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[8];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -900,7 +898,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[10];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[11];
/* round 3: */
// round 3:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[12];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -909,7 +907,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[s3 & 0xff] ^ rk[14];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[15];
/* round 4: */
// round 4:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[16];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -918,7 +916,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[18];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[19];
/* round 5: */
// round 5:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[20];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -927,7 +925,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[s3 & 0xff] ^ rk[22];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[23];
/* round 6: */
// round 6:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[24];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -936,7 +934,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[26];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[27];
/* round 7: */
// round 7:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[28];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -945,7 +943,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[s3 & 0xff] ^ rk[30];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[31];
/* round 8: */
// round 8:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[32];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -954,7 +952,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[34];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[35];
/* round 9: */
// round 9:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[36];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -964,7 +962,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[39];
if (key->rounds > 10) {
/* round 10: */
// round 10:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[40];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -973,7 +971,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[42];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[43];
/* round 11: */
// round 11:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[44];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -983,7 +981,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[47];
if (key->rounds > 12) {
/* round 12: */
// round 12:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[48];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -992,7 +990,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
Td3[t3 & 0xff] ^ rk[50];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[51];
/* round 13: */
// round 13:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[52];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -1004,10 +1002,8 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
}
}
rk += key->rounds << 2;
#else /* !FULL_UNROLL */
/*
* Nr - 1 full rounds:
*/
#else // !FULL_UNROLL
// Nr - 1 full rounds:
r = key->rounds >> 1;
for (;;) {
t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
@@ -1033,9 +1029,9 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[(t0) & 0xff] ^ rk[3];
}
#endif /* ?FULL_UNROLL */
/* apply last round and
* map cipher state to byte array block: */
#endif // ?FULL_UNROLL
// apply last round and
// map cipher state to byte array block:
s0 = ((uint32_t)Td4[(t0 >> 24)] << 24) ^
((uint32_t)Td4[(t3 >> 16) & 0xff] << 16) ^
((uint32_t)Td4[(t2 >> 8) & 0xff] << 8) ^
@@ -1060,10 +1056,10 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {

#else

/* In this case several functions are provided by asm code. However, one cannot
* control asm symbol visibility with command line flags and such so they are
* always hidden and wrapped by these C functions, which can be so
* controlled. */
// In this case several functions are provided by asm code. However, one cannot
// control asm symbol visibility with command line flags and such so they are
// always hidden and wrapped by these C functions, which can be so
// controlled.

void asm_AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
@@ -1101,4 +1097,4 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
}
}

#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) */
#endif // OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM)
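
For readers skimming the table-based implementation above, a single-block sketch of the public entry points it provides; the key and plaintext bytes are placeholders.

    #include <openssl/aes.h>

    static void encrypt_one_block(void) {
      uint8_t key_bytes[16] = {0};       // placeholder 128-bit key
      uint8_t in[AES_BLOCK_SIZE] = {0};  // placeholder plaintext block
      uint8_t out[AES_BLOCK_SIZE];
      AES_KEY aes_key;
      if (AES_set_encrypt_key(key_bytes, 128, &aes_key) != 0) {
        return;  // unsupported key length
      }
      AES_encrypt(in, out, &aes_key);  // encrypts exactly one 16-byte block
    }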

+ 7
- 7
crypto/fipsmodule/aes/internal.h

@@ -30,7 +30,7 @@ extern "C" {
static int hwaes_capable(void) {
return CRYPTO_is_ARMv8_AES_capable();
}
#endif /* !NO_ASM && (AES || AARCH64) */
#endif // !NO_ASM && (AES || AARCH64)

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE)
#define HWAES
@@ -38,7 +38,7 @@ static int hwaes_capable(void) {
static int hwaes_capable(void) {
return CRYPTO_is_PPC64LE_vcrypto_capable();
}
#endif /* !NO_ASM && PPC64LE */
#endif // !NO_ASM && PPC64LE


#if defined(HWAES)
@@ -56,8 +56,8 @@ void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,

#else

/* If HWAES isn't defined then we provide dummy functions for each of the hwaes
* functions. */
// If HWAES isn't defined then we provide dummy functions for each of the hwaes
// functions.
static int hwaes_capable(void) { return 0; }

static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits,
@@ -91,10 +91,10 @@ static void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
abort();
}

#endif /* !HWAES */
#endif // !HWAES

#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_AES_INTERNAL_H */
#endif // OPENSSL_HEADER_AES_INTERNAL_H

+ 3
- 3
crypto/fipsmodule/aes/key_wrap.c

@@ -56,7 +56,7 @@
#include "../../internal.h"


/* kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. */
// kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1.
static const uint8_t kDefaultIV[] = {
0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6,
};
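
A short sketch of calling the wrap function below with the default IV (passing iv == NULL selects kDefaultIV above); the KEK and key material are placeholders.

    #include <openssl/aes.h>

    static int wrap_example(const uint8_t kek_bytes[16],
                            const uint8_t plain_key[16],
                            uint8_t wrapped[24] /* in_len + 8 */) {
      AES_KEY kek;
      if (AES_set_encrypt_key(kek_bytes, 128, &kek) != 0) {
        return 0;
      }
      // AES_wrap_key returns the wrapped length (in_len + 8) on success and -1
      // on error.
      return AES_wrap_key(&kek, NULL /* kDefaultIV */, wrapped, plain_key, 16) ==
             24;
    }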
@@ -65,7 +65,7 @@ static const unsigned kBound = 6;

int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
const uint8_t *in, size_t in_len) {
/* See RFC 3394, section 2.2.1. */
// See RFC 3394, section 2.2.1.

if (in_len > INT_MAX - 8 || in_len < 8 || in_len % 8 != 0) {
return -1;
@@ -101,7 +101,7 @@ int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,

int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
const uint8_t *in, size_t in_len) {
/* See RFC 3394, section 2.2.2. */
// See RFC 3394, section 2.2.2.

if (in_len > INT_MAX || in_len < 16 || in_len % 8 != 0) {
return -1;


+ 1
- 1
crypto/fipsmodule/aes/mode_wrappers.c

@@ -92,7 +92,7 @@ void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
asm_AES_cbc_encrypt(in, out, len, key, ivec, enc);
}

#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */
#endif // OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86)

void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t *ivec, int *num) {


+ 23
- 23
crypto/fipsmodule/bcm.c

@@ -13,7 +13,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE /* needed for syscall() on Linux. */
#define _GNU_SOURCE // needed for syscall() on Linux.
#endif

#include <openssl/aead.h>
@@ -145,7 +145,7 @@ static RSA *self_test_rsa_key(void) {
0xa7, 0x10, 0x93, 0x43, 0x53, 0x4e, 0xe3, 0x16, 0x73, 0x55, 0xce, 0xf2,
0x94, 0xc0, 0xbe, 0xb3,
};
static const uint8_t kE[] = {0x01, 0x00, 0x01}; /* 65537 */
static const uint8_t kE[] = {0x01, 0x00, 0x01}; // 65537
static const uint8_t kD[] = {
0x2f, 0x2c, 0x1e, 0xd2, 0x3d, 0x2c, 0xb1, 0x9b, 0x21, 0x02, 0xce, 0xb8,
0x95, 0x5f, 0x4f, 0xd9, 0x21, 0x38, 0x11, 0x36, 0xb0, 0x9a, 0x36, 0xab,
@@ -288,8 +288,8 @@ static EC_KEY *self_test_ecdsa_key(void) {
}

#if !defined(OPENSSL_ASAN)
/* These symbols are filled in by delocate.go. They point to the start and end
* of the module, and the location of the integrity hash, respectively. */
// These symbols are filled in by delocate.go. They point to the start and end
// of the module, and the location of the integrity hash, respectively.
extern const uint8_t BORINGSSL_bcm_text_start[];
extern const uint8_t BORINGSSL_bcm_text_end[];
extern const uint8_t BORINGSSL_bcm_text_hash[];
@@ -300,8 +300,8 @@ BORINGSSL_bcm_power_on_self_test(void) {
CRYPTO_library_init();

#if !defined(OPENSSL_ASAN)
/* Integrity tests cannot run under ASAN because it involves reading the full
* .text section, which triggers the global-buffer overflow detection. */
// Integrity tests cannot run under ASAN because it involves reading the full
// .text section, which triggers the global-buffer overflow detection.
const uint8_t *const start = BORINGSSL_bcm_text_start;
const uint8_t *const end = BORINGSSL_bcm_text_end;

@@ -478,7 +478,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
uint8_t aes_iv[16];
uint8_t output[256];

/* AES-CBC Encryption KAT */
// AES-CBC Encryption KAT
memcpy(aes_iv, kAESIV, sizeof(kAESIV));
if (AES_set_encrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
goto err;
@@ -490,7 +490,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* AES-CBC Decryption KAT */
// AES-CBC Decryption KAT
memcpy(aes_iv, kAESIV, sizeof(kAESIV));
if (AES_set_decrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
goto err;
@@ -511,7 +511,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* AES-GCM Encryption KAT */
// AES-GCM Encryption KAT
if (!EVP_AEAD_CTX_seal(&aead_ctx, output, &out_len, sizeof(output), nonce,
EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
kPlaintext, sizeof(kPlaintext), NULL, 0) ||
@@ -520,7 +520,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* AES-GCM Decryption KAT */
// AES-GCM Decryption KAT
if (!EVP_AEAD_CTX_open(&aead_ctx, output, &out_len, sizeof(output), nonce,
EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
kAESGCMCiphertext, sizeof(kAESGCMCiphertext), NULL,
@@ -538,7 +538,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
DES_set_key(&kDESKey2, &des2);
DES_set_key(&kDESKey3, &des3);

/* 3DES Encryption KAT */
// 3DES Encryption KAT
memcpy(&des_iv, &kDESIV, sizeof(des_iv));
DES_ede3_cbc_encrypt(kPlaintext, output, sizeof(kPlaintext), &des1, &des2,
&des3, &des_iv, DES_ENCRYPT);
@@ -547,7 +547,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* 3DES Decryption KAT */
// 3DES Decryption KAT
memcpy(&des_iv, &kDESIV, sizeof(des_iv));
DES_ede3_cbc_encrypt(kDESCiphertext, output, sizeof(kDESCiphertext), &des1,
&des2, &des3, &des_iv, DES_DECRYPT);
@@ -556,21 +556,21 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* SHA-1 KAT */
// SHA-1 KAT
SHA1(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA1, output, sizeof(kPlaintextSHA1),
"SHA-1 KAT")) {
goto err;
}

/* SHA-256 KAT */
// SHA-256 KAT
SHA256(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA256, output, sizeof(kPlaintextSHA256),
"SHA-256 KAT")) {
goto err;
}

/* SHA-512 KAT */
// SHA-512 KAT
SHA512(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA512, output, sizeof(kPlaintextSHA512),
"SHA-512 KAT")) {
@@ -583,11 +583,11 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* RSA Sign KAT */
// RSA Sign KAT
unsigned sig_len;

/* Disable blinding for the power-on tests because it's not needed and
* triggers an entropy draw. */
// Disable blinding for the power-on tests because it's not needed and
// triggers an entropy draw.
rsa_key->flags |= RSA_FLAG_NO_BLINDING;

if (!RSA_sign(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256), output,
@@ -597,7 +597,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* RSA Verify KAT */
// RSA Verify KAT
if (!RSA_verify(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256),
kRSASignature, sizeof(kRSASignature), rsa_key)) {
printf("RSA Verify KAT failed.\n");
@@ -612,9 +612,9 @@ BORINGSSL_bcm_power_on_self_test(void) {
goto err;
}

/* ECDSA Sign/Verify PWCT */
// ECDSA Sign/Verify PWCT

/* The 'k' value for ECDSA is fixed to avoid an entropy draw. */
// The 'k' value for ECDSA is fixed to avoid an entropy draw.
ec_key->fixed_k = BN_new();
if (ec_key->fixed_k == NULL ||
!BN_set_word(ec_key->fixed_k, 42)) {
@@ -641,7 +641,7 @@ BORINGSSL_bcm_power_on_self_test(void) {
ECDSA_SIG_free(sig);
EC_KEY_free(ec_key);

/* DBRG KAT */
// DRBG KAT
CTR_DRBG_STATE drbg;
if (!CTR_DRBG_init(&drbg, kDRBGEntropy, kDRBGPersonalization,
sizeof(kDRBGPersonalization)) ||
@@ -676,4 +676,4 @@ void BORINGSSL_FIPS_abort(void) {
exit(1);
}
}
#endif /* BORINGSSL_FIPS */
#endif // BORINGSSL_FIPS
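
Each known-answer test above follows the same shape: compute over a fixed input and compare against a stored digest. A minimal standalone sketch of the SHA-256 case, using the widely published hash of the empty string rather than the module's kPlaintextSHA256.

    #include <string.h>

    #include <openssl/sha.h>

    static int sha256_kat_example(void) {
      // SHA-256("") -- a well-known test value, not the module's own vector.
      static const uint8_t kExpected[SHA256_DIGEST_LENGTH] = {
          0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8,
          0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
          0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
      };
      uint8_t digest[SHA256_DIGEST_LENGTH];
      SHA256((const uint8_t *)"", 0, digest);
      return memcmp(digest, kExpected, sizeof(kExpected)) == 0;
    }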

+ 22
- 24
crypto/fipsmodule/bn/add.c

@@ -68,20 +68,19 @@ int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
const BIGNUM *tmp;
int a_neg = a->neg, ret;

/* a + b a+b
* a + -b a-b
* -a + b b-a
* -a + -b -(a+b)
*/
// a + b a+b
// a + -b a-b
// -a + b b-a
// -a + -b -(a+b)
if (a_neg ^ b->neg) {
/* only one is negative */
// only one is negative
if (a_neg) {
tmp = a;
a = b;
b = tmp;
}

/* we are now a - b */
// we are now a - b
if (BN_ucmp(a, b) < 0) {
if (!BN_usub(r, b, a)) {
return 0;
@@ -142,7 +141,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
}
}
if (carry) {
/* carry != 0 => dif == 0 */
// carry != 0 => dif == 0
*rp = 1;
r->top++;
}
@@ -150,7 +149,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {

if (dif && rp != ap) {
while (dif--) {
/* copy remaining words if ap != rp */
// copy remaining words if ap != rp
*(rp++) = *(ap++);
}
}
@@ -165,17 +164,17 @@ int BN_add_word(BIGNUM *a, BN_ULONG w) {

w &= BN_MASK2;

/* degenerate case: w is zero */
// degenerate case: w is zero
if (!w) {
return 1;
}

/* degenerate case: a is zero */
// degenerate case: a is zero
if (BN_is_zero(a)) {
return BN_set_word(a, w);
}

/* handle 'a' when negative */
// handle 'a' when negative
if (a->neg) {
a->neg = 0;
i = BN_sub_word(a, w);
@@ -206,11 +205,10 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
int add = 0, neg = 0;
const BIGNUM *tmp;

/* a - b a-b
* a - -b a+b
* -a - b -(a+b)
* -a - -b b-a
*/
// a - b a-b
// a - -b a+b
// -a - b -(a+b)
// -a - -b b-a
if (a->neg) {
if (b->neg) {
tmp = a;
@@ -236,7 +234,7 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
return 1;
}

/* We are actually doing a - b :-) */
// We are actually doing a - b :-)

max = (a->top > b->top) ? a->top : b->top;
if (!bn_wexpand(r, max)) {
@@ -267,7 +265,7 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
min = b->top;
dif = max - min;

if (dif < 0) /* hmm... should not be happening */
if (dif < 0) // hmm... should not be happening
{
OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3);
return 0;
@@ -295,10 +293,10 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
*(rp++) = t1 & BN_MASK2;
}

if (carry) /* subtracted */
if (carry) // subtracted
{
if (!dif) {
/* error: a < b */
// error: a < b
return 0;
}

@@ -329,12 +327,12 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) {

w &= BN_MASK2;

/* degenerate case: w is zero */
// degenerate case: w is zero
if (!w) {
return 1;
}

/* degenerate case: a is zero */
// degenerate case: a is zero
if (BN_is_zero(a)) {
i = BN_set_word(a, w);
if (i != 0) {
@@ -343,7 +341,7 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) {
return i;
}

/* handle 'a' when negative */
// handle 'a' when negative
if (a->neg) {
a->neg = 0;
i = BN_add_word(a, w);


+ 13
- 16
crypto/fipsmodule/bn/asm/x86_64-gcc.c

@@ -52,7 +52,7 @@

#include <openssl/bn.h>

/* TODO(davidben): Get this file working on Windows x64. */
// TODO(davidben): Get this file working on Windows x64.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__GNUC__)

#include "../internal.h"
@@ -63,11 +63,9 @@

#define asm __asm__

/*
* "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
* "g"(0) let the compiler to decide where does it
* want to keep the value of zero;
*/
// "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
// "g"(0) let the compiler to decide where does it
// want to keep the value of zero;
#define mul_add(r, a, word, carry) \
do { \
register BN_ULONG high, low; \
@@ -197,7 +195,7 @@ BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
}

asm volatile (
" subq %0,%0 \n" /* clear carry */
" subq %0,%0 \n" // clear carry
" jmp 1f \n"
".p2align 4 \n"
"1:"
@@ -224,7 +222,7 @@ BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
}

asm volatile (
" subq %0,%0 \n" /* clear borrow */
" subq %0,%0 \n" // clear borrow
" jmp 1f \n"
".p2align 4 \n"
"1:"
@@ -241,14 +239,13 @@ BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
return ret & 1;
}

/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)
*/
// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)

/* Keep in mind that carrying into high part of multiplication result can not
* overflow, because it cannot be all-ones. */
// Keep in mind that carrying into high part of multiplication result can not
// overflow, because it cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG t1, t2; \
@@ -539,4 +536,4 @@ void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) {
#undef mul_add_c2
#undef sqr_add_c2

#endif /* !NO_ASM && X86_64 && __GNUC__ */
#endif // !NO_ASM && X86_64 && __GNUC__
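
For readers without the inline-assembly background, a plain-C sketch of what mul_add_c accumulates, written with the compiler's 128-bit type (GCC/Clang on 64-bit targets) instead of asm; the function name is made up for illustration.

    #include <openssl/bn.h>

    // (c2, c1, c0) += a * b, where the three words form a little-endian
    // accumulator. The carry added to |hi| cannot overflow because the high half
    // of a full product is never all-ones.
    static void mul_add_c_sketch(BN_ULONG a, BN_ULONG b, BN_ULONG *c0,
                                 BN_ULONG *c1, BN_ULONG *c2) {
      unsigned __int128 t = (unsigned __int128)a * b;
      BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
      *c0 += lo;
      hi += (*c0 < lo);   // carry out of the low word
      *c1 += hi;
      *c2 += (*c1 < hi);  // propagate the final carry
    }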

+ 3
- 3
crypto/fipsmodule/bn/bn.c

@@ -175,8 +175,8 @@ DEFINE_METHOD_FUNCTION(BIGNUM, BN_value_one) {
out->flags = BN_FLG_STATIC_DATA;
}

/* BN_num_bits_word returns the minimum number of bits needed to represent the
* value in |l|. */
// BN_num_bits_word returns the minimum number of bits needed to represent the
// value in |l|.
unsigned BN_num_bits_word(BN_ULONG l) {
static const unsigned char bits[256] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -290,7 +290,7 @@ int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num) {
return 0;
}
OPENSSL_memmove(bn->d, words, num * sizeof(BN_ULONG));
/* |bn_wexpand| verified that |num| isn't too large. */
// |bn_wexpand| verified that |num| isn't too large.
bn->top = (int)num;
bn_correct_top(bn);
bn->neg = 0;


+ 3
- 3
crypto/fipsmodule/bn/bn_test.cc

@@ -67,9 +67,9 @@
* Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems
* Laboratories. */

/* Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are
* unavailable in C++ unless some macros are defined. C++11 overruled this
* decision, but older Android NDKs still require it. */
// Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are
// unavailable in C++ unless some macros are defined. C++11 overruled this
// decision, but older Android NDKs still require it.
#if !defined(__STDC_CONSTANT_MACROS)
#define __STDC_CONSTANT_MACROS
#endif


+ 33
- 33
crypto/fipsmodule/bn/bytes.c

@@ -90,8 +90,8 @@ BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
return NULL;
}

/* |bn_wexpand| must check bounds on |num_words| to write it into
* |ret->dmax|. */
// |bn_wexpand| must check bounds on |num_words| to write it into
// |ret->dmax|.
assert(num_words <= INT_MAX);
ret->top = (int)num_words;
ret->neg = 0;
@@ -105,8 +105,8 @@ BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
}
}

/* need to call this due to clear byte at top if avoiding having the top bit
* set (-ve number) */
// need to call this due to clear byte at top if avoiding having the top bit
// set (-ve number)
bn_correct_top(ret);
return ret;
}
@@ -128,7 +128,7 @@ BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
return ret;
}

/* Reserve enough space in |ret|. */
// Reserve enough space in |ret|.
size_t num_words = ((len - 1) / BN_BYTES) + 1;
if (!bn_wexpand(ret, num_words)) {
BN_free(bn);
@@ -136,11 +136,11 @@ BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
}
ret->top = num_words;

/* Make sure the top bytes will be zeroed. */
// Make sure the top bytes will be zeroed.
ret->d[num_words - 1] = 0;

/* We only support little-endian platforms, so we can simply memcpy the
* internal representation. */
// We only support little-endian platforms, so we can simply memcpy the
// internal representation.
OPENSSL_memcpy(ret->d, in, len);

bn_correct_top(ret);
@@ -160,24 +160,24 @@ size_t BN_bn2bin(const BIGNUM *in, uint8_t *out) {
}

int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) {
/* If we don't have enough space, fail out. */
// If we don't have enough space, fail out.
size_t num_bytes = BN_num_bytes(in);
if (len < num_bytes) {
return 0;
}

/* We only support little-endian platforms, so we can simply memcpy into the
* internal representation. */
// We only support little-endian platforms, so we can simply memcpy into the
// internal representation.
OPENSSL_memcpy(out, in->d, num_bytes);

/* Pad out the rest of the buffer with zeroes. */
// Pad out the rest of the buffer with zeroes.
OPENSSL_memset(out + num_bytes, 0, len - num_bytes);

return 1;
}
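
A small round-trip sketch for the serialization functions in this file; the input bytes and output width are illustrative, and the padded variant requires the buffer to be at least BN_num_bytes(bn) long.

    #include <openssl/bn.h>

    static int roundtrip_example(void) {
      static const uint8_t kIn[4] = {0x01, 0x02, 0x03, 0x04};
      uint8_t padded[32];
      BIGNUM *bn = BN_bin2bn(kIn, sizeof(kIn), NULL);
      int ok = bn != NULL &&
               // Fixed-width, zero-padded, big-endian output.
               BN_bn2bin_padded(padded, sizeof(padded), bn);
      BN_free(bn);
      return ok;
    }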

/* constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its
* behavior is undefined if |v| takes any other value. */
// constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its
// behavior is undefined if |v| takes any other value.
static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) {
BN_ULONG mask = v;
mask--;
@@ -185,35 +185,35 @@ static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) {
return (~mask & x) | (mask & y);
}

/* constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y|
* must not have their MSBs set. */
// constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y|
// must not have their MSBs set.
static int constant_time_le_size_t(size_t x, size_t y) {
return ((x - y - 1) >> (sizeof(size_t) * 8 - 1)) & 1;
}

/* read_word_padded returns the |i|'th word of |in|, if it is not out of
* bounds. Otherwise, it returns 0. It does so without branches on the size of
* |in|, however it necessarily does not have the same memory access pattern. If
* the access would be out of bounds, it reads the last word of |in|. |in| must
* not be zero. */
// read_word_padded returns the |i|'th word of |in|, if it is not out of
// bounds. Otherwise, it returns 0. It does so without branches on the size of
// |in|, however it necessarily does not have the same memory access pattern. If
// the access would be out of bounds, it reads the last word of |in|. |in| must
// not be zero.
static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) {
/* Read |in->d[i]| if valid. Otherwise, read the last word. */
// Read |in->d[i]| if valid. Otherwise, read the last word.
BN_ULONG l = in->d[constant_time_select_ulong(
constant_time_le_size_t(in->dmax, i), in->dmax - 1, i)];

/* Clamp to zero if above |d->top|. */
// Clamp to zero if above |d->top|.
return constant_time_select_ulong(constant_time_le_size_t(in->top, i), 0, l);
}

int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
/* Special case for |in| = 0. Just branch as the probability is negligible. */
// Special case for |in| = 0. Just branch as the probability is negligible.
if (BN_is_zero(in)) {
OPENSSL_memset(out, 0, len);
return 1;
}

/* Check if the integer is too big. This case can exit early in non-constant
* time. */
// Check if the integer is too big. This case can exit early in non-constant
// time.
if ((size_t)in->top > (len + (BN_BYTES - 1)) / BN_BYTES) {
return 0;
}
@@ -224,13 +224,13 @@ int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
}
}

/* Write the bytes out one by one. Serialization is done without branching on
* the bits of |in| or on |in->top|, but if the routine would otherwise read
* out of bounds, the memory access pattern can't be fixed. However, for an
* RSA key of size a multiple of the word size, the probability of BN_BYTES
* leading zero octets is low.
*
* See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */
// Write the bytes out one by one. Serialization is done without branching on
// the bits of |in| or on |in->top|, but if the routine would otherwise read
// out of bounds, the memory access pattern can't be fixed. However, for an
// RSA key of size a multiple of the word size, the probability of BN_BYTES
// leading zero octets is low.
//
// See Falko Stenzke, "Manger's Attack revisited", ICICS 2010.
size_t i = len;
while (i--) {
BN_ULONG l = read_word_padded(in, i / BN_BYTES);


+ 2
- 2
crypto/fipsmodule/bn/cmp.c

@@ -159,14 +159,14 @@ int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) {
if (dl < 0) {
for (i = dl; i < 0; i++) {
if (b[n - i] != 0) {
return -1; /* a < b */
return -1; // a < b
}
}
}
if (dl > 0) {
for (i = dl; i > 0; i--) {
if (a[n + i] != 0) {
return 1; /* a > b */
return 1; // a > b
}
}
}


+ 35
- 37
crypto/fipsmodule/bn/ctx.c

@@ -62,24 +62,24 @@
#include "../../internal.h"


/* How many bignums are in each "pool item"; */
// How many bignums are in each "pool item";
#define BN_CTX_POOL_SIZE 16
/* The stack frame info is resizing, set a first-time expansion size; */
// The stack frame info is resizing, set a first-time expansion size;
#define BN_CTX_START_FRAMES 32

/* A bundle of bignums that can be linked with other bundles */
// A bundle of bignums that can be linked with other bundles
typedef struct bignum_pool_item {
/* The bignum values */
// The bignum values
BIGNUM vals[BN_CTX_POOL_SIZE];
/* Linked-list admin */
// Linked-list admin
struct bignum_pool_item *prev, *next;
} BN_POOL_ITEM;


typedef struct bignum_pool {
/* Linked-list admin */
// Linked-list admin
BN_POOL_ITEM *head, *current, *tail;
/* Stack depth and allocation size */
// Stack depth and allocation size
unsigned used, size;
} BN_POOL;

@@ -88,15 +88,14 @@ static void BN_POOL_finish(BN_POOL *);
static BIGNUM *BN_POOL_get(BN_POOL *);
static void BN_POOL_release(BN_POOL *, unsigned int);

/************/
/* BN_STACK */
/************/

/* A wrapper to manage the "stack frames" */
// BN_STACK

// A wrapper to manage the "stack frames"
typedef struct bignum_ctx_stack {
/* Array of indexes into the bignum stack */
// Array of indexes into the bignum stack
unsigned int *indexes;
/* Number of stack frames, and the size of the allocated array */
// Number of stack frames, and the size of the allocated array
unsigned int depth, size;
} BN_STACK;

@@ -105,21 +104,20 @@ static void BN_STACK_finish(BN_STACK *);
static int BN_STACK_push(BN_STACK *, unsigned int);
static unsigned int BN_STACK_pop(BN_STACK *);

/**********/
/* BN_CTX */
/**********/

/* The opaque BN_CTX type */
// BN_CTX

// The opaque BN_CTX type
struct bignum_ctx {
/* The bignum bundles */
// The bignum bundles
BN_POOL pool;
/* The "stack frames", if you will */
// The "stack frames", if you will
BN_STACK stack;
/* The number of bignums currently assigned */
// The number of bignums currently assigned
unsigned int used;
/* Depth of stack overflow */
// Depth of stack overflow
int err_stack;
/* Block "gets" until an "end" (compatibility behaviour) */
// Block "gets" until an "end" (compatibility behaviour)
int too_many;
};

@@ -130,7 +128,7 @@ BN_CTX *BN_CTX_new(void) {
return NULL;
}

/* Initialise the structure */
// Initialise the structure
BN_POOL_init(&ret->pool);
BN_STACK_init(&ret->stack);
ret->used = 0;
@@ -150,11 +148,11 @@ void BN_CTX_free(BN_CTX *ctx) {
}

void BN_CTX_start(BN_CTX *ctx) {
/* If we're already overflowing ... */
// If we're already overflowing ...
if (ctx->err_stack || ctx->too_many) {
ctx->err_stack++;
} else if (!BN_STACK_push(&ctx->stack, ctx->used)) {
/* (Try to) get a new frame pointer */
// (Try to) get a new frame pointer
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
ctx->err_stack++;
}
@@ -168,14 +166,14 @@ BIGNUM *BN_CTX_get(BN_CTX *ctx) {

ret = BN_POOL_get(&ctx->pool);
if (ret == NULL) {
/* Setting too_many prevents repeated "get" attempts from
* cluttering the error stack. */
// Setting too_many prevents repeated "get" attempts from
// cluttering the error stack.
ctx->too_many = 1;
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
return NULL;
}

/* OK, make sure the returned bignum is "zero" */
// OK, make sure the returned bignum is "zero"
BN_zero(ret);
ctx->used++;
return ret;
@@ -186,20 +184,19 @@ void BN_CTX_end(BN_CTX *ctx) {
ctx->err_stack--;
} else {
unsigned int fp = BN_STACK_pop(&ctx->stack);
/* Does this stack frame have anything to release? */
// Does this stack frame have anything to release?
if (fp < ctx->used) {
BN_POOL_release(&ctx->pool, ctx->used - fp);
}

ctx->used = fp;
/* Unjam "too_many" in case "get" had failed */
// Unjam "too_many" in case "get" had failed
ctx->too_many = 0;
}
}
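
The pool and stack machinery above exists to support the usual calling pattern, sketched here: every BN_CTX_start is paired with BN_CTX_end, and temporaries from BN_CTX_get must not be used after the matching end call.

    #include <openssl/bn.h>

    static int with_temporaries(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
      int ok = 0;
      BN_CTX_start(ctx);
      BIGNUM *tmp = BN_CTX_get(ctx);
      if (tmp == NULL ||  // checking the last BN_CTX_get covers earlier failures
          !BN_mul(tmp, a, b, ctx)) {
        goto err;
      }
      ok = 1;
    err:
      BN_CTX_end(ctx);  // releases |tmp| back to the pool
      return ok;
    }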

/************/
/* BN_STACK */
/************/

// BN_STACK

static void BN_STACK_init(BN_STACK *st) {
st->indexes = NULL;
@@ -212,7 +209,7 @@ static void BN_STACK_finish(BN_STACK *st) {

static int BN_STACK_push(BN_STACK *st, unsigned int idx) {
if (st->depth == st->size) {
/* Need to expand */
// Need to expand
unsigned int newsize =
(st->size ? (st->size * 3 / 2) : BN_CTX_START_FRAMES);
unsigned int *newitems = OPENSSL_malloc(newsize * sizeof(unsigned int));
@@ -235,6 +232,7 @@ static unsigned int BN_STACK_pop(BN_STACK *st) {
return st->indexes[--(st->depth)];
}


static void BN_POOL_init(BN_POOL *p) {
p->head = p->current = p->tail = NULL;
p->used = p->size = 0;
@@ -259,14 +257,14 @@ static BIGNUM *BN_POOL_get(BN_POOL *p) {
return NULL;
}

/* Initialise the structure */
// Initialise the structure
for (size_t i = 0; i < BN_CTX_POOL_SIZE; i++) {
BN_init(&item->vals[i]);
}

item->prev = p->tail;
item->next = NULL;
/* Link it in */
// Link it in
if (!p->head) {
p->head = p->current = p->tail = item;
} else {
@@ -277,7 +275,7 @@ static BIGNUM *BN_POOL_get(BN_POOL *p) {

p->size += BN_CTX_POOL_SIZE;
p->used++;
/* Return the first bignum from the new pool */
// Return the first bignum from the new pool
return item->vals;
}



+ 94
- 96
crypto/fipsmodule/bn/div.c

@@ -65,8 +65,8 @@


#if !defined(BN_ULLONG)
/* bn_div_words divides a double-width |h|,|l| by |d| and returns the result,
* which must fit in a |BN_ULONG|. */
// bn_div_words divides a double-width |h|,|l| by |d| and returns the result,
// which must fit in a |BN_ULONG|.
static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
BN_ULONG dh, dl, q, ret = 0, th, tl, t;
int i, count = 2;
@@ -135,26 +135,26 @@ static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
ret |= q;
return ret;
}
#endif /* !defined(BN_ULLONG) */
#endif // !defined(BN_ULLONG)

static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out,
BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) {
/* GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when
* the |BN_ULLONG|-based C code is used.
*
* GCC bugs:
* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721
* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183
* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897
* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668
*
* Clang bugs:
* * https://llvm.org/bugs/show_bug.cgi?id=6397
* * https://llvm.org/bugs/show_bug.cgi?id=12418
*
* These issues aren't specific to x86 and x86_64, so it might be worthwhile
* to add more assembly language implementations. */
// GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when
// the |BN_ULLONG|-based C code is used.
//
// GCC bugs:
// * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
// * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721
// * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183
// * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897
// * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668
//
// Clang bugs:
// * https://llvm.org/bugs/show_bug.cgi?id=6397
// * https://llvm.org/bugs/show_bug.cgi?id=12418
//
// These issues aren't specific to x86 and x86_64, so it might be worthwhile
// to add more assembly language implementations.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__GNUC__)
__asm__ volatile (
"divl %4"
@@ -178,17 +178,17 @@ static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out,
#endif
}

/* BN_div computes dv := num / divisor, rounding towards
* zero, and sets up rm such that dv*divisor + rm = num holds.
* Thus:
* dv->neg == num->neg ^ divisor->neg (unless the result is zero)
* rm->neg == num->neg (unless the remainder is zero)
* If 'dv' or 'rm' is NULL, the respective value is not returned.
*
* This was specifically designed to contain fewer branches that may leak
* sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL
* and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and
* Jean-Pierre Seifert. */
// BN_div computes dv := num / divisor, rounding towards
// zero, and sets up rm such that dv*divisor + rm = num holds.
// Thus:
// dv->neg == num->neg ^ divisor->neg (unless the result is zero)
// rm->neg == num->neg (unless the remainder is zero)
// If 'dv' or 'rm' is NULL, the respective value is not returned.
//
// This was specifically designed to contain fewer branches that may leak
// sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL
// and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and
// Jean-Pierre Seifert.
int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_CTX *ctx) {
int norm_shift, i, loop;
@@ -197,8 +197,8 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_ULONG d0, d1;
int num_n, div_n;

/* Invalid zero-padding would have particularly bad consequences
* so don't just rely on bn_check_top() here */
// Invalid zero-padding would have particularly bad consequences
// so don't just rely on bn_check_top() here
if ((num->top > 0 && num->d[num->top - 1] == 0) ||
(divisor->top > 0 && divisor->d[divisor->top - 1] == 0)) {
OPENSSL_PUT_ERROR(BN, BN_R_NOT_INITIALIZED);
@@ -223,7 +223,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
goto err;
}

/* First we normalise the numbers */
// First we normalise the numbers
norm_shift = BN_BITS2 - ((BN_num_bits(divisor)) % BN_BITS2);
if (!(BN_lshift(sdiv, divisor, norm_shift))) {
goto err;
@@ -235,9 +235,9 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
}
snum->neg = 0;

/* Since we don't want to have special-case logic for the case where snum is
* larger than sdiv, we pad snum with enough zeroes without changing its
* value. */
// Since we don't want to have special-case logic for the case where snum is
// larger than sdiv, we pad snum with enough zeroes without changing its
// value.
if (snum->top <= sdiv->top + 1) {
if (!bn_wexpand(snum, sdiv->top + 2)) {
goto err;
@@ -257,24 +257,24 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
div_n = sdiv->top;
num_n = snum->top;
loop = num_n - div_n;
/* Lets setup a 'window' into snum
* This is the part that corresponds to the current
* 'area' being divided */
// Let's set up a 'window' into snum
// This is the part that corresponds to the current
// 'area' being divided
wnum.neg = 0;
wnum.d = &(snum->d[loop]);
wnum.top = div_n;
/* only needed when BN_ucmp messes up the values between top and max */
wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */
// only needed when BN_ucmp messes up the values between top and max
wnum.dmax = snum->dmax - loop; // so we don't step out of bounds

/* Get the top 2 words of sdiv */
/* div_n=sdiv->top; */
// Get the top 2 words of sdiv
// div_n=sdiv->top;
d0 = sdiv->d[div_n - 1];
d1 = (div_n == 1) ? 0 : sdiv->d[div_n - 2];

/* pointer to the 'top' of snum */
// pointer to the 'top' of snum
wnump = &(snum->d[num_n - 1]);

/* Setup to 'res' */
// Setup to 'res'
res->neg = (num->neg ^ divisor->neg);
if (!bn_wexpand(res, (loop + 1))) {
goto err;
@@ -282,13 +282,13 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
res->top = loop - 1;
resp = &(res->d[loop - 1]);

/* space for temp */
// space for temp
if (!bn_wexpand(tmp, (div_n + 1))) {
goto err;
}

/* if res->top == 0 then clear the neg value otherwise decrease
* the resp pointer */
// if res->top == 0 then clear the neg value otherwise decrease
// the resp pointer
if (res->top == 0) {
res->neg = 0;
} else {
@@ -297,8 +297,8 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,

for (i = 0; i < loop - 1; i++, wnump--, resp--) {
BN_ULONG q, l0;
/* the first part of the loop uses the top two words of snum and sdiv to
* calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv */
// the first part of the loop uses the top two words of snum and sdiv to
// calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv
BN_ULONG n0, n1, rem = 0;

n0 = wnump[0];
@@ -306,7 +306,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
if (n0 == d0) {
q = BN_MASK2;
} else {
/* n0 < d0 */
// n0 < d0
bn_div_rem_words(&q, &rem, n0, n1, d0);

#ifdef BN_ULLONG
@@ -318,11 +318,11 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
q--;
rem += d0;
if (rem < d0) {
break; /* don't let rem overflow */
break; // don't let rem overflow
}
t2 -= d1;
}
#else /* !BN_ULLONG */
#else // !BN_ULLONG
BN_ULONG t2l, t2h;
BN_UMULT_LOHI(t2l, t2h, d1, q);
for (;;) {
@@ -332,43 +332,41 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
q--;
rem += d0;
if (rem < d0) {
break; /* don't let rem overflow */
break; // don't let rem overflow
}
if (t2l < d1) {
t2h--;
}
t2l -= d1;
}
#endif /* !BN_ULLONG */
#endif // !BN_ULLONG
}

l0 = bn_mul_words(tmp->d, sdiv->d, div_n, q);
tmp->d[div_n] = l0;
wnum.d--;
/* ingore top values of the bignums just sub the two
* BN_ULONG arrays with bn_sub_words */
    // ignore top values of the bignums, just sub the two
// BN_ULONG arrays with bn_sub_words
if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n + 1)) {
/* Note: As we have considered only the leading
* two BN_ULONGs in the calculation of q, sdiv * q
* might be greater than wnum (but then (q-1) * sdiv
* is less or equal than wnum)
*/
// Note: As we have considered only the leading
// two BN_ULONGs in the calculation of q, sdiv * q
// might be greater than wnum (but then (q-1) * sdiv
      // is less than or equal to wnum)
q--;
if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n)) {
/* we can't have an overflow here (assuming
* that q != 0, but if q == 0 then tmp is
* zero anyway) */
// we can't have an overflow here (assuming
// that q != 0, but if q == 0 then tmp is
// zero anyway)
(*wnump)++;
}
}
/* store part of the result */
// store part of the result
*resp = q;
}
bn_correct_top(snum);
if (rm != NULL) {
/* Keep a copy of the neg flag in num because if rm==num
* BN_rshift() will overwrite it.
*/
// Keep a copy of the neg flag in num because if rm==num
// BN_rshift() will overwrite it.
int neg = num->neg;
if (!BN_rshift(rm, snum, norm_shift)) {
goto err;
@@ -394,7 +392,7 @@ int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) {
return 1;
}

/* now -|d| < r < 0, so we have to set r := r + |d|. */
// now -|d| < r < 0, so we have to set r := r + |d|.
return (d->neg ? BN_sub : BN_add)(r, r, d);
}

@@ -425,8 +423,8 @@ int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
return BN_nnmod(r, r, m, ctx);
}

/* BN_mod_sub variant that may be used if both a and b are non-negative
* and less than m */
// BN_mod_sub variant that may be used if both a and b are non-negative
// and less than m
int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
const BIGNUM *m) {
if (!BN_sub(r, a, b)) {
@@ -475,7 +473,7 @@ int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx) {
return 0;
}

/* r->neg == 0, thus we don't need BN_nnmod */
// r->neg == 0, thus we don't need BN_nnmod
return BN_mod(r, r, m, ctx);
}

@@ -512,9 +510,9 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) {
while (n > 0) {
int max_shift;

/* 0 < r < m */
// 0 < r < m
max_shift = BN_num_bits(m) - BN_num_bits(r);
/* max_shift >= 0 */
// max_shift >= 0

if (max_shift < 0) {
OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
@@ -537,7 +535,7 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) {
--n;
}

/* BN_num_bits(r) <= BN_num_bits(m) */
// BN_num_bits(r) <= BN_num_bits(m)
if (BN_cmp(r, m) >= 0) {
if (!BN_sub(r, r, m)) {
return 0;
@@ -574,7 +572,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) {
w &= BN_MASK2;

if (!w) {
/* actually this an error (division by zero) */
    // actually this is an error (division by zero)
return (BN_ULONG) - 1;
}

@@ -582,7 +580,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) {
return 0;
}

/* normalize input for |bn_div_rem_words|. */
// normalize input for |bn_div_rem_words|.
j = BN_BITS2 - BN_num_bits_word(w);
w <<= j;
if (!BN_lshift(a, a, j)) {
@@ -623,8 +621,8 @@ BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) {
}

#ifndef BN_ULLONG
/* If |w| is too long and we don't have |BN_ULLONG| then we need to fall back
* to using |BN_div_word|. */
// If |w| is too long and we don't have |BN_ULLONG| then we need to fall back
// to using |BN_div_word|.
if (w > ((BN_ULONG)1 << BN_BITS4)) {
BIGNUM *tmp = BN_dup(a);
if (tmp == NULL) {
@@ -656,27 +654,27 @@ int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) {

size_t num_words = 1 + ((e - 1) / BN_BITS2);

/* If |a| definitely has less than |e| bits, just BN_copy. */
// If |a| definitely has less than |e| bits, just BN_copy.
if ((size_t) a->top < num_words) {
return BN_copy(r, a) != NULL;
}

/* Otherwise, first make sure we have enough space in |r|.
* Note that this will fail if num_words > INT_MAX. */
// Otherwise, first make sure we have enough space in |r|.
// Note that this will fail if num_words > INT_MAX.
if (!bn_wexpand(r, num_words)) {
return 0;
}

/* Copy the content of |a| into |r|. */
// Copy the content of |a| into |r|.
OPENSSL_memcpy(r->d, a->d, num_words * sizeof(BN_ULONG));

/* If |e| isn't word-aligned, we have to mask off some of our bits. */
// If |e| isn't word-aligned, we have to mask off some of our bits.
size_t top_word_exponent = e % (sizeof(BN_ULONG) * 8);
if (top_word_exponent != 0) {
r->d[num_words - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1;
}

/* Fill in the remaining fields of |r|. */
// Fill in the remaining fields of |r|.
r->neg = a->neg;
r->top = (int) num_words;
bn_correct_top(r);
@@ -688,41 +686,41 @@ int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) {
return 0;
}

/* If the returned value was non-negative, we're done. */
// If the returned value was non-negative, we're done.
if (BN_is_zero(r) || !r->neg) {
return 1;
}

size_t num_words = 1 + (e - 1) / BN_BITS2;

/* Expand |r| to the size of our modulus. */
// Expand |r| to the size of our modulus.
if (!bn_wexpand(r, num_words)) {
return 0;
}

/* Clear the upper words of |r|. */
// Clear the upper words of |r|.
OPENSSL_memset(&r->d[r->top], 0, (num_words - r->top) * BN_BYTES);

/* Set parameters of |r|. */
// Set parameters of |r|.
r->neg = 0;
r->top = (int) num_words;

/* Now, invert every word. The idea here is that we want to compute 2^e-|x|,
* which is actually equivalent to the twos-complement representation of |x|
* in |e| bits, which is -x = ~x + 1. */
// Now, invert every word. The idea here is that we want to compute 2^e-|x|,
// which is actually equivalent to the twos-complement representation of |x|
// in |e| bits, which is -x = ~x + 1.
for (int i = 0; i < r->top; i++) {
r->d[i] = ~r->d[i];
}

/* If our exponent doesn't span the top word, we have to mask the rest. */
// If our exponent doesn't span the top word, we have to mask the rest.
size_t top_word_exponent = e % BN_BITS2;
if (top_word_exponent != 0) {
r->d[r->top - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1;
}

/* Keep the correct_top invariant for BN_add. */
// Keep the correct_top invariant for BN_add.
bn_correct_top(r);

/* Finally, add one, for the reason described above. */
// Finally, add one, for the reason described above.
return BN_add(r, r, BN_value_one());
}
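A small worked example of the -x = ~x + 1 identity used in BN_nnmod_pow2 above (illustrative, not from the source): with e = 8 and a value of -5, inverting the low 8 bits of 5 gives 0xFA = 250, and adding one yields 251 = 2^8 - 5, which is exactly -5 reduced modulo 2^8.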

+ 164
- 169
crypto/fipsmodule/bn/exponentiation.c Show file

@@ -188,12 +188,12 @@ err:
return ret;
}

/* maximum precomputation table size for *variable* sliding windows */
// maximum precomputation table size for *variable* sliding windows
#define TABLE_SIZE 32

typedef struct bn_recp_ctx_st {
BIGNUM N; /* the divisor */
BIGNUM Nr; /* the reciprocal */
BIGNUM N; // the divisor
BIGNUM Nr; // the reciprocal
int num_bits;
int shift;
int flags;
@@ -227,10 +227,10 @@ static int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *d, BN_CTX *ctx) {
return 1;
}

/* len is the expected size of the result We actually calculate with an extra
* word of precision, so we can do faster division if the remainder is not
* required.
* r := 2^len / m */
// len is the expected size of the result. We actually calculate with an extra
// word of precision, so we can do faster division if the remainder is not
// required.
// r := 2^len / m
static int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx) {
int ret = -1;
BIGNUM *t;
@@ -289,34 +289,34 @@ static int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
return 1;
}

/* We want the remainder
* Given input of ABCDEF / ab
* we need multiply ABCDEF by 3 digests of the reciprocal of ab */
// We want the remainder
// Given input of ABCDEF / ab
  // we need to multiply ABCDEF by 3 digits of the reciprocal of ab

/* i := max(BN_num_bits(m), 2*BN_num_bits(N)) */
// i := max(BN_num_bits(m), 2*BN_num_bits(N))
i = BN_num_bits(m);
j = recp->num_bits << 1;
if (j > i) {
i = j;
}

/* Nr := round(2^i / N) */
// Nr := round(2^i / N)
if (i != recp->shift) {
recp->shift =
BN_reciprocal(&(recp->Nr), &(recp->N), i,
ctx); /* BN_reciprocal returns i, or -1 for an error */
ctx); // BN_reciprocal returns i, or -1 for an error
}

if (recp->shift == -1) {
goto err;
}

/* d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i -
* BN_num_bits(N)))|
* = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i -
* BN_num_bits(N)))|
* <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
* = |m/N| */
// d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i -
// BN_num_bits(N)))|
// = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i -
// BN_num_bits(N)))|
// <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
// = |m/N|
if (!BN_rshift(a, m, recp->num_bits)) {
goto err;
}
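The reciprocal trick is easier to see on single machine words. The sketch below is not from BoringSSL: approx_div and bit_length are hypothetical helpers that only mirror the shape of BN_div_recp's quotient estimate under the stated assumptions.

#include <stdint.h>

// bit_length returns the number of bits needed to represent x
// (a stand-in for BN_num_bits).
static unsigned bit_length(uint32_t x) {
  unsigned k = 0;
  while (x != 0) {
    x >>= 1;
    k++;
  }
  return k;
}

// approx_div estimates m / N from a reciprocal Nr = floor(2^i / N), where
// i = max(bit_length(m), 2 * bit_length(N)). The estimate never exceeds the
// true quotient and is at most a few below it, so a short correction loop
// finishes the job. Assumes 2 <= N < 2^16.
static uint32_t approx_div(uint32_t m, uint32_t N) {
  unsigned k = bit_length(N);
  unsigned i = bit_length(m);
  if (2 * k > i) {
    i = 2 * k;
  }
  uint64_t Nr = (UINT64_C(1) << i) / N;               // the precomputed reciprocal
  uint64_t q = ((uint64_t)(m >> k) * Nr) >> (i - k);  // first estimate, <= m / N
  while ((q + 1) * (uint64_t)N <= m) {                // correct the underestimate
    q++;
  }
  return (uint32_t)q;
}

For instance, approx_div(1000000, 37) works out to 27027, matching 1000000 / 37.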
@@ -383,7 +383,7 @@ static int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
}
ca = a;
} else {
ca = x; /* Just do the mod */
ca = x; // Just do the mod
}

ret = BN_div_recp(NULL, r, ca, recp, ctx);
@@ -393,29 +393,29 @@ err:
return ret;
}

/* BN_window_bits_for_exponent_size -- macro for sliding window mod_exp
* functions
*
* For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of
* multiplications is a constant plus on average
*
* 2^(w-1) + (b-w)/(w+1);
*
* here 2^(w-1) is for precomputing the table (we actually need entries only
* for windows that have the lowest bit set), and (b-w)/(w+1) is an
* approximation for the expected number of w-bit windows, not counting the
* first one.
*
* Thus we should use
*
* w >= 6 if b > 671
* w = 5 if 671 > b > 239
* w = 4 if 239 > b > 79
* w = 3 if 79 > b > 23
* w <= 2 if 23 > b
*
* (with draws in between). Very small exponents are often selected
* with low Hamming weight, so we use w = 1 for b <= 23. */
// BN_window_bits_for_exponent_size -- macro for sliding window mod_exp
// functions
//
// For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of
// multiplications is a constant plus on average
//
// 2^(w-1) + (b-w)/(w+1);
//
// here 2^(w-1) is for precomputing the table (we actually need entries only
// for windows that have the lowest bit set), and (b-w)/(w+1) is an
// approximation for the expected number of w-bit windows, not counting the
// first one.
//
// Thus we should use
//
// w >= 6 if b > 671
// w = 5 if 671 > b > 239
// w = 4 if 239 > b > 79
// w = 3 if 79 > b > 23
// w <= 2 if 23 > b
//
// (with draws in between). Very small exponents are often selected
// with low Hamming weight, so we use w = 1 for b <= 23.
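To make the table concrete (an illustrative calculation, not from the source): a 2048-bit exponent selects w = 6 at an average cost of about 2^5 + 2042/7, i.e. roughly 324 multiplications besides the squarings, while a 256-bit exponent selects w = 5 and a 160-bit one w = 4.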
#define BN_window_bits_for_exponent_size(b) \
((b) > 671 ? 6 : \
(b) > 239 ? 5 : \
@@ -427,14 +427,14 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
int i, j, bits, ret = 0, wstart, window;
int start = 1;
BIGNUM *aa;
/* Table of variables obtained from 'ctx' */
// Table of variables obtained from 'ctx'
BIGNUM *val[TABLE_SIZE];
BN_RECP_CTX recp;

bits = BN_num_bits(p);

if (bits == 0) {
/* x**0 mod 1 is still zero. */
// x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(r);
return 1;
@@ -451,7 +451,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,

BN_RECP_CTX_init(&recp);
if (m->neg) {
/* ignore sign of 'm' */
// ignore sign of 'm'
if (!BN_copy(aa, m)) {
goto err;
}
@@ -466,7 +466,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
}

if (!BN_nnmod(val[0], a, m, ctx)) {
goto err; /* 1 */
goto err; // 1
}
if (BN_is_zero(val[0])) {
BN_zero(r);
@@ -477,7 +477,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
window = BN_window_bits_for_exponent_size(bits);
if (window > 1) {
if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) {
goto err; /* 2 */
goto err; // 2
}
j = 1 << (window - 1);
for (i = 1; i < j; i++) {
@@ -488,18 +488,18 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
}
}

start = 1; /* This is used to avoid multiplication etc
* when there is only the value '1' in the
* buffer. */
wstart = bits - 1; /* The top bit of the window */
start = 1; // This is used to avoid multiplication etc
// when there is only the value '1' in the
// buffer.
wstart = bits - 1; // The top bit of the window

if (!BN_one(r)) {
goto err;
}

for (;;) {
int wvalue; /* The 'value' of the window */
int wend; /* The bottom bit of the window */
int wvalue; // The 'value' of the window
int wend; // The bottom bit of the window

if (BN_is_bit_set(p, wstart) == 0) {
if (!start) {
@@ -514,10 +514,10 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
continue;
}

/* We now have wstart on a 'set' bit, we now need to work out
* how bit a window to do. To do this we need to scan
* forward until the last set bit before the end of the
* window */
// We now have wstart on a 'set' bit, we now need to work out
    // how big a window to do. To do this we need to scan
// forward until the last set bit before the end of the
// window
wvalue = 1;
wend = 0;
for (i = 1; i < window; i++) {
@@ -531,9 +531,9 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
}
}

/* wend is the size of the current window */
// wend is the size of the current window
j = wend + 1;
/* add the 'bytes above' */
// add the 'bytes above'
if (!start) {
for (i = 0; i < j; i++) {
if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) {
@@ -542,12 +542,12 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
}
}

/* wvalue will be an odd number < 2^window */
// wvalue will be an odd number < 2^window
if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) {
goto err;
}

/* move the 'window' down further */
// move the 'window' down further
wstart -= wend + 1;
start = 0;
if (wstart < 0) {
@@ -577,7 +577,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
int start = 1;
BIGNUM *d, *r;
const BIGNUM *aa;
/* Table of variables obtained from 'ctx' */
// Table of variables obtained from 'ctx'
BIGNUM *val[TABLE_SIZE];
BN_MONT_CTX *new_mont = NULL;

@@ -587,7 +587,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
bits = BN_num_bits(p);
if (bits == 0) {
/* x**0 mod 1 is still zero. */
// x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(rr);
return 1;
@@ -603,7 +603,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
goto err;
}

/* Allocate a montgomery context if it was not supplied by the caller. */
// Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -627,13 +627,13 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
goto err;
}
if (!BN_to_montgomery(val[0], aa, mont, ctx)) {
goto err; /* 1 */
goto err; // 1
}

window = BN_window_bits_for_exponent_size(bits);
if (window > 1) {
if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx)) {
goto err; /* 2 */
goto err; // 2
}
j = 1 << (window - 1);
for (i = 1; i < j; i++) {
@@ -644,32 +644,32 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
}

start = 1; /* This is used to avoid multiplication etc
* when there is only the value '1' in the
* buffer. */
wstart = bits - 1; /* The top bit of the window */
start = 1; // This is used to avoid multiplication etc
// when there is only the value '1' in the
// buffer.
wstart = bits - 1; // The top bit of the window

j = m->top; /* borrow j */
j = m->top; // borrow j
if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
if (!bn_wexpand(r, j)) {
goto err;
}
/* 2^(top*BN_BITS2) - m */
// 2^(top*BN_BITS2) - m
r->d[0] = (0 - m->d[0]) & BN_MASK2;
for (i = 1; i < j; i++) {
r->d[i] = (~m->d[i]) & BN_MASK2;
}
r->top = j;
/* Upper words will be zero if the corresponding words of 'm'
* were 0xfff[...], so decrement r->top accordingly. */
// Upper words will be zero if the corresponding words of 'm'
// were 0xfff[...], so decrement r->top accordingly.
bn_correct_top(r);
} else if (!BN_to_montgomery(r, BN_value_one(), mont, ctx)) {
goto err;
}

for (;;) {
int wvalue; /* The 'value' of the window */
int wend; /* The bottom bit of the window */
int wvalue; // The 'value' of the window
int wend; // The bottom bit of the window

if (BN_is_bit_set(p, wstart) == 0) {
if (!start && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
@@ -682,9 +682,9 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
continue;
}

/* We now have wstart on a 'set' bit, we now need to work out how bit a
* window to do. To do this we need to scan forward until the last set bit
* before the end of the window */
    // We now have wstart on a 'set' bit, we now need to work out how big a
// window to do. To do this we need to scan forward until the last set bit
// before the end of the window
wvalue = 1;
wend = 0;
for (i = 1; i < window; i++) {
@@ -698,9 +698,9 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
}

/* wend is the size of the current window */
// wend is the size of the current window
j = wend + 1;
/* add the 'bytes above' */
// add the 'bytes above'
if (!start) {
for (i = 0; i < j; i++) {
if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
@@ -709,12 +709,12 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
}

/* wvalue will be an odd number < 2^window */
// wvalue will be an odd number < 2^window
if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx)) {
goto err;
}

/* move the 'window' down further */
// move the 'window' down further
wstart -= wend + 1;
start = 0;
if (wstart < 0) {
@@ -733,10 +733,10 @@ err:
return ret;
}

/* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
* layout so that accessing any of these table values shows the same access
* pattern as far as cache lines are concerned. The following functions are
* used to transfer a BIGNUM from/to that table. */
// BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
// layout so that accessing any of these table values shows the same access
// pattern as far as cache lines are concerned. The following functions are
// used to transfer a BIGNUM from/to that table.
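A concrete reading of this layout (illustrative; it is inferred from the loop in copy_to_prebuf below, where the stride width is presumably 1 << window): with window = 5 there are 32 precomputed entries, and word i of entry idx is stored at table[i * 32 + idx], so entries are interleaved word by word rather than stored back to back. copy_from_prebuf then scans the table and selects the wanted entry with constant-time masks, so the sequence of addresses touched does not depend on idx.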
static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx,
int window) {
int i, j;
@@ -744,7 +744,7 @@ static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx,
BN_ULONG *table = (BN_ULONG *) buf;

if (top > b->top) {
top = b->top; /* this works because 'buf' is explicitly zeroed */
top = b->top; // this works because 'buf' is explicitly zeroed
}

for (i = 0, j = idx; i < top; i++, j += width) {
@@ -778,8 +778,8 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx,
int xstride = 1 << (window - 2);
BN_ULONG y0, y1, y2, y3;

i = idx >> (window - 2); /* equivalent of idx / xstride */
idx &= xstride - 1; /* equivalent of idx % xstride */
i = idx >> (window - 2); // equivalent of idx / xstride
idx &= xstride - 1; // equivalent of idx % xstride

y0 = (BN_ULONG)0 - (constant_time_eq_int(i, 0) & 1);
y1 = (BN_ULONG)0 - (constant_time_eq_int(i, 1) & 1);
@@ -804,23 +804,23 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx,
return 1;
}

/* BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache
* line width of the target processor is at least the following value. */
// BN_mod_exp_mont_consttime is based on the assumption that the L1 data cache
// line width of the target processor is at least the following value.
#define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH (64)
#define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK \
(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1)

/* Window sizes optimized for fixed window size modular exponentiation
* algorithm (BN_mod_exp_mont_consttime).
*
* To achieve the security goals of BN_mode_exp_mont_consttime, the maximum
* size of the window must not exceed
* log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH).
*
* Window size thresholds are defined for cache line sizes of 32 and 64, cache
* line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of
* 7 should only be used on processors that have a 128 byte or greater cache
* line size. */
// Window sizes optimized for fixed window size modular exponentiation
// algorithm (BN_mod_exp_mont_consttime).
//
// To achieve the security goals of BN_mod_exp_mont_consttime, the maximum
// size of the window must not exceed
// log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH).
//
// Window size thresholds are defined for cache line sizes of 32 and 64, cache
// line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of
// 7 should only be used on processors that have a 128 byte or greater cache
// line size.
#if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64

#define BN_window_bits_for_ctime_exponent_size(b) \
@@ -835,19 +835,18 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx,

#endif

/* Given a pointer value, compute the next address that is a cache line
* multiple. */
// Given a pointer value, compute the next address that is a cache line
// multiple.
#define MOD_EXP_CTIME_ALIGN(x_) \
((unsigned char *)(x_) + \
(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - \
(((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
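A quick illustrative evaluation of this macro (not from the source): with 64-byte lines, a pointer value of 0x1007 has (x & 63) == 7, so the macro adds 57 and yields 0x1040; an already aligned 0x1000 adds a full 64 and also yields 0x1040, i.e. the result always lands on the next cache-line boundary.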

/* This variant of BN_mod_exp_mont() uses fixed windows and the special
* precomputation memory layout to limit data-dependency to a minimum
* to protect secret exponents (cf. the hyper-threading timing attacks
* pointed out by Colin Percival,
* http://www.daemonology.net/hyperthreading-considered-harmful/)
*/
// This variant of BN_mod_exp_mont() uses fixed windows and the special
// precomputation memory layout to limit data-dependency to a minimum
// to protect secret exponents (cf. the hyper-threading timing attacks
// pointed out by Colin Percival,
// http://www.daemonology.net/hyperthreading-considered-harmful/)
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx,
const BN_MONT_CTX *mont) {
@@ -871,7 +870,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,

bits = BN_num_bits(p);
if (bits == 0) {
/* x**0 mod 1 is still zero. */
// x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(rr);
return 1;
@@ -879,7 +878,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
return BN_one(rr);
}

/* Allocate a montgomery context if it was not supplied by the caller. */
// Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -898,9 +897,9 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}

#ifdef RSAZ_ENABLED
/* If the size of the operands allow it, perform the optimized
* RSAZ exponentiation. For further information see
* crypto/bn/rsaz_exp.c and accompanying assembly modules. */
  // If the size of the operands allows it, perform the optimized
// RSAZ exponentiation. For further information see
// crypto/bn/rsaz_exp.c and accompanying assembly modules.
if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) &&
rsaz_avx2_eligible()) {
if (!bn_wexpand(rr, 16)) {
@@ -915,19 +914,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
#endif

/* Get the window size to use with size of p. */
// Get the window size to use with size of p.
window = BN_window_bits_for_ctime_exponent_size(bits);
#if defined(OPENSSL_BN_ASM_MONT5)
if (window >= 5) {
window = 5; /* ~5% improvement for RSA2048 sign, and even for RSA4096 */
/* reserve space for mont->N.d[] copy */
window = 5; // ~5% improvement for RSA2048 sign, and even for RSA4096
// reserve space for mont->N.d[] copy
powerbufLen += top * sizeof(mont->N.d[0]);
}
#endif

/* Allocate a buffer large enough to hold all of the pre-computed
* powers of am, am itself and tmp.
*/
// Allocate a buffer large enough to hold all of the pre-computed
// powers of am, am itself and tmp.
numPowers = 1 << window;
powerbufLen +=
sizeof(m->d[0]) *
@@ -953,7 +951,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
#endif

/* lay down tmp and am right after powers table */
// lay down tmp and am right after powers table
tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
am.d = tmp.d + top;
tmp.top = am.top = 0;
@@ -961,10 +959,10 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
tmp.neg = am.neg = 0;
tmp.flags = am.flags = BN_FLG_STATIC_DATA;

/* prepare a^0 in Montgomery domain */
/* by Shay Gueron's suggestion */
// prepare a^0 in Montgomery domain
// by Shay Gueron's suggestion
if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
/* 2^(top*BN_BITS2) - m */
// 2^(top*BN_BITS2) - m
tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
for (i = 1; i < top; i++) {
tmp.d[i] = (~m->d[i]) & BN_MASK2;
@@ -974,7 +972,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
goto err;
}

/* prepare a^1 in Montgomery domain */
// prepare a^1 in Montgomery domain
assert(!a->neg);
assert(BN_ucmp(a, m) < 0);
if (!BN_to_montgomery(&am, a, mont, ctx)) {
@@ -982,18 +980,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}

#if defined(OPENSSL_BN_ASM_MONT5)
/* This optimization uses ideas from http://eprint.iacr.org/2011/239,
* specifically optimization of cache-timing attack countermeasures
* and pre-computation optimization. */
// This optimization uses ideas from http://eprint.iacr.org/2011/239,
// specifically optimization of cache-timing attack countermeasures
// and pre-computation optimization.

/* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
* 512-bit RSA is hardly relevant, we omit it to spare size... */
// Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
// 512-bit RSA is hardly relevant, we omit it to spare size...
if (window == 5 && top > 1) {
const BN_ULONG *n0 = mont->n0;
BN_ULONG *np;

/* BN_to_montgomery can contaminate words above .top
* [in BN_DEBUG[_DEBUG] build]... */
// BN_to_montgomery can contaminate words above .top
// [in BN_DEBUG[_DEBUG] build]...
for (i = am.top; i < top; i++) {
am.d[i] = 0;
}
@@ -1001,7 +999,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
tmp.d[i] = 0;
}

/* copy mont->N.d[] to improve cache locality */
// copy mont->N.d[] to improve cache locality
for (np = am.d + top, i = 0; i < top; i++) {
np[i] = mont->N.d[i];
}
@@ -1011,7 +1009,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, 2);

/* same as above, but uses squaring for 1/2 of operations */
// same as above, but uses squaring for 1/2 of operations
for (i = 4; i < 32; i *= 2) {
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, i);
@@ -1042,13 +1040,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
}
bn_gather5(tmp.d, top, powerbuf, wvalue);

/* At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit
* that has not been read yet.) */
// At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit
// that has not been read yet.)
assert(bits >= -1 && (bits == -1 || bits % 5 == 4));

/* Scan the exponent one window at a time starting from the most
* significant bits.
*/
// Scan the exponent one window at a time starting from the most
// significant bits.
if (top & 7) {
while (bits >= 0) {
for (wvalue = 0, i = 0; i < 5; i++, bits--) {
@@ -1066,16 +1063,16 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
const uint8_t *p_bytes = (const uint8_t *)p->d;
int max_bits = p->top * BN_BITS2;
assert(bits < max_bits);
/* |p = 0| has been handled as a special case, so |max_bits| is at least
* one word. */
// |p = 0| has been handled as a special case, so |max_bits| is at least
// one word.
assert(max_bits >= 64);

/* If the first bit to be read lands in the last byte, unroll the first
* iteration to avoid reading past the bounds of |p->d|. (After the first
* iteration, we are guaranteed to be past the last byte.) Note |bits|
* here is the top bit, inclusive. */
// If the first bit to be read lands in the last byte, unroll the first
// iteration to avoid reading past the bounds of |p->d|. (After the first
// iteration, we are guaranteed to be past the last byte.) Note |bits|
// here is the top bit, inclusive.
if (bits - 4 >= max_bits - 8) {
/* Read five bits from |bits-4| through |bits|, inclusive. */
// Read five bits from |bits-4| through |bits|, inclusive.
wvalue = p_bytes[p->top * BN_BYTES - 1];
wvalue >>= (bits - 4) & 7;
wvalue &= 0x1f;
@@ -1083,7 +1080,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
}
while (bits >= 0) {
/* Read five bits from |bits-4| through |bits|, inclusive. */
// Read five bits from |bits-4| through |bits|, inclusive.
int first_bit = bits - 4;
uint16_t val;
OPENSSL_memcpy(&val, p_bytes + (first_bit >> 3), sizeof(val));
@@ -1101,7 +1098,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
if (!BN_copy(rr, &tmp)) {
ret = 0;
}
goto err; /* non-zero ret means it's not error */
    goto err; // non-zero ret means it's not an error
}
} else
#endif
@@ -1111,18 +1108,17 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
goto err;
}

/* If the window size is greater than 1, then calculate
* val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
* (even powers could instead be computed as (a^(i/2))^2
* to use the slight performance advantage of sqr over mul).
*/
// If the window size is greater than 1, then calculate
// val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
// (even powers could instead be computed as (a^(i/2))^2
// to use the slight performance advantage of sqr over mul).
if (window > 1) {
if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx) ||
!copy_to_prebuf(&tmp, top, powerbuf, 2, window)) {
goto err;
}
for (i = 3; i < numPowers; i++) {
/* Calculate a^i = a^(i-1) * a */
// Calculate a^i = a^(i-1) * a
if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx) ||
!copy_to_prebuf(&tmp, top, powerbuf, i, window)) {
goto err;
@@ -1138,13 +1134,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
goto err;
}

/* Scan the exponent one window at a time starting from the most
* significant bits.
*/
// Scan the exponent one window at a time starting from the most
// significant bits.
while (bits >= 0) {
wvalue = 0; /* The 'value' of the window */
wvalue = 0; // The 'value' of the window

/* Scan the window, squaring the result as we go */
// Scan the window, squaring the result as we go
for (i = 0; i < window; i++, bits--) {
if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx)) {
goto err;
@@ -1152,19 +1147,19 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
}

/* Fetch the appropriate pre-computed value from the pre-buf */
// Fetch the appropriate pre-computed value from the pre-buf
if (!copy_from_prebuf(&am, top, powerbuf, wvalue, window)) {
goto err;
}

/* Multiply the result into the intermediate result */
// Multiply the result into the intermediate result
if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx)) {
goto err;
}
}
}

/* Convert the final result from montgomery to standard format */
// Convert the final result from montgomery to standard format
if (!BN_from_montgomery(rr, &tmp, mont, ctx)) {
goto err;
}
@@ -1212,7 +1207,7 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1,
int ret = 0;
BN_MONT_CTX *new_mont = NULL;

/* Allocate a montgomery context if it was not supplied by the caller. */
// Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -1221,9 +1216,9 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1,
mont = new_mont;
}

/* BN_mod_mul_montgomery removes one Montgomery factor, so passing one
* Montgomery-encoded and one non-Montgomery-encoded value gives a
* non-Montgomery-encoded result. */
// BN_mod_mul_montgomery removes one Montgomery factor, so passing one
// Montgomery-encoded and one non-Montgomery-encoded value gives a
// non-Montgomery-encoded result.
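Spelling out the bookkeeping (an illustrative restatement, not from the source): BN_mod_mul_montgomery computes x * y * R^-1 (mod m). After BN_to_montgomery, rr holds (a1^p1) * R while tmp stays in plain form, so the final Montgomery multiplication, in the part of this function not shown in this hunk, yields (a1^p1) * R * (a2^p2) * R^-1 = (a1^p1) * (a2^p2) (mod m), already non-Montgomery-encoded.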
if (!BN_mod_exp_mont(rr, a1, p1, m, ctx, mont) ||
!BN_mod_exp_mont(&tmp, a2, p2, m, ctx, mont) ||
!BN_to_montgomery(rr, rr, mont, ctx) ||


+ 98
- 106
crypto/fipsmodule/bn/gcd.c Show file

@@ -118,9 +118,9 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) {
BIGNUM *t;
int shifts = 0;

/* 0 <= b <= a */
// 0 <= b <= a
while (!BN_is_zero(b)) {
/* 0 < b <= a */
// 0 < b <= a

if (BN_is_odd(a)) {
if (BN_is_odd(b)) {
@@ -136,7 +136,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) {
b = t;
}
} else {
/* a odd - b even */
// a odd - b even
if (!BN_rshift1(b, b)) {
goto err;
}
@@ -147,7 +147,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) {
}
}
} else {
/* a is even */
// a is even
if (BN_is_odd(b)) {
if (!BN_rshift1(a, a)) {
goto err;
@@ -158,7 +158,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) {
b = t;
}
} else {
/* a even - b even */
// a even - b even
if (!BN_rshift1(a, a)) {
goto err;
}
@@ -168,7 +168,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) {
shifts++;
}
}
/* 0 <= b <= a */
// 0 <= b <= a
}

if (shifts) {
@@ -224,7 +224,7 @@ err:
return ret;
}

/* solves ax == 1 (mod n) */
// solves ax == 1 (mod n)
static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
const BIGNUM *a, const BIGNUM *n,
BN_CTX *ctx);
@@ -264,30 +264,29 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
}
A->neg = 0;
sign = -1;
/* From B = a mod |n|, A = |n| it follows that
*
* 0 <= B < A,
* -sign*X*a == B (mod |n|),
* sign*Y*a == A (mod |n|).
*/

/* Binary inversion algorithm; requires odd modulus. This is faster than the
* general algorithm if the modulus is sufficiently small (about 400 .. 500
* bits on 32-bit systems, but much more on 64-bit systems) */
// From B = a mod |n|, A = |n| it follows that
//
// 0 <= B < A,
// -sign*X*a == B (mod |n|),
// sign*Y*a == A (mod |n|).

// Binary inversion algorithm; requires odd modulus. This is faster than the
// general algorithm if the modulus is sufficiently small (about 400 .. 500
// bits on 32-bit systems, but much more on 64-bit systems)
int shift;

while (!BN_is_zero(B)) {
/* 0 < B < |n|,
* 0 < A <= |n|,
* (1) -sign*X*a == B (mod |n|),
* (2) sign*Y*a == A (mod |n|) */
/* Now divide B by the maximum possible power of two in the integers,
* and divide X by the same value mod |n|.
* When we're done, (1) still holds. */
// 0 < B < |n|,
// 0 < A <= |n|,
// (1) -sign*X*a == B (mod |n|),
// (2) sign*Y*a == A (mod |n|)
// Now divide B by the maximum possible power of two in the integers,
// and divide X by the same value mod |n|.
// When we're done, (1) still holds.
shift = 0;
while (!BN_is_bit_set(B, shift)) {
/* note that 0 < B */
// note that 0 < B
shift++;

if (BN_is_odd(X)) {
@@ -295,7 +294,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
goto err;
}
}
/* now X is even, so we can easily divide it by two */
// now X is even, so we can easily divide it by two
if (!BN_rshift1(X, X)) {
goto err;
}
@@ -306,10 +305,10 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
}
}

/* Same for A and Y. Afterwards, (2) still holds. */
// Same for A and Y. Afterwards, (2) still holds.
shift = 0;
while (!BN_is_bit_set(A, shift)) {
/* note that 0 < A */
// note that 0 < A
shift++;

if (BN_is_odd(Y)) {
@@ -317,7 +316,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
goto err;
}
}
/* now Y is even */
// now Y is even
if (!BN_rshift1(Y, Y)) {
goto err;
}
@@ -328,32 +327,32 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
}
}

/* We still have (1) and (2).
* Both A and B are odd.
* The following computations ensure that
*
* 0 <= B < |n|,
* 0 < A < |n|,
* (1) -sign*X*a == B (mod |n|),
* (2) sign*Y*a == A (mod |n|),
*
* and that either A or B is even in the next iteration. */
// We still have (1) and (2).
// Both A and B are odd.
// The following computations ensure that
//
// 0 <= B < |n|,
// 0 < A < |n|,
// (1) -sign*X*a == B (mod |n|),
// (2) sign*Y*a == A (mod |n|),
//
// and that either A or B is even in the next iteration.
if (BN_ucmp(B, A) >= 0) {
/* -sign*(X + Y)*a == B - A (mod |n|) */
// -sign*(X + Y)*a == B - A (mod |n|)
if (!BN_uadd(X, X, Y)) {
goto err;
}
/* NB: we could use BN_mod_add_quick(X, X, Y, n), but that
* actually makes the algorithm slower */
// NB: we could use BN_mod_add_quick(X, X, Y, n), but that
// actually makes the algorithm slower
if (!BN_usub(B, B, A)) {
goto err;
}
} else {
/* sign*(X + Y)*a == A - B (mod |n|) */
// sign*(X + Y)*a == A - B (mod |n|)
if (!BN_uadd(Y, Y, X)) {
goto err;
}
/* as above, BN_mod_add_quick(Y, Y, X, n) would slow things down */
// as above, BN_mod_add_quick(Y, Y, X, n) would slow things down
if (!BN_usub(A, A, B)) {
goto err;
}
@@ -366,20 +365,20 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
goto err;
}

/* The while loop (Euclid's algorithm) ends when
* A == gcd(a,n);
* we have
* sign*Y*a == A (mod |n|),
* where Y is non-negative. */
// The while loop (Euclid's algorithm) ends when
// A == gcd(a,n);
// we have
// sign*Y*a == A (mod |n|),
// where Y is non-negative.

if (sign < 0) {
if (!BN_sub(Y, n, Y)) {
goto err;
}
}
/* Now Y*a == A (mod |n|). */
// Now Y*a == A (mod |n|).

/* Y*a == 1 (mod |n|) */
// Y*a == 1 (mod |n|)
if (!Y->neg && BN_ucmp(Y, n) < 0) {
if (!BN_copy(R, Y)) {
goto err;
@@ -470,11 +469,11 @@ err:
return ret;
}

/* bn_mod_inverse_general is the general inversion algorithm that works for
* both even and odd |n|. It was specifically designed to contain fewer
* branches that may leak sensitive information; see "New Branch Prediction
* Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by
* Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. */
// bn_mod_inverse_general is the general inversion algorithm that works for
// both even and odd |n|. It was specifically designed to contain fewer
// branches that may leak sensitive information; see "New Branch Prediction
// Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by
// Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert.
static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
const BIGNUM *a, const BIGNUM *n,
BN_CTX *ctx) {
@@ -505,58 +504,53 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
A->neg = 0;

sign = -1;
/* From B = a mod |n|, A = |n| it follows that
*
* 0 <= B < A,
* -sign*X*a == B (mod |n|),
* sign*Y*a == A (mod |n|).
*/
// From B = a mod |n|, A = |n| it follows that
//
// 0 <= B < A,
// -sign*X*a == B (mod |n|),
// sign*Y*a == A (mod |n|).

while (!BN_is_zero(B)) {
BIGNUM *tmp;

/*
* 0 < B < A,
* (*) -sign*X*a == B (mod |n|),
* sign*Y*a == A (mod |n|)
*/
// 0 < B < A,
// (*) -sign*X*a == B (mod |n|),
// sign*Y*a == A (mod |n|)

/* (D, M) := (A/B, A%B) ... */
// (D, M) := (A/B, A%B) ...
if (!BN_div(D, M, A, B, ctx)) {
goto err;
}

/* Now
* A = D*B + M;
* thus we have
* (**) sign*Y*a == D*B + M (mod |n|).
*/
// Now
// A = D*B + M;
// thus we have
// (**) sign*Y*a == D*B + M (mod |n|).

tmp = A; /* keep the BIGNUM object, the value does not matter */
tmp = A; // keep the BIGNUM object, the value does not matter

/* (A, B) := (B, A mod B) ... */
// (A, B) := (B, A mod B) ...
A = B;
B = M;
/* ... so we have 0 <= B < A again */

/* Since the former M is now B and the former B is now A,
* (**) translates into
* sign*Y*a == D*A + B (mod |n|),
* i.e.
* sign*Y*a - D*A == B (mod |n|).
* Similarly, (*) translates into
* -sign*X*a == A (mod |n|).
*
* Thus,
* sign*Y*a + D*sign*X*a == B (mod |n|),
* i.e.
* sign*(Y + D*X)*a == B (mod |n|).
*
* So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
* -sign*X*a == B (mod |n|),
* sign*Y*a == A (mod |n|).
* Note that X and Y stay non-negative all the time.
*/
// ... so we have 0 <= B < A again

// Since the former M is now B and the former B is now A,
// (**) translates into
// sign*Y*a == D*A + B (mod |n|),
// i.e.
// sign*Y*a - D*A == B (mod |n|).
// Similarly, (*) translates into
// -sign*X*a == A (mod |n|).
//
// Thus,
// sign*Y*a + D*sign*X*a == B (mod |n|),
// i.e.
// sign*(Y + D*X)*a == B (mod |n|).
//
// So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
// -sign*X*a == B (mod |n|),
// sign*Y*a == A (mod |n|).
// Note that X and Y stay non-negative all the time.
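A tiny illustrative trace of these invariants (not from the source), for a = 3 and n = 7: start with A = 7, B = 3, X = 1, Y = 0, sign = -1. The first division gives D = 2, M = 1, so (A, B) becomes (3, 1) and (X, Y, sign) becomes (2, 1, +1); indeed -sign*X*a = -6 == 1 = B (mod 7) and sign*Y*a = 3 = A. The second division gives D = 3, M = 0, so (A, B) becomes (1, 0) and (X, Y, sign) becomes (7, 2, -1), with sign*Y*a = -6 == 1 = A (mod 7). The loop ends with A = gcd(3, 7) = 1; since sign < 0, Y becomes 7 - 2 = 5, and 5*3 = 15 == 1 (mod 7), the expected inverse.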

if (!BN_mul(tmp, D, X, ctx)) {
goto err;
@@ -565,7 +559,7 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
goto err;
}

M = Y; /* keep the BIGNUM object, the value does not matter */
M = Y; // keep the BIGNUM object, the value does not matter
Y = X;
X = tmp;
sign = -sign;
@@ -577,22 +571,20 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
goto err;
}

/*
* The while loop (Euclid's algorithm) ends when
* A == gcd(a,n);
* we have
* sign*Y*a == A (mod |n|),
* where Y is non-negative.
*/
// The while loop (Euclid's algorithm) ends when
// A == gcd(a,n);
// we have
// sign*Y*a == A (mod |n|),
// where Y is non-negative.

if (sign < 0) {
if (!BN_sub(Y, n, Y)) {
goto err;
}
}
/* Now Y*a == A (mod |n|). */
// Now Y*a == A (mod |n|).

/* Y*a == 1 (mod |n|) */
// Y*a == 1 (mod |n|)
if (!Y->neg && BN_ucmp(Y, n) < 0) {
if (!BN_copy(R, Y)) {
goto err;


+ 14
- 14
crypto/fipsmodule/bn/generic.c Show file

@@ -61,8 +61,8 @@
#include "internal.h"


/* This file has two other implementations: x86 assembly language in
* asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. */
// This file has two other implementations: x86 assembly language in
// asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c.
#if defined(OPENSSL_NO_ASM) || \
!(defined(OPENSSL_X86) || (defined(OPENSSL_X86_64) && defined(__GNUC__)))

@@ -122,7 +122,7 @@
BN_UMULT_LOHI(r0, r1, tmp, tmp); \
} while (0)

#endif /* !BN_ULLONG */
#endif // !BN_ULLONG

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
BN_ULONG w) {
@@ -242,7 +242,7 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
return (BN_ULONG)ll;
}

#else /* !BN_ULLONG */
#else // !BN_ULLONG

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int n) {
@@ -299,7 +299,7 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
return (BN_ULONG)c;
}

#endif /* !BN_ULLONG */
#endif // !BN_ULLONG

BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int n) {
@@ -356,15 +356,15 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
return c;
}

/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)

#ifdef BN_ULLONG

/* Keep in mind that additions to multiplication result can not overflow,
* because its high half cannot be all-ones. */
// Keep in mind that additions to multiplication result can not overflow,
// because its high half cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG hi; \
@@ -415,8 +415,8 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,

#else

/* Keep in mind that additions to hi can not overflow, because the high word of
* a multiplication result cannot be all-ones. */
// Keep in mind that additions to hi can not overflow, because the high word of
// a multiplication result cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG ta = (a), tb = (b); \
@@ -456,7 +456,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#endif /* !BN_ULLONG */
#endif // !BN_ULLONG

void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
BN_ULONG c1, c2, c3;


+ 31
- 31
crypto/fipsmodule/bn/internal.h Show file

@@ -141,7 +141,7 @@ extern "C" {
#if defined(OPENSSL_64_BIT)

#if !defined(_MSC_VER)
/* MSVC doesn't support two-word integers on 64-bit. */
// MSVC doesn't support two-word integers on 64-bit.
#define BN_ULLONG uint128_t
#endif

@@ -168,11 +168,11 @@ extern "C" {
#define BN_MASK2l (0xffffUL)
#define BN_MASK2h1 (0xffff8000UL)
#define BN_MASK2h (0xffff0000UL)
/* On some 32-bit platforms, Montgomery multiplication is done using 64-bit
* arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0|
* needs to be two words long. Only certain 32-bit platforms actually make use
* of n0[1] and shorter R value would suffice for the others. However,
* currently only the assembly files know which is which. */
// On some 32-bit platforms, Montgomery multiplication is done using 64-bit
// arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0|
// needs to be two words long. Only certain 32-bit platforms actually make use
// of n0[1] and shorter R value would suffice for the others. However,
// currently only the assembly files know which is which.
#define BN_MONT_CTX_N0_LIMBS 2
#define BN_TBIT (0x80000000UL)
#define BN_DEC_CONV (1000000000UL)
@@ -195,21 +195,21 @@ extern "C" {
#define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)
#endif

/* bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or
* until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. */
// bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or
// until |top| is zero. If |bn| is zero, |bn->neg| is set to zero.
void bn_correct_top(BIGNUM *bn);

/* bn_wexpand ensures that |bn| has at least |words| works of space without
* altering its value. It returns one on success or zero on allocation
* failure. */
// bn_wexpand ensures that |bn| has at least |words| words of space without
// altering its value. It returns one on success or zero on allocation
// failure.
int bn_wexpand(BIGNUM *bn, size_t words);

/* bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather
* than a number of words. */
// bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather
// than a number of words.
int bn_expand(BIGNUM *bn, size_t bits);

/* bn_set_words sets |bn| to the value encoded in the |num| words in |words|,
* least significant word first. */
// bn_set_words sets |bn| to the value encoded in the |num| words in |words|,
// least significant word first.
int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num);

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
@@ -223,14 +223,14 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b);
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a);
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a);

/* bn_cmp_words returns a value less than, equal to or greater than zero if
* the, length |n|, array |a| is less than, equal to or greater than |b|. */
// bn_cmp_words returns a value less than, equal to or greater than zero if
// the length-|n| array |a| is less than, equal to or greater than |b|.
int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n);

/* bn_cmp_words returns a value less than, equal to or greater than zero if the
* array |a| is less than, equal to or greater than |b|. The arrays can be of
* different lengths: |cl| gives the minimum of the two lengths and |dl| gives
* the length of |a| minus the length of |b|. */
// bn_cmp_part_words returns a value less than, equal to or greater than zero if the
// array |a| is less than, equal to or greater than |b|. The arrays can be of
// different lengths: |cl| gives the minimum of the two lengths and |dl| gives
// the length of |a| minus the length of |b|.
int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl);

int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
@@ -247,25 +247,25 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n);
#error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform."
#endif

/* bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|,
* computed with Fermat's Little Theorem. It returns one on success and zero on
* error. If |mont_p| is NULL, one will be computed temporarily. */
// bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|,
// computed with Fermat's Little Theorem. It returns one on success and zero on
// error. If |mont_p| is NULL, one will be computed temporarily.
int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
BN_CTX *ctx, const BN_MONT_CTX *mont_p);

/* bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses
* |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of
* protecting the exponent. */
// bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses
// |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of
// protecting the exponent.
int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
BN_CTX *ctx, const BN_MONT_CTX *mont_p);

/* bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or
* -2 on error. */
// bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or
// -2 on error.
int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);


#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_BN_INTERNAL_H */
#endif // OPENSSL_HEADER_BN_INTERNAL_H

+ 19
- 19
crypto/fipsmodule/bn/jacobi.c Show file

@@ -57,24 +57,24 @@
#include "internal.h"


/* least significant word */
// least significant word
#define BN_lsw(n) (((n)->top == 0) ? (BN_ULONG) 0 : (n)->d[0])

int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
/* In 'tab', only odd-indexed entries are relevant:
* For any odd BIGNUM n,
* tab[BN_lsw(n) & 7]
* is $(-1)^{(n^2-1)/8}$ (using TeX notation).
* Note that the sign of n does not matter. */
// In 'tab', only odd-indexed entries are relevant:
// For any odd BIGNUM n,
// tab[BN_lsw(n) & 7]
// is $(-1)^{(n^2-1)/8}$ (using TeX notation).
// Note that the sign of n does not matter.
static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1};
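As a sanity check of the table (illustrative, not from the source): for n congruent to 1, 3, 5, 7 (mod 8), (n^2 - 1)/8 is 0, 1, 3, 6 respectively, so (-1)^((n^2-1)/8) is +1, -1, -1, +1, matching the odd-indexed entries of tab.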

/* The Jacobi symbol is only defined for odd modulus. */
// The Jacobi symbol is only defined for odd modulus.
if (!BN_is_odd(b)) {
OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
return -2;
}

/* Require b be positive. */
// Require b be positive.
if (BN_is_negative(b)) {
OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
return -2;
@@ -93,22 +93,22 @@ int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
goto end;
}

/* Adapted from logic to compute the Kronecker symbol, originally implemented
* according to Henri Cohen, "A Course in Computational Algebraic Number
* Theory" (algorithm 1.4.10). */
// Adapted from logic to compute the Kronecker symbol, originally implemented
// according to Henri Cohen, "A Course in Computational Algebraic Number
// Theory" (algorithm 1.4.10).

ret = 1;

while (1) {
/* Cohen's step 3: */
// Cohen's step 3:

/* B is positive and odd */
// B is positive and odd
if (BN_is_zero(A)) {
ret = BN_is_one(B) ? ret : 0;
goto end;
}

/* now A is non-zero */
// now A is non-zero
int i = 0;
while (!BN_is_bit_set(A, i)) {
i++;
@@ -118,18 +118,18 @@ int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
goto end;
}
if (i & 1) {
/* i is odd */
/* multiply 'ret' by $(-1)^{(B^2-1)/8}$ */
// i is odd
// multiply 'ret' by $(-1)^{(B^2-1)/8}$
ret = ret * tab[BN_lsw(B) & 7];
}

/* Cohen's step 4: */
/* multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ */
// Cohen's step 4:
// multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$
if ((A->neg ? ~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2) {
ret = -ret;
}

/* (A, B) := (B mod |A|, |A|) */
// (A, B) := (B mod |A|, |A|)
if (!BN_nnmod(B, B, A, ctx)) {
ret = -2;
goto end;


+ 22
- 22
crypto/fipsmodule/bn/montgomery.c Show file

@@ -187,18 +187,18 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) {
return 0;
}

/* Save the modulus. */
// Save the modulus.
if (!BN_copy(&mont->N, mod)) {
OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
return 0;
}

/* Find n0 such that n0 * N == -1 (mod r).
*
* Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the
* others, we could use a shorter R value and use faster |BN_ULONG|-based
* math instead of |uint64_t|-based math, which would be double-precision.
* However, currently only the assembler files know which is which. */
// Find n0 such that n0 * N == -1 (mod r).
//
// Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the
// others, we could use a shorter R value and use faster |BN_ULONG|-based
// math instead of |uint64_t|-based math, which would be double-precision.
// However, currently only the assembler files know which is which.
uint64_t n0 = bn_mont_n0(mod);
mont->n0[0] = (BN_ULONG)n0;
#if BN_MONT_CTX_N0_LIMBS == 2
@@ -207,14 +207,14 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) {
mont->n0[1] = 0;
#endif

/* Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS such that R
* > mod. Even though the assembly on some 32-bit platforms works with 64-bit
* values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS *
* BN_BITS2|, is correct because R**2 will still be a multiple of the latter
* as |BN_MONT_CTX_N0_LIMBS| is either one or two.
*
* XXX: This is not constant time with respect to |mont->N|, but it should
* be. */
// Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS such that R
// > mod. Even though the assembly on some 32-bit platforms works with 64-bit
// values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS *
// BN_BITS2|, is correct because R**2 will still be a multiple of the latter
// as |BN_MONT_CTX_N0_LIMBS| is either one or two.
//
// XXX: This is not constant time with respect to |mont->N|, but it should
// be.
unsigned lgBigR = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
if (!bn_mod_exp_base_2_vartime(&mont->RR, lgBigR * 2, &mont->N)) {
return 0;
@@ -272,7 +272,7 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
return 1;
}

max = (2 * nl); /* carry is stored separately */
max = (2 * nl); // carry is stored separately
if (!bn_wexpand(r, max)) {
return 0;
}
@@ -281,7 +281,7 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
np = n->d;
rp = r->d;

/* clear the top words of T */
// clear the top words of T
if (max > r->top) {
OPENSSL_memset(&rp[r->top], 0, (max - r->top) * sizeof(BN_ULONG));
}
@@ -311,8 +311,8 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
uintptr_t m;

v = bn_sub_words(rp, ap, np, nl) - carry;
/* if subtraction result is real, then trick unconditional memcpy below to
* perform in-place "refresh" instead of actual copy. */
    // if the subtraction result is valid (no borrow), trick the unconditional
    // memcpy below into performing an in-place "refresh" instead of an actual copy.
m = (0u - (uintptr_t)v);
nrp = (BN_ULONG *)(((uintptr_t)rp & ~m) | ((uintptr_t)ap & m));
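The mask trick above is a branchless pointer select. The snippet below is a standalone illustration, not BoringSSL code (ct_select is a hypothetical name), and assumes flag is either 0 or 1, mirroring m and nrp above.

#include <stdint.h>

// ct_select returns p when flag == 0 and q when flag == 1, without branching.
static const uint32_t *ct_select(uintptr_t flag, const uint32_t *p,
                                 const uint32_t *q) {
  uintptr_t mask = 0u - flag;  // all-ones when flag == 1, zero when flag == 0
  return (const uint32_t *)(((uintptr_t)p & ~mask) | ((uintptr_t)q & mask));
}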

@@ -371,7 +371,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
#else
int num = mont->N.top;

/* |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. */
// |bn_mul_mont| requires at least 128 bits of limbs, at least for x86.
if (num < (128 / BN_BITS2) ||
a->top != num ||
b->top != num) {
@@ -382,7 +382,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
return 0;
}
if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
/* The check above ensures this won't happen. */
// The check above ensures this won't happen.
assert(0);
OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
return 0;
@@ -417,7 +417,7 @@ static int bn_mod_mul_montgomery_fallback(BIGNUM *r, const BIGNUM *a,
}
}

/* reduce from aRR to aR */
// reduce from aRR to aR
if (!BN_from_montgomery_word(r, tmp, mont)) {
goto err;
}
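
The n0 value saved above is the negative inverse of the low modulus word modulo r. As a standalone illustration (not part of this change), the following sketch computes the same quantity for a single 64-bit word by Hensel/Newton lifting and checks the defining relation n*n0 == -1 (mod 2^64); the function name and the use of lifting rather than the xbinGCD-style loop used in montgomery_inv.c are illustrative choices only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Illustration only, not BoringSSL code: n0 = -1/n mod 2^64 for odd n via
// Hensel/Newton lifting. Each step doubles the number of correct low bits,
// so five steps are enough to cover 64 bits.
static uint64_t neg_inv_mod_2_64(uint64_t n) {
  assert(n % 2 == 1);
  uint64_t inv = n;                  // n*n == 1 (mod 8), so 3 bits start correct
  for (int i = 0; i < 5; i++) {
    inv *= 2 - n * inv;              // 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits
  }
  return 0u - inv;                   // n * (-1/n) == -1 (mod 2^64)
}

int main(void) {
  uint64_t n = 0xfffffffffffffffbu;  // an arbitrary odd modulus word
  uint64_t n0 = neg_inv_mod_2_64(n);
  assert(n * n0 == UINT64_MAX);      // i.e. -1 mod 2^64
  printf("n0 = 0x%016llx\n", (unsigned long long)n0);
  return 0;
}
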


+ 95
- 95
crypto/fipsmodule/bn/montgomery_inv.c Show file

@@ -28,47 +28,47 @@ OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) ==
BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);

/* LG_LITTLE_R is log_2(r). */
// LG_LITTLE_R is log_2(r).
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)

uint64_t bn_mont_n0(const BIGNUM *n) {
/* These conditions are checked by the caller, |BN_MONT_CTX_set|. */
// These conditions are checked by the caller, |BN_MONT_CTX_set|.
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
assert(BN_is_odd(n));

/* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
* ensures that we can do integer division by |r| by simply ignoring
* |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
* |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
* what makes Montgomery multiplication efficient.
*
* As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
* with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
* multi-limb Montgomery multiplication of |a * b (mod n)|, given the
* unreduced product |t == a * b|, we repeatedly calculate:
*
* t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
* t2 := t1*n0*n
* t3 := t + t2
* t := t3 / r copy all limbs of |t3| except the lowest to |t|.
*
* In the last step, it would only make sense to ignore the lowest limb of
* |t3| if it were zero. The middle steps ensure that this is the case:
*
* t3 == 0 (mod r)
* t + t2 == 0 (mod r)
* t + t1*n0*n == 0 (mod r)
* t1*n0*n == -t (mod r)
* t*n0*n == -t (mod r)
* n0*n == -1 (mod r)
* n0 == -1/n (mod r)
*
* Thus, in each iteration of the loop, we multiply by the constant factor
* |n0|, the negative inverse of n (mod r). */
/* n_mod_r = n % r. As explained above, this is done by taking the lowest
* |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */
// r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
// ensures that we can do integer division by |r| by simply ignoring
// |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
// |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
// what makes Montgomery multiplication efficient.
//
// As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
// with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
// multi-limb Montgomery multiplication of |a * b (mod n)|, given the
// unreduced product |t == a * b|, we repeatedly calculate:
//
// t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
// t2 := t1*n0*n
// t3 := t + t2
// t := t3 / r copy all limbs of |t3| except the lowest to |t|.
//
// In the last step, it would only make sense to ignore the lowest limb of
// |t3| if it were zero. The middle steps ensure that this is the case:
//
// t3 == 0 (mod r)
// t + t2 == 0 (mod r)
// t + t1*n0*n == 0 (mod r)
// t1*n0*n == -t (mod r)
// t*n0*n == -t (mod r)
// n0*n == -1 (mod r)
// n0 == -1/n (mod r)
//
// Thus, in each iteration of the loop, we multiply by the constant factor
// |n0|, the negative inverse of n (mod r).
// n_mod_r = n % r. As explained above, this is done by taking the lowest
// |BN_MONT_CTX_N0_LIMBS| limbs of |n|.
uint64_t n_mod_r = n->d[0];
#if BN_MONT_CTX_N0_LIMBS == 2
if (n->top > 1) {
@@ -79,32 +79,32 @@ uint64_t bn_mont_n0(const BIGNUM *n) {
return bn_neg_inv_mod_r_u64(n_mod_r);
}

/* bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v|
* such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
* must be odd.
*
* This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
* Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
* It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
* Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
* (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
*
* This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
* (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
* constant-time with respect to |n|. We assume uint64_t additions,
* subtractions, shifts, and bitwise operations are all constant time, which
* may be a large leap of faith on 32-bit targets. We avoid division and
* multiplication, which tend to be the most problematic in terms of timing
* leaks.
*
* Most GCD implementations return values such that |u*r + v*n == 1|, so the
* caller would have to negate the resultant |v| for the purpose of Montgomery
* multiplication. This implementation does the negation implicitly by doing
* the computations as a difference instead of a sum. */
// bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v|
// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
// must be odd.
//
// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
//
// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
// constant-time with respect to |n|. We assume uint64_t additions,
// subtractions, shifts, and bitwise operations are all constant time, which
// may be a large leap of faith on 32-bit targets. We avoid division and
// multiplication, which tend to be the most problematic in terms of timing
// leaks.
//
// Most GCD implementations return values such that |u*r + v*n == 1|, so the
// caller would have to negate the resultant |v| for the purpose of Montgomery
// multiplication. This implementation does the negation implicitly by doing
// the computations as a difference instead of a sum.
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
assert(n % 2 == 1);

/* alpha == 2**(lg r - 1) == r / 2. */
// alpha == 2**(lg r - 1) == r / 2.
static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);

const uint64_t beta = n;
@@ -112,46 +112,46 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
uint64_t u = 1;
uint64_t v = 0;

/* The invariant maintained from here on is:
* 2**(lg r - i) == u*2*alpha - v*beta. */
// The invariant maintained from here on is:
// 2**(lg r - i) == u*2*alpha - v*beta.
for (size_t i = 0; i < LG_LITTLE_R; ++i) {
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif

/* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
* |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */
uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */
/* The addition can overflow, so use Dietz's method for it.
*
* Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
* (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
* (embedded in 64 bits so that overflow can be ignored):
*
* (declare-fun x () (_ BitVec 64))
* (declare-fun y () (_ BitVec 64))
* (assert (let (
* (one (_ bv1 64))
* (thirtyTwo (_ bv32 64)))
* (and
* (bvult x (bvshl one thirtyTwo))
* (bvult y (bvshl one thirtyTwo))
* (not (=
* (bvadd (bvlshr (bvxor x y) one) (bvand x y))
* (bvlshr (bvadd x y) one)))
* )))
* (check-sat) */
uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */
// Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
// |u = (u + beta) / 2| and |v = (v / 2) + alpha|.
uint64_t u_is_odd = UINT64_C(0) - (u & 1); // Either 0xff..ff or 0.
// The addition can overflow, so use Dietz's method for it.
//
// Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
// (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
// (embedded in 64 bits so that overflow can be ignored):
//
// (declare-fun x () (_ BitVec 64))
// (declare-fun y () (_ BitVec 64))
// (assert (let (
// (one (_ bv1 64))
// (thirtyTwo (_ bv32 64)))
// (and
// (bvult x (bvshl one thirtyTwo))
// (bvult y (bvshl one thirtyTwo))
// (not (=
// (bvadd (bvlshr (bvxor x y) one) (bvand x y))
// (bvlshr (bvadd x y) one)))
// )))
// (check-sat)
uint64_t beta_if_u_is_odd = beta & u_is_odd; // Either |beta| or 0.
u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);

uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */
uint64_t alpha_if_u_is_odd = alpha & u_is_odd; // Either |alpha| or 0.
v = (v >> 1) + alpha_if_u_is_odd;
}

/* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */
// The invariant now shows that u*r - v*n == 1 since r == 2 * alpha.
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif
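
Dietz's averaging trick referenced above can also be spot-checked directly (a standalone illustration, not part of this change): the identity ((x^y)>>1) + (x&y) == floor((x+y)/2) holds even when x+y would wrap the word.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Illustration only, not BoringSSL code: Dietz's overflow-safe average.
static uint64_t avg_dietz(uint64_t x, uint64_t y) {
  return ((x ^ y) >> 1) + (x & y);
}

int main(void) {
  assert(avg_dietz(7, 9) == 8);                       // small sanity check
  uint64_t x = UINT64_MAX - 2, y = UINT64_MAX - 7;    // x + y wraps 64 bits
  uint64_t expected = x / 2 + y / 2 + (x & y & 1);    // floor((x+y)/2), no overflow
  assert(avg_dietz(x, y) == expected);
  printf("avg = %llu\n", (unsigned long long)avg_dietz(x, y));
  return 0;
}
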
@@ -159,9 +159,9 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
return v;
}

/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
* than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
* odd. */
// bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
// than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
// odd.
int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
@@ -175,13 +175,13 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
return 1;
}

/* Set |r| to the smallest power of two larger than |n|. */
// Set |r| to the smallest power of two larger than |n|.
assert(p > n_bits);
if (!BN_set_bit(r, n_bits)) {
return 0;
}

/* Unconditionally reduce |r|. */
// Unconditionally reduce |r|.
assert(BN_cmp(r, n) > 0);
if (!BN_usub(r, r, n)) {
return 0;
@@ -189,10 +189,10 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
assert(BN_cmp(r, n) < 0);

for (unsigned i = n_bits; i < p; ++i) {
/* This is like |BN_mod_lshift1_quick| except using |BN_usub|.
*
* TODO: Replace this with the use of a constant-time variant of
* |BN_mod_lshift1_quick|. */
// This is like |BN_mod_lshift1_quick| except using |BN_usub|.
//
// TODO: Replace this with the use of a constant-time variant of
// |BN_mod_lshift1_quick|.
if (!BN_lshift1(r, r)) {
return 0;
}
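
The t1/t2/t3 walkthrough in bn_mont_n0's comment can be reproduced with small numbers. The following standalone sketch (illustration only, not part of this change) uses a one-limb r = 2^16, finds n0 by brute-force search, and checks that adding t1*n0*n really clears the low limb, so the division by r is exact.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint32_t r = 1u << 16;
  const uint32_t n = 0xfff1;               // odd modulus word (65521)

  // Find n0 with n*n0 == -1 (mod r); a brute-force search is fine at this size.
  uint32_t n0 = 0;
  for (uint32_t v = 1; v < r; v += 2) {
    if (((n * v) & (r - 1)) == r - 1) {
      n0 = v;
      break;
    }
  }

  // One reduction step on an unreduced product t, as in the comment above:
  // t1 := t % r, t2 := t1*n0*n, t3 := t + t2, then t := t3 / r.
  uint32_t t = 40000u * 50000u;            // an unreduced product, t < n*n
  uint32_t t1 = t & (r - 1);
  uint32_t t2_factor = (t1 * n0) & (r - 1);
  uint64_t t3 = (uint64_t)t + (uint64_t)t2_factor * n;
  assert((t3 & (r - 1)) == 0);             // the low limb cancels exactly
  printf("t = %u, t3 = %llu, t3 / r = %llu\n", t, (unsigned long long)t3,
         (unsigned long long)(t3 >> 16));
  return 0;
}
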


+ 104
- 108
crypto/fipsmodule/bn/mul.c Show file

@@ -113,15 +113,15 @@ static void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b,
}

#if !defined(OPENSSL_X86) || defined(OPENSSL_NO_ASM)
/* Here follows specialised variants of bn_add_words() and bn_sub_words(). They
* have the property of performing operations on arrays of different sizes. The
* sizes of those arrays are expressed through cl, which is the common length (
* basically, min(len(a),len(b)) ), and dl, which is the delta between the two
* lengths, calculated as len(a)-len(b). All lengths are the number of
* BN_ULONGs... For the operations that require a result array as parameter,
* it must have the length cl+abs(dl). These functions should probably end up
* in bn_asm.c as soon as there are assembler counterparts for the systems that
* use assembler files. */
// Here follows specialised variants of bn_add_words() and bn_sub_words(). They
// have the property of performing operations on arrays of different sizes. The
// sizes of those arrays are expressed through cl, which is the common length (
// basically, min(len(a),len(b)) ), and dl, which is the delta between the two
// lengths, calculated as len(a)-len(b). All lengths are the number of
// BN_ULONGs... For the operations that require a result array as parameter,
// it must have the length cl+abs(dl). These functions should probably end up
// in bn_asm.c as soon as there are assembler counterparts for the systems that
// use assembler files.

static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a,
const BN_ULONG *b, int cl, int dl) {
@@ -274,25 +274,24 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a,
return c;
}
#else
/* On other platforms the function is defined in asm. */
// On other platforms the function is defined in asm.
BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int cl, int dl);
#endif

/* Karatsuba recursive multiplication algorithm
* (cf. Knuth, The Art of Computer Programming, Vol. 2) */

/* r is 2*n2 words in size,
* a and b are both n2 words in size.
* n2 must be a power of 2.
* We multiply and return the result.
* t must be 2*n2 words in size
* We calculate
* a[0]*b[0]
* a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
* a[1]*b[1]
*/
/* dnX may not be positive, but n2/2+dnX has to be */
// Karatsuba recursive multiplication algorithm
// (cf. Knuth, The Art of Computer Programming, Vol. 2)

// r is 2*n2 words in size,
// a and b are both n2 words in size.
// n2 must be a power of 2.
// We multiply and return the result.
// t must be 2*n2 words in size
// We calculate
// a[0]*b[0]
// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
// a[1]*b[1]
// dnX may not be positive, but n2/2+dnX has to be
static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
int dna, int dnb, BN_ULONG *t) {
int n = n2 / 2, c1, c2;
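
The three-product identity above is easy to verify on machine words. The following standalone sketch (illustration only, not part of this change) applies one level of the split to 16-bit halves of 32-bit inputs, where a plain 64-bit multiply is available for comparison; only the cross term (a0-a1)*(b1-b0) needs signed arithmetic.

#include <stdint.h>
#include <stdio.h>

// Illustration only: the identity used by bn_mul_recursive, with B = 2^16:
//   a*b = a1*b1*B^2 + (a0*b0 + a1*b1 + (a0-a1)*(b1-b0))*B + a0*b0
static uint64_t karatsuba32(uint32_t a, uint32_t b) {
  uint32_t a0 = a & 0xffff, a1 = a >> 16;
  uint32_t b0 = b & 0xffff, b1 = b >> 16;
  uint64_t lo = (uint64_t)a0 * b0;
  uint64_t hi = (uint64_t)a1 * b1;
  int64_t cross = (int64_t)((int32_t)a0 - (int32_t)a1) *
                  ((int32_t)b1 - (int32_t)b0);
  uint64_t mid = lo + hi + (uint64_t)cross;   // equals a0*b1 + a1*b0 >= 0
  return (hi << 32) + (mid << 16) + lo;
}

int main(void) {
  uint32_t a = 0x89abcdefu, b = 0x01234567u;
  printf("%d\n", karatsuba32(a, b) == (uint64_t)a * b);   // prints 1
  return 0;
}
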
@@ -300,15 +299,14 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
unsigned int neg, zero;
BN_ULONG ln, lo, *p;

/* Only call bn_mul_comba8 if n2 == 8 and the
* two arrays are complete [steve]
*/
// Only call bn_mul_comba8 if n2 == 8 and the
// two arrays are complete [steve]
if (n2 == 8 && dna == 0 && dnb == 0) {
bn_mul_comba8(r, a, b);
return;
}

/* Else do normal multiply */
// Else do normal multiply
if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL) {
bn_mul_normal(r, a, n2 + dna, b, n2 + dnb);
if ((dna + dnb) < 0) {
@@ -318,21 +316,21 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
return;
}

/* r=(a[0]-a[1])*(b[1]-b[0]) */
// r=(a[0]-a[1])*(b[1]-b[0])
c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna);
c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n);
zero = neg = 0;
switch (c1 * 3 + c2) {
case -4:
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
break;
case -3:
zero = 1;
break;
case -2:
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // +
neg = 1;
break;
case -1:
@@ -341,8 +339,8 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
zero = 1;
break;
case 2:
bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // +
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
neg = 1;
break;
case 3:
@@ -355,7 +353,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
}

if (n == 4 && dna == 0 && dnb == 0) {
/* XXX: bn_mul_comba4 could take extra args to do this well */
// XXX: bn_mul_comba4 could take extra args to do this well
if (!zero) {
bn_mul_comba4(&(t[n2]), t, &(t[n]));
} else {
@@ -365,7 +363,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
bn_mul_comba4(r, a, b);
bn_mul_comba4(&(r[n2]), &(a[n]), &(b[n]));
} else if (n == 8 && dna == 0 && dnb == 0) {
/* XXX: bn_mul_comba8 could take extra args to do this well */
// XXX: bn_mul_comba8 could take extra args to do this well
if (!zero) {
bn_mul_comba8(&(t[n2]), t, &(t[n]));
} else {
@@ -385,24 +383,24 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p);
}

/* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
* r[10] holds (a[0]*b[0])
* r[32] holds (b[1]*b[1]) */
// t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
// r[10] holds (a[0]*b[0])
// r[32] holds (b[1]*b[1])

c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));

if (neg) {
/* if t[32] is negative */
// if t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
} else {
/* Might have a carry */
// Might have a carry
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}

/* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
* r[10] holds (a[0]*b[0])
* r[32] holds (b[1]*b[1])
* c1 holds the carry bits */
// t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
// r[10] holds (a[0]*b[0])
// r[32] holds (b[1]*b[1])
// c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -410,8 +408,8 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
ln = (lo + c1) & BN_MASK2;
*p = ln;

/* The overflow will stop before we overwrite
* words we should not overwrite */
// The overflow will stop before we overwrite
// words we should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -423,9 +421,9 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
}
}

/* n+tn is the word length
* t needs to be n*4 in size, as does r */
/* tnX may not be negative but must be less than n */
// n+tn is the word length
// t needs to be n*4 in size, as does r
// tnX may not be negative but must be less than n
static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
int tna, int tnb, BN_ULONG *t) {
int i, j, n2 = n * 2;
@@ -437,33 +435,33 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
return;
}

/* r=(a[0]-a[1])*(b[1]-b[0]) */
// r=(a[0]-a[1])*(b[1]-b[0])
c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna);
c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n);
neg = 0;
switch (c1 * 3 + c2) {
case -4:
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
break;
case -3:
/* break; */
// break;
case -2:
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */
bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // +
neg = 1;
break;
case -1:
case 0:
case 1:
/* break; */
// break;
case 2:
bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // +
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
neg = 1;
break;
case 3:
/* break; */
// break;
case 4:
bn_sub_part_words(t, a, &(a[n]), tna, n - tna);
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n);
@@ -480,8 +478,8 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
bn_mul_recursive(&(t[n2]), t, &(t[n]), n, 0, 0, p);
bn_mul_recursive(r, a, b, n, 0, 0, p);
i = n / 2;
/* If there is only a bottom half to the number,
* just do it */
// If there is only a bottom half to the number,
// just do it
if (tna > tnb) {
j = tna - i;
} else {
@@ -492,12 +490,12 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p);
OPENSSL_memset(&(r[n2 + i * 2]), 0, sizeof(BN_ULONG) * (n2 - i * 2));
} else if (j > 0) {
/* eg, n == 16, i == 8 and tn == 11 */
// eg, n == 16, i == 8 and tn == 11
bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p);
OPENSSL_memset(&(r[n2 + tna + tnb]), 0,
sizeof(BN_ULONG) * (n2 - tna - tnb));
} else {
/* (j < 0) eg, n == 16, i == 8 and tn == 5 */
// (j < 0) eg, n == 16, i == 8 and tn == 5
OPENSSL_memset(&(r[n2]), 0, sizeof(BN_ULONG) * n2);
if (tna < BN_MUL_RECURSIVE_SIZE_NORMAL &&
tnb < BN_MUL_RECURSIVE_SIZE_NORMAL) {
@@ -505,9 +503,9 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
} else {
for (;;) {
i /= 2;
/* these simplified conditions work
* exclusively because difference
* between tna and tnb is 1 or 0 */
// these simplified conditions work
// exclusively because difference
// between tna and tnb is 1 or 0
if (i < tna || i < tnb) {
bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i,
tnb - i, p);
@@ -522,25 +520,24 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
}
}

/* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
* r[10] holds (a[0]*b[0])
* r[32] holds (b[1]*b[1])
*/
// t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
// r[10] holds (a[0]*b[0])
// r[32] holds (b[1]*b[1])

c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));

if (neg) {
/* if t[32] is negative */
// if t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
} else {
/* Might have a carry */
// Might have a carry
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}

/* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
* r[10] holds (a[0]*b[0])
* r[32] holds (b[1]*b[1])
* c1 holds the carry bits */
// t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
// r[10] holds (a[0]*b[0])
// r[32] holds (b[1]*b[1])
// c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -548,8 +545,8 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
ln = (lo + c1) & BN_MASK2;
*p = ln;

/* The overflow will stop before we overwrite
* words we should not overwrite */
// The overflow will stop before we overwrite
// words we should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -627,7 +624,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
}
bn_mul_part_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d);
} else {
/* al <= j || bl <= j */
// al <= j || bl <= j
if (!bn_wexpand(t, k * 2)) {
goto err;
}
@@ -659,7 +656,7 @@ err:
return ret;
}

/* tmp must have 2*n words */
// tmp must have 2*n words
static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp) {
int i, j, max;
const BN_ULONG *ap;
@@ -687,23 +684,22 @@ static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp)

bn_add_words(r, r, r, max);

/* There will not be a carry */
// There will not be a carry

bn_sqr_words(tmp, a, n);

bn_add_words(r, r, tmp, max);
}

/* r is 2*n words in size,
* a and b are both n words in size. (There's not actually a 'b' here ...)
* n must be a power of 2.
* We multiply and return the result.
* t must be 2*n words in size
* We calculate
* a[0]*b[0]
* a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
* a[1]*b[1]
*/
// r is 2*n words in size,
// a and b are both n words in size. (There's not actually a 'b' here ...)
// n must be a power of 2.
// We multiply and return the result.
// t must be 2*n words in size
// We calculate
// a[0]*b[0]
// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
// a[1]*b[1]
static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t) {
int n = n2 / 2;
int zero, c1;
@@ -720,7 +716,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t
bn_sqr_normal(r, a, n2, t);
return;
}
/* r=(a[0]-a[1])*(a[1]-a[0]) */
// r=(a[0]-a[1])*(a[1]-a[0])
c1 = bn_cmp_words(a, &(a[n]), n);
zero = 0;
if (c1 > 0) {
@@ -731,7 +727,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t
zero = 1;
}

/* The result will always be negative unless it is zero */
// The result will always be negative unless it is zero
p = &(t[n2 * 2]);

if (!zero) {
@@ -742,19 +738,19 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t
bn_sqr_recursive(r, a, n, p);
bn_sqr_recursive(&(r[n2]), &(a[n]), n, p);

/* t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
* r[10] holds (a[0]*a[0])
* r[32] holds (a[1]*a[1]) */
// t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
// r[10] holds (a[0]*a[0])
// r[32] holds (a[1]*a[1])

c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));

/* t[32] is negative */
// t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));

/* t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
* r[10] holds (a[0]*a[0])
* r[32] holds (a[1]*a[1])
* c1 holds the carry bits */
// t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
// r[10] holds (a[0]*a[0])
// r[32] holds (a[1]*a[1])
// c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -762,8 +758,8 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t
ln = (lo + c1) & BN_MASK2;
*p = ln;

/* The overflow will stop before we overwrite
* words we should not overwrite */
// The overflow will stop before we overwrite
// words we should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -818,7 +814,7 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) {
goto err;
}

max = 2 * al; /* Non-zero (from above) */
max = 2 * al; // Non-zero (from above)
if (!bn_wexpand(rr, max)) {
goto err;
}
@@ -852,8 +848,8 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) {
}

rr->neg = 0;
/* If the most-significant half of the top word of 'a' is zero, then
* the square of 'a' will fit in max-1 words. */
// If the most-significant half of the top word of 'a' is zero, then
// the square of 'a' will fit in max-1 words.
if (a->d[al - 1] == (a->d[al - 1] & BN_MASK2l)) {
rr->top = max - 1;
} else {


+ 58
- 58
crypto/fipsmodule/bn/prime.c Show file

@@ -113,13 +113,13 @@

#include "internal.h"

/* The quick sieve algorithm approach to weeding out primes is Philip
* Zimmermann's, as implemented in PGP. I have had a read of his comments and
* implemented my own version. */
// The quick sieve algorithm approach to weeding out primes is Philip
// Zimmermann's, as implemented in PGP. I have had a read of his comments and
// implemented my own version.

#define NUMPRIMES 2048

/* primes contains all the primes that fit into a uint16_t. */
// primes contains all the primes that fit into a uint16_t.
static const uint16_t primes[NUMPRIMES] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79,
@@ -310,12 +310,12 @@ static const uint16_t primes[NUMPRIMES] = {
17851, 17863,
};

/* BN_prime_checks_for_size returns the number of Miller-Rabin iterations
* necessary for a 'bits'-bit prime, in order to maintain an error rate greater
* than the security level for an RSA prime of that many bits (calculated using
* the FIPS SP 800-57 security level and 186-4 Section F.1; original paper:
* Damgaard, Landrock, Pomerance: Average case error estimates for the strong
* probable prime test. -- Math. Comp. 61 (1993) 177-194) */
// BN_prime_checks_for_size returns the number of Miller-Rabin iterations
// necessary for a 'bits'-bit prime, in order to maintain an error rate greater
// than the security level for an RSA prime of that many bits (calculated using
// the FIPS SP 800-57 security level and 186-4 Section F.1; original paper:
// Damgaard, Landrock, Pomerance: Average case error estimates for the strong
// probable prime test. -- Math. Comp. 61 (1993) 177-194)
static int BN_prime_checks_for_size(int bits) {
if (bits >= 3747) {
return 3;
@@ -371,11 +371,11 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add,
int checks = BN_prime_checks_for_size(bits);

if (bits < 2) {
/* There are no prime numbers this small. */
// There are no prime numbers this small.
OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL);
return 0;
} else if (bits == 2 && safe) {
/* The smallest safe prime (7) is three bits. */
// The smallest safe prime (7) is three bits.
OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL);
return 0;
}
@@ -391,7 +391,7 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add,
}

loop:
/* make a random number and set the top and bottom bits */
// make a random number and set the top and bottom bits
if (add == NULL) {
if (!probable_prime(ret, bits)) {
goto err;
@@ -409,7 +409,7 @@ loop:
}

if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, c1++)) {
/* aborted */
// aborted
goto err;
}

@@ -421,8 +421,8 @@ loop:
goto loop;
}
} else {
/* for "safe prime" generation, check that (p-1)/2 is prime. Since a prime
* is odd, we just need to divide by 2 */
// for "safe prime" generation, check that (p-1)/2 is prime. Since a prime
// is odd, we just need to divide by 2
if (!BN_rshift1(t, ret)) {
goto err;
}
@@ -445,11 +445,11 @@ loop:
if (!BN_GENCB_call(cb, i, c1 - 1)) {
goto err;
}
/* We have a safe prime test pass */
// We have a safe prime test pass
}
}

/* we have a prime :-) */
// we have a prime :-)
found = 1;

err:
@@ -487,13 +487,13 @@ int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx,
return 0;
}

/* first look for small factors */
// first look for small factors
if (!BN_is_odd(a)) {
/* a is even => a is prime if and only if a == 2 */
// a is even => a is prime if and only if a == 2
return BN_is_word(a, 2);
}

/* Enhanced Miller-Rabin does not work for three. */
// Enhanced Miller-Rabin does not work for three.
if (BN_is_word(a, 3)) {
return 1;
}
@@ -539,7 +539,7 @@ err:
int BN_enhanced_miller_rabin_primality_test(
enum bn_primality_result_t *out_result, const BIGNUM *w, int iterations,
BN_CTX *ctx, BN_GENCB *cb) {
/* Enhanced Miller-Rabin is only valid on odd integers greater than 3. */
// Enhanced Miller-Rabin is only valid on odd integers greater than 3.
if (!BN_is_odd(w) || BN_cmp_word(w, 3) <= 0) {
OPENSSL_PUT_ERROR(BN, BN_R_INVALID_INPUT);
return 0;
@@ -561,7 +561,7 @@ int BN_enhanced_miller_rabin_primality_test(
goto err;
}

/* Write w1 as m*2^a (Steps 1 and 2). */
// Write w1 as m*2^a (Steps 1 and 2).
int a = 0;
while (!BN_is_bit_set(w1, a)) {
a++;
@@ -585,22 +585,22 @@ int BN_enhanced_miller_rabin_primality_test(
goto err;
}

/* Montgomery setup for computations mod A */
// Montgomery setup for computations mod A
mont = BN_MONT_CTX_new();
if (mont == NULL ||
!BN_MONT_CTX_set(mont, w, ctx)) {
goto err;
}

/* The following loop performs an inner iteration of the Enhanced Miller-Rabin
* Primality test (Step 4). */
// The following loop performs an inner iteration of the Enhanced Miller-Rabin
// Primality test (Step 4).
for (int i = 1; i <= iterations; i++) {
/* Step 4.1-4.2 */
// Step 4.1-4.2
if (!BN_rand_range_ex(b, 2, w1)) {
goto err;
}

/* Step 4.3-4.4 */
// Step 4.3-4.4
if (!BN_gcd(g, b, w, ctx)) {
goto err;
}
@@ -610,17 +610,17 @@ int BN_enhanced_miller_rabin_primality_test(
goto err;
}

/* Step 4.5 */
// Step 4.5
if (!BN_mod_exp_mont(z, b, m, w, ctx, mont)) {
goto err;
}

/* Step 4.6 */
// Step 4.6
if (BN_is_one(z) || BN_cmp(z, w1) == 0) {
goto loop;
}

/* Step 4.7 */
// Step 4.7
for (int j = 1; j < a; j++) {
if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) {
goto err;
@@ -633,18 +633,18 @@ int BN_enhanced_miller_rabin_primality_test(
}
}

/* Step 4.8-4.9 */
// Step 4.8-4.9
if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) {
goto err;
}

/* Step 4.10-4.11 */
// Step 4.10-4.11
if (!BN_is_one(z) && !BN_copy(x, z)) {
goto err;
}

composite:
/* Step 4.12-4.14 */
// Step 4.12-4.14
if (!BN_copy(x1, x) ||
!BN_sub_word(x1, 1) ||
!BN_gcd(g, x1, w, ctx)) {
@@ -660,7 +660,7 @@ int BN_enhanced_miller_rabin_primality_test(
goto err;

loop:
/* Step 4.15 */
// Step 4.15
if (!BN_GENCB_call(cb, 1, i)) {
goto err;
}
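
For comparison with the enhanced test above, the following standalone sketch (illustration only, not part of this change) shows one round of plain Miller-Rabin, restricted to n < 2^32 so that uint64_t can hold every product: write n-1 as m*2^a, raise the base to m, then square up to a-1 more times.

#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod32(uint64_t x, uint64_t y, uint64_t n) {
  return x * y % n;                 // safe: x, y < 2^32, so x*y < 2^64
}

// One Miller-Rabin round for odd n > 3, n < 2^32, with base b in [2, n-2].
// Returns 1 if n passes this round, 0 if b witnesses that n is composite.
static int miller_rabin_round(uint32_t n, uint32_t b) {
  uint32_t m = n - 1;
  int a = 0;
  while ((m & 1) == 0) {            // write n-1 as m * 2^a with m odd
    m >>= 1;
    a++;
  }
  uint64_t z = 1, base = b;
  for (uint32_t e = m; e > 0; e >>= 1) {   // z := b^m mod n
    if (e & 1) {
      z = mulmod32(z, base, n);
    }
    base = mulmod32(base, base, n);
  }
  if (z == 1 || z == n - 1) {
    return 1;
  }
  for (int i = 1; i < a; i++) {     // square up to a-1 more times
    z = mulmod32(z, z, n);
    if (z == n - 1) {
      return 1;
    }
  }
  return 0;
}

int main(void) {
  printf("%d\n", miller_rabin_round(2147483647u, 2));   // 2^31 - 1, prime: 1
  printf("%d\n", miller_rabin_round(561u, 2));          // Carmichael number: 0
  return 0;
}
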
@@ -688,7 +688,7 @@ again:
return 0;
}

/* we now have a random number 'rnd' to test. */
// we now have a random number 'rnd' to test.
for (i = 1; i < NUMPRIMES; i++) {
BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]);
if (mod == (BN_ULONG)-1) {
@@ -696,12 +696,12 @@ again:
}
mods[i] = (uint16_t)mod;
}
/* If bits is so small that it fits into a single word then we
* additionally don't want to exceed that many bits. */
// If bits is so small that it fits into a single word then we
// additionally don't want to exceed that many bits.
if (is_single_word) {
BN_ULONG size_limit;
if (bits == BN_BITS2) {
/* Avoid undefined behavior. */
// Avoid undefined behavior.
size_limit = ~((BN_ULONG)0) - BN_get_word(rnd);
} else {
size_limit = (((BN_ULONG)1) << bits) - BN_get_word(rnd) - 1;
@@ -716,15 +716,15 @@ loop:
if (is_single_word) {
BN_ULONG rnd_word = BN_get_word(rnd);

/* In the case that the candidate prime is a single word then
* we check that:
* 1) It's greater than primes[i] because we shouldn't reject
* 3 as being a prime number because it's a multiple of
* three.
* 2) That it's not a multiple of a known prime. We don't
* check that rnd-1 is also coprime to all the known
* primes because there aren't many small primes where
* that's true. */
// In the case that the candidate prime is a single word then
// we check that:
// 1) It's greater than primes[i] because we shouldn't reject
// 3 as being a prime number because it's a multiple of
// three.
// 2) That it's not a multiple of a known prime. We don't
// check that rnd-1 is also coprime to all the known
// primes because there aren't many small primes where
// that's true.
for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) {
if ((mods[i] + delta) % primes[i] == 0) {
delta += 2;
@@ -736,8 +736,8 @@ loop:
}
} else {
for (i = 1; i < NUMPRIMES; i++) {
/* check that rnd is not a prime and also
* that gcd(rnd-1,primes) == 1 (except for 2) */
// check that rnd is not a prime and also
// that gcd(rnd-1,primes) == 1 (except for 2)
if (((mods[i] + delta) % primes[i]) <= 1) {
delta += 2;
if (delta > maxdelta) {
@@ -772,7 +772,7 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add,
goto err;
}

/* we need ((rnd-rem) % add) == 0 */
// we need ((rnd-rem) % add) == 0

if (!BN_mod(t1, rnd, add, ctx)) {
goto err;
@@ -789,11 +789,11 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add,
goto err;
}
}
/* we now have a random number 'rand' to test. */
// we now have a random number 'rand' to test.

loop:
for (i = 1; i < NUMPRIMES; i++) {
/* check that rnd is a prime */
// check that rnd is a prime
BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]);
if (mod == (BN_ULONG)-1) {
goto err;
@@ -835,7 +835,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd,
goto err;
}

/* we need ((rnd-rem) % add) == 0 */
// we need ((rnd-rem) % add) == 0
if (!BN_mod(t1, q, qadd, ctx)) {
goto err;
}
@@ -857,7 +857,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd,
}
}

/* we now have a random number 'rand' to test. */
// we now have a random number 'rand' to test.
if (!BN_lshift1(p, q)) {
goto err;
}
@@ -867,9 +867,9 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd,

loop:
for (i = 1; i < NUMPRIMES; i++) {
/* check that p and q are prime */
/* check that for p and q
* gcd(p-1,primes) == 1 (except for 2) */
// check that p and q are prime
// check that for p and q
// gcd(p-1,primes) == 1 (except for 2)
BN_ULONG pmod = BN_mod_word(p, (BN_ULONG)primes[i]);
BN_ULONG qmod = BN_mod_word(q, (BN_ULONG)primes[i]);
if (pmod == (BN_ULONG)-1 || qmod == (BN_ULONG)-1) {


+ 19
- 19
crypto/fipsmodule/bn/random.c Show file

@@ -158,7 +158,7 @@ static int bn_rand_with_additional_data(BIGNUM *rnd, int bits, int top,
goto err;
}

/* Make a random number and set the top and bottom bits. */
// Make a random number and set the top and bottom bits.
RAND_bytes_with_additional_data(buf, bytes, additional_data);

if (top != BN_RAND_TOP_ANY) {
@@ -176,7 +176,7 @@ static int bn_rand_with_additional_data(BIGNUM *rnd, int bits, int top,

buf[0] &= ~mask;

/* Set the bottom bit if requested, */
// Set the bottom bit if requested,
if (bottom == BN_RAND_BOTTOM_ODD) {
buf[bytes - 1] |= 1;
}
@@ -212,28 +212,28 @@ static int bn_rand_range_with_additional_data(
return 0;
}

/* This function is used to implement steps 4 through 7 of FIPS 186-4
* appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive|
* is n and |min_inclusive| is one. */
// This function is used to implement steps 4 through 7 of FIPS 186-4
// appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive|
// is n and |min_inclusive| is one.
unsigned count = 100;
unsigned n = BN_num_bits(max_exclusive); /* n > 0 */
unsigned n = BN_num_bits(max_exclusive); // n > 0
do {
if (!--count) {
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS);
return 0;
}

if (/* steps 4 and 5 */
if (// steps 4 and 5
!bn_rand_with_additional_data(r, n, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY,
additional_data) ||
/* step 7 */
// step 7
!BN_add_word(r, min_inclusive)) {
return 0;
}

/* Step 6. This loops if |r| >= |max_exclusive|. This is identical to
* checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the
* formulation stated in FIPS 186-4. */
// Step 6. This loops if |r| >= |max_exclusive|. This is identical to
// checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the
// formulation stated in FIPS 186-4.
} while (BN_cmp(r, max_exclusive) >= 0);

return 1;
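
The same steps-4-through-7 shape can be seen with native integers. In the following standalone sketch (illustration only, not part of this change) rand() stands in for the DRBG and is of course not suitable for anything cryptographic; it only shows the draw, offset, and reject-and-retry structure.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Draw a value from [min_inclusive, max_exclusive) by rejection: sample as
// many bits as max_exclusive has, add the offset, retry if out of range.
static uint64_t rand_range(uint64_t min_inclusive, uint64_t max_exclusive) {
  unsigned bits = 0;
  for (uint64_t v = max_exclusive; v != 0; v >>= 1) {
    bits++;                                   // n = bit length of max_exclusive
  }
  uint64_t mask = (bits >= 64) ? UINT64_MAX : (UINT64_C(1) << bits) - 1;
  for (;;) {
    uint64_t r = (((uint64_t)rand() << 32) | (uint64_t)rand()) & mask;  // steps 4-5
    r += min_inclusive;                                                 // step 7
    if (r < max_exclusive) {                                            // step 6
      return r;
    }
  }
}

int main(void) {
  for (int i = 0; i < 8; i++) {
    printf("%llu ", (unsigned long long)rand_range(1, 100));
  }
  printf("\n");
  return 0;
}
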
@@ -256,22 +256,22 @@ int BN_pseudo_rand_range(BIGNUM *r, const BIGNUM *range) {
int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv,
const uint8_t *message, size_t message_len,
BN_CTX *ctx) {
/* We copy |priv| into a local buffer to avoid further exposing its
* length. */
// We copy |priv| into a local buffer to avoid further exposing its
// length.
uint8_t private_bytes[96];
size_t todo = sizeof(priv->d[0]) * priv->top;
if (todo > sizeof(private_bytes)) {
/* No reasonable DSA or ECDSA key should have a private key
* this large and we don't handle this case in order to avoid
* leaking the length of the private key. */
// No reasonable DSA or ECDSA key should have a private key
// this large and we don't handle this case in order to avoid
// leaking the length of the private key.
OPENSSL_PUT_ERROR(BN, BN_R_PRIVATE_KEY_TOO_LARGE);
return 0;
}
OPENSSL_memcpy(private_bytes, priv->d, todo);
OPENSSL_memset(private_bytes + todo, 0, sizeof(private_bytes) - todo);

/* Pass a SHA512 hash of the private key and message as additional data into
* the RBG. This is a hardening measure against entropy failure. */
// Pass a SHA512 hash of the private key and message as additional data into
// the RBG. This is a hardening measure against entropy failure.
OPENSSL_COMPILE_ASSERT(SHA512_DIGEST_LENGTH >= 32,
additional_data_is_too_large_for_sha512);
SHA512_CTX sha;
@@ -281,6 +281,6 @@ int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv,
SHA512_Update(&sha, message, message_len);
SHA512_Final(digest, &sha);

/* Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2. */
// Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2.
return bn_rand_range_with_additional_data(out, 1, range, digest);
}

+ 1
- 1
crypto/fipsmodule/bn/shift.c Show file

@@ -157,7 +157,7 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n) {
}
} else {
if (n == 0) {
return 1; /* or the copying loop will go berserk */
return 1; // or the copying loop will go berserk
}
}



+ 102
- 108
crypto/fipsmodule/bn/sqrt.c Show file

@@ -60,9 +60,9 @@


BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
/* Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm
* (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory",
* algorithm 1.5.1). |p| is assumed to be a prime. */
// Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm
// (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory",
// algorithm 1.5.1). |p| is assumed to be a prime.

BIGNUM *ret = in;
int err = 1;
@@ -125,26 +125,25 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto end;
}

/* A = a mod p */
// A = a mod p
if (!BN_nnmod(A, a, p, ctx)) {
goto end;
}

/* now write |p| - 1 as 2^e*q where q is odd */
// now write |p| - 1 as 2^e*q where q is odd
e = 1;
while (!BN_is_bit_set(p, e)) {
e++;
}
/* we'll set q later (if needed) */
// we'll set q later (if needed)

if (e == 1) {
/* The easy case: (|p|-1)/2 is odd, so 2 has an inverse
* modulo (|p|-1)/2, and square roots can be computed
* directly by modular exponentiation.
* We have
* 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
* so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
*/
// The easy case: (|p|-1)/2 is odd, so 2 has an inverse
// modulo (|p|-1)/2, and square roots can be computed
// directly by modular exponentiation.
// We have
// 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
// so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
if (!BN_rshift(q, p, 2)) {
goto end;
}
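
As a concrete check of this shortcut with small numbers (illustration only): for p = 23, (p-1)/2 = 11 is odd, the exponent (p+1)/4 is 6, and with a = 4 we get 4^6 = 4096 == 2 (mod 23), whose square is indeed 4.
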
@@ -158,39 +157,38 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
}

if (e == 2) {
/* |p| == 5 (mod 8)
*
* In this case 2 is always a non-square since
* Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
* So if a really is a square, then 2*a is a non-square.
* Thus for
* b := (2*a)^((|p|-5)/8),
* i := (2*a)*b^2
* we have
* i^2 = (2*a)^((1 + (|p|-5)/4)*2)
* = (2*a)^((p-1)/2)
* = -1;
* so if we set
* x := a*b*(i-1),
* then
* x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
* = a^2 * b^2 * (-2*i)
* = a*(-i)*(2*a*b^2)
* = a*(-i)*i
* = a.
*
* (This is due to A.O.L. Atkin,
* <URL:
*http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
* November 1992.)
*/

/* t := 2*a */
// |p| == 5 (mod 8)
//
// In this case 2 is always a non-square since
// Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
// So if a really is a square, then 2*a is a non-square.
// Thus for
// b := (2*a)^((|p|-5)/8),
// i := (2*a)*b^2
// we have
// i^2 = (2*a)^((1 + (|p|-5)/4)*2)
// = (2*a)^((p-1)/2)
// = -1;
// so if we set
// x := a*b*(i-1),
// then
// x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
// = a^2 * b^2 * (-2*i)
// = a*(-i)*(2*a*b^2)
// = a*(-i)*i
// = a.
//
// (This is due to A.O.L. Atkin,
// <URL:
//http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
// November 1992.)

// t := 2*a
if (!BN_mod_lshift1_quick(t, A, p)) {
goto end;
}

/* b := (2*a)^((|p|-5)/8) */
// b := (2*a)^((|p|-5)/8)
if (!BN_rshift(q, p, 3)) {
goto end;
}
@@ -199,18 +197,18 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto end;
}

/* y := b^2 */
// y := b^2
if (!BN_mod_sqr(y, b, p, ctx)) {
goto end;
}

/* t := (2*a)*b^2 - 1*/
// t := (2*a)*b^2 - 1
if (!BN_mod_mul(t, t, y, p, ctx) ||
!BN_sub_word(t, 1)) {
goto end;
}

/* x = a*b*t */
// x = a*b*t
if (!BN_mod_mul(x, A, b, p, ctx) ||
!BN_mod_mul(x, x, t, p, ctx)) {
goto end;
@@ -223,17 +221,16 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto vrfy;
}

/* e > 2, so we really have to use the Tonelli/Shanks algorithm.
* First, find some y that is not a square. */
// e > 2, so we really have to use the Tonelli/Shanks algorithm.
// First, find some y that is not a square.
if (!BN_copy(q, p)) {
goto end; /* use 'q' as temp */
goto end; // use 'q' as temp
}
q->neg = 0;
i = 2;
do {
/* For efficiency, try small numbers first;
* if this fails, try random numbers.
*/
// For efficiency, try small numbers first;
// if this fails, try random numbers.
if (i < 22) {
if (!BN_set_word(y, i)) {
goto end;
@@ -247,7 +244,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto end;
}
}
/* now 0 <= y < |p| */
// now 0 <= y < |p|
if (BN_is_zero(y)) {
if (!BN_set_word(y, i)) {
goto end;
@@ -255,34 +252,33 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
}
}

r = bn_jacobi(y, q, ctx); /* here 'q' is |p| */
r = bn_jacobi(y, q, ctx); // here 'q' is |p|
if (r < -1) {
goto end;
}
if (r == 0) {
/* m divides p */
// m divides p
OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME);
goto end;
}
} while (r == 1 && ++i < 82);

if (r != -1) {
/* Many rounds and still no non-square -- this is more likely
* a bug than just bad luck.
* Even if p is not prime, we should have found some y
* such that r == -1.
*/
// Many rounds and still no non-square -- this is more likely
// a bug than just bad luck.
// Even if p is not prime, we should have found some y
// such that r == -1.
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS);
goto end;
}

/* Here's our actual 'q': */
// Here's our actual 'q':
if (!BN_rshift(q, q, e)) {
goto end;
}

/* Now that we have some non-square, we can find an element
* of order 2^e by computing its q'th power. */
// Now that we have some non-square, we can find an element
// of order 2^e by computing its q'th power.
if (!BN_mod_exp_mont(y, y, q, p, ctx, NULL)) {
goto end;
}
@@ -291,37 +287,36 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto end;
}

/* Now we know that (if p is indeed prime) there is an integer
* k, 0 <= k < 2^e, such that
*
* a^q * y^k == 1 (mod p).
*
* As a^q is a square and y is not, k must be even.
* q+1 is even, too, so there is an element
*
* X := a^((q+1)/2) * y^(k/2),
*
* and it satisfies
*
* X^2 = a^q * a * y^k
* = a,
*
* so it is the square root that we are looking for.
*/

/* t := (q-1)/2 (note that q is odd) */
// Now we know that (if p is indeed prime) there is an integer
// k, 0 <= k < 2^e, such that
//
// a^q * y^k == 1 (mod p).
//
// As a^q is a square and y is not, k must be even.
// q+1 is even, too, so there is an element
//
// X := a^((q+1)/2) * y^(k/2),
//
// and it satisfies
//
// X^2 = a^q * a * y^k
// = a,
//
// so it is the square root that we are looking for.

// t := (q-1)/2 (note that q is odd)
if (!BN_rshift1(t, q)) {
goto end;
}

/* x := a^((q-1)/2) */
if (BN_is_zero(t)) /* special case: p = 2^e + 1 */
// x := a^((q-1)/2)
if (BN_is_zero(t)) // special case: p = 2^e + 1
{
if (!BN_nnmod(t, A, p, ctx)) {
goto end;
}
if (BN_is_zero(t)) {
/* special case: a == 0 (mod p) */
// special case: a == 0 (mod p)
BN_zero(ret);
err = 0;
goto end;
@@ -333,33 +328,32 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
goto end;
}
if (BN_is_zero(x)) {
/* special case: a == 0 (mod p) */
// special case: a == 0 (mod p)
BN_zero(ret);
err = 0;
goto end;
}
}

/* b := a*x^2 (= a^q) */
// b := a*x^2 (= a^q)
if (!BN_mod_sqr(b, x, p, ctx) ||
!BN_mod_mul(b, b, A, p, ctx)) {
goto end;
}

/* x := a*x (= a^((q+1)/2)) */
// x := a*x (= a^((q+1)/2))
if (!BN_mod_mul(x, x, A, p, ctx)) {
goto end;
}

while (1) {
/* Now b is a^q * y^k for some even k (0 <= k < 2^E
* where E refers to the original value of e, which we
* don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
*
* We have a*b = x^2,
* y^2^(e-1) = -1,
* b^2^(e-1) = 1.
*/
// Now b is a^q * y^k for some even k (0 <= k < 2^E
// where E refers to the original value of e, which we
// don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
//
// We have a*b = x^2,
// y^2^(e-1) = -1,
// b^2^(e-1) = 1.

if (BN_is_one(b)) {
if (!BN_copy(ret, x)) {
@@ -370,7 +364,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
}


/* find smallest i such that b^(2^i) = 1 */
// find smallest i such that b^(2^i) = 1
i = 1;
if (!BN_mod_sqr(t, b, p, ctx)) {
goto end;
@@ -387,7 +381,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
}


/* t := y^2^(e - i - 1) */
// t := y^2^(e - i - 1)
if (!BN_copy(t, y)) {
goto end;
}
@@ -406,8 +400,8 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {

vrfy:
if (!err) {
/* verify the result -- the input might have been not a square
* (test added in 0.9.8) */
// verify the result -- the input might have been not a square
// (test added in 0.9.8)

if (!BN_mod_sqr(x, ret, p, ctx)) {
err = 1;
@@ -457,30 +451,30 @@ int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx) {
goto err;
}

/* We estimate that the square root of an n-bit number is 2^{n/2}. */
// We estimate that the square root of an n-bit number is 2^{n/2}.
if (!BN_lshift(estimate, BN_value_one(), BN_num_bits(in)/2)) {
goto err;
}

/* This is Newton's method for finding a root of the equation |estimate|^2 -
* |in| = 0. */
// This is Newton's method for finding a root of the equation |estimate|^2 -
// |in| = 0.
for (;;) {
/* |estimate| = 1/2 * (|estimate| + |in|/|estimate|) */
// |estimate| = 1/2 * (|estimate| + |in|/|estimate|)
if (!BN_div(tmp, NULL, in, estimate, ctx) ||
!BN_add(tmp, tmp, estimate) ||
!BN_rshift1(estimate, tmp) ||
/* |tmp| = |estimate|^2 */
// |tmp| = |estimate|^2
!BN_sqr(tmp, estimate, ctx) ||
/* |delta| = |in| - |tmp| */
// |delta| = |in| - |tmp|
!BN_sub(delta, in, tmp)) {
OPENSSL_PUT_ERROR(BN, ERR_R_BN_LIB);
goto err;
}

delta->neg = 0;
/* The difference between |in| and |estimate| squared is required to always
* decrease. This ensures that the loop always terminates, but I don't have
* a proof that it always finds the square root for a given square. */
// The difference between |in| and |estimate| squared is required to always
// decrease. This ensures that the loop always terminates, but I don't have
// a proof that it always finds the square root for a given square.
if (last_delta_valid && BN_cmp(delta, last_delta) >= 0) {
break;
}
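
The same iteration is easier to see on a single machine word. The following standalone sketch (illustration only, not the BN_sqrt code) stops when the estimate stops shrinking instead of tracking the delta, which is equivalent when the starting estimate is at least the true root.

#include <stdint.h>
#include <stdio.h>

// Integer Newton iteration estimate := (estimate + in/estimate) / 2.
// Starting from a value >= sqrt(in), the sequence decreases monotonically
// until it reaches floor(sqrt(in)), after which it stops shrinking.
static uint64_t isqrt(uint64_t in) {
  if (in < 2) {
    return in;
  }
  uint64_t estimate = in / 2 + 1;            // always >= sqrt(in) for in >= 2
  for (;;) {
    uint64_t next = (estimate + in / estimate) / 2;
    if (next >= estimate) {
      return estimate;                       // converged to floor(sqrt(in))
    }
    estimate = next;
  }
}

int main(void) {
  printf("%llu %llu\n", (unsigned long long)isqrt(144),
         (unsigned long long)isqrt(UINT64_C(1000000000000)));   // 12 1000000
  return 0;
}
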


+ 12
- 12
crypto/fipsmodule/cipher/aead.c Show file

@@ -101,8 +101,8 @@ void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) {
ctx->aead = NULL;
}

/* check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
* |in| and |out| alias, we require that |in| == |out|. */
// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
// |in| and |out| alias, we require that |in| == |out|.
static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out,
size_t out_len) {
if (!buffers_alias(in, in_len, out, out_len)) {
@@ -140,8 +140,8 @@ int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
}

error:
/* In the event of an error, clear the output buffer so that a caller
* that doesn't check the return value doesn't send raw data. */
// In the event of an error, clear the output buffer so that a caller
// that doesn't check the return value doesn't send raw data.
OPENSSL_memset(out, 0, max_out_len);
*out_len = 0;
return 0;
@@ -172,8 +172,8 @@ int EVP_AEAD_CTX_seal_scatter(
}

error:
/* In the event of an error, clear the output buffer so that a caller
* that doesn't check the return value doesn't send raw data. */
// In the event of an error, clear the output buffer so that a caller
// that doesn't check the return value doesn't send raw data.
OPENSSL_memset(out, 0, in_len);
OPENSSL_memset(out_tag, 0, max_out_tag_len);
*out_tag_len = 0;
@@ -218,9 +218,9 @@ int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
}

error:
/* In the event of an error, clear the output buffer so that a caller
* that doesn't check the return value doesn't try and process bad
* data. */
// In the event of an error, clear the output buffer so that a caller
// that doesn't check the return value doesn't try and process bad
// data.
OPENSSL_memset(out, 0, max_out_len);
*out_len = 0;
return 0;
@@ -247,9 +247,9 @@ int EVP_AEAD_CTX_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
}

error:
/* In the event of an error, clear the output buffer so that a caller
* that doesn't check the return value doesn't try and process bad
* data. */
// In the event of an error, clear the output buffer so that a caller
// that doesn't check the return value doesn't try and process bad
// data.
OPENSSL_memset(out, 0, in_len);
return 0;
}


+ 11
- 11
crypto/fipsmodule/cipher/cipher.c Show file

@@ -141,12 +141,12 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
}

if (cipher) {
/* Ensure a context left from last time is cleared (the previous check
* attempted to avoid this if the same ENGINE and EVP_CIPHER could be
* used). */
// Ensure a context left from last time is cleared (the previous check
// attempted to avoid this if the same ENGINE and EVP_CIPHER could be
// used).
if (ctx->cipher) {
EVP_CIPHER_CTX_cleanup(ctx);
/* Restore encrypt and flags */
// Restore encrypt and flags
ctx->encrypt = enc;
}

@@ -177,7 +177,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
return 0;
}

/* we assume block size is a power of 2 in *cryptUpdate */
// we assume block size is a power of 2 in *cryptUpdate
assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 ||
ctx->cipher->block_size == 16);

@@ -189,7 +189,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,

case EVP_CIPH_CFB_MODE:
ctx->num = 0;
/* fall-through */
// fall-through

case EVP_CIPH_CBC_MODE:
assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv));
@@ -202,7 +202,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
case EVP_CIPH_CTR_MODE:
case EVP_CIPH_OFB_MODE:
ctx->num = 0;
/* Don't reuse IV for CTR mode */
// Don't reuse IV for CTR mode
if (iv) {
OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx));
}
@@ -388,8 +388,8 @@ int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
return 0;
}

/* if we have 'decrypted' a multiple of block size, make sure
* we have a copy of this last block */
// if we have 'decrypted' a multiple of block size, make sure
// we have a copy of this last block
if (b > 1 && !ctx->buf_len) {
*out_len -= b;
ctx->final_used = 1;
@@ -437,8 +437,8 @@ int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) {
}
assert(b <= sizeof(ctx->final));

/* The following assumes that the ciphertext has been authenticated.
* Otherwise it provides a padding oracle. */
// The following assumes that the ciphertext has been authenticated.
// Otherwise it provides a padding oracle.
n = ctx->final[b - 1];
if (n == 0 || n > (int)b) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);


+ 35
- 35
crypto/fipsmodule/cipher/e_aes.c Show file

@@ -68,7 +68,7 @@
#endif


OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) /* Unreachable code. */
OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code.

typedef struct {
union {
@@ -86,14 +86,14 @@ typedef struct {
union {
double align;
AES_KEY ks;
} ks; /* AES key schedule to use */
int key_set; /* Set if key initialised */
int iv_set; /* Set if an iv is set */
} ks; // AES key schedule to use
int key_set; // Set if key initialised
int iv_set; // Set if an iv is set
GCM128_CONTEXT gcm;
uint8_t *iv; /* Temporary IV store */
int ivlen; /* IV length */
uint8_t *iv; // Temporary IV store
int ivlen; // IV length
int taglen;
int iv_gen; /* It is OK to generate IVs */
int iv_gen; // It is OK to generate IVs
ctr128_f ctr;
} EVP_AES_GCM_CTX;

@@ -125,8 +125,8 @@ static char bsaes_capable(void) {


#if defined(BSAES)
/* On platforms where BSAES gets defined (just above), then these functions are
* provided by asm. */
// On platforms where BSAES gets defined (just above), then these functions are
// provided by asm.
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
@@ -136,8 +136,8 @@ static char bsaes_capable(void) {
return 0;
}

/* On other platforms, bsaes_capable() will always return false and so the
* following will never be called. */
// On other platforms, bsaes_capable() will always return false and so the
// following will never be called.
static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t ivec[16], int enc) {
abort();
@@ -151,8 +151,8 @@ static void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
#endif

#if defined(VPAES)
/* On platforms where VPAES gets defined (just above), then these functions are
* provided by asm. */
// On platforms where VPAES gets defined (just above), then these functions are
// provided by asm.
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

@@ -166,8 +166,8 @@ static char vpaes_capable(void) {
return 0;
}

/* On other platforms, vpaes_capable() will always return false and so the
* following will never be called. */
// On other platforms, vpaes_capable() will always return false and so the
// following will never be called.
static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits,
AES_KEY *key) {
abort();
@@ -203,8 +203,8 @@ void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,

#else

/* On other platforms, aesni_capable() will always return false and so the
* following will never be called. */
// On other platforms, aesni_capable() will always return false and so the
// following will never be called.
static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
abort();
}
@@ -404,7 +404,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
if (key) {
gctx->ctr =
aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len);
/* If we have an iv can set it directly, otherwise use saved IV. */
// If we have an iv can set it directly, otherwise use saved IV.
if (iv == NULL && gctx->iv_set) {
iv = gctx->iv;
}
@@ -414,7 +414,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
}
gctx->key_set = 1;
} else {
/* If key set use IV, otherwise copy */
// If key set use IV, otherwise copy
if (gctx->key_set) {
CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
} else {
@@ -434,7 +434,7 @@ static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) {
}
}

/* increment counter (64-bit int) by 1 */
// increment counter (64-bit int) by 1
static void ctr64_inc(uint8_t *counter) {
int n = 8;
uint8_t c;
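
ctr64_inc above advances the low 64 bits of the counter block as a big-endian integer. A hedged sketch of an equivalent loop (not copied from this file): add one to the last byte and propagate the carry toward the more significant bytes, stopping as soon as a byte does not wrap.

/* Equivalent big-endian 64-bit increment over an 8-byte counter. */
static void ctr64_inc_sketch(uint8_t counter[8]) {
  for (int i = 7; i >= 0; i--) {
    counter[i]++;
    if (counter[i] != 0) {
      break;            /* no carry into the next (more significant) byte */
    }
  }
}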
@@ -467,7 +467,7 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
return 0;
}

/* Allocate memory for IV if needed */
// Allocate memory for IV if needed
if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) {
if (gctx->iv != c->iv) {
OPENSSL_free(gctx->iv);
@@ -496,14 +496,14 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
return 1;

case EVP_CTRL_GCM_SET_IV_FIXED:
/* Special case: -1 length restores whole IV */
// Special case: -1 length restores whole IV
if (arg == -1) {
OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen);
gctx->iv_gen = 1;
return 1;
}
/* Fixed field must be at least 4 bytes and invocation field
* at least 8. */
// Fixed field must be at least 4 bytes and invocation field
// at least 8.
if (arg < 4 || (gctx->ivlen - arg) < 8) {
return 0;
}
@@ -525,9 +525,9 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
arg = gctx->ivlen;
}
OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
/* Invocation field will be at least 8 bytes in size and
* so no need to check wrap around or increment more than
* last 8 bytes. */
// Invocation field will be at least 8 bytes in size and
// so no need to check wrap around or increment more than
// last 8 bytes.
ctr64_inc(gctx->iv + gctx->ivlen - 8);
gctx->iv_set = 1;
return 1;
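
Together, EVP_CTRL_GCM_SET_IV_FIXED and EVP_CTRL_GCM_IV_GEN implement the split nonce used by TLS-style records: a fixed prefix installed once, and an 8-byte invocation field that is handed back to the caller and then incremented. A hedged usage fragment for a 12-byte nonce (4 fixed + 8 invocation), assuming the encrypt direction and an already-keyed GCM context named ctx; the record encryption itself is omitted:

/* Install the 4-byte fixed prefix once (e.g. derived in a handshake); the
 * library fills the remaining invocation bytes for the encrypt direction. */
uint8_t fixed[4] = {0xde, 0xad, 0xbe, 0xef};
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IV_FIXED, sizeof(fixed), fixed);

/* Per record: fetch the current 8-byte invocation field (it is incremented
 * via ctr64_inc for the next call), send it with the record, then encrypt. */
uint8_t record_iv[8];
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_IV_GEN, sizeof(record_iv), record_iv);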
@@ -565,7 +565,7 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
size_t len) {
EVP_AES_GCM_CTX *gctx = ctx->cipher_data;

/* If not set up, return error */
// If not set up, return error
if (!gctx->key_set) {
return -1;
}
@@ -613,7 +613,7 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
}
CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
gctx->taglen = 16;
/* Don't reuse the IV */
// Don't reuse the IV
gctx->iv_set = 0;
return 0;
}
@@ -813,7 +813,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_gcm_generic) {
#if !defined(OPENSSL_NO_ASM) && \
(defined(OPENSSL_X86_64) || defined(OPENSSL_X86))

/* AES-NI section. */
// AES-NI section.

static char aesni_capable(void) {
return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0;
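
The expression above tests bit 57 of the OPENSSL_ia32cap_P feature vector, i.e. bit 25 of ECX from CPUID leaf 1, which advertises AES-NI. A standalone sketch of the same probe using the compiler's <cpuid.h> helper (x86/x86-64 with GCC or Clang only; an illustration, not how the library itself reads the bit):

#include <cpuid.h>

static int aesni_capable_sketch(void) {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    return 0;
  }
  return (ecx & (1u << 25)) != 0;   /* ECX bit 25: AES-NI */
}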
@@ -880,8 +880,8 @@ static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt, 1);
gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
/* If we have an iv can set it directly, otherwise use
* saved IV. */
// If we have an iv can set it directly, otherwise use
// saved IV.
if (iv == NULL && gctx->iv_set) {
iv = gctx->iv;
}
@@ -891,7 +891,7 @@ static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
}
gctx->key_set = 1;
} else {
/* If key set use IV, otherwise copy */
// If key set use IV, otherwise copy
if (gctx->key_set) {
CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
} else {
@@ -1104,7 +1104,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_gcm) {
} \
}

#else /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */
#else // ^^^ OPENSSL_X86_64 || OPENSSL_X86

static char aesni_capable(void) {
return 0;
@@ -1158,7 +1158,7 @@ static int aead_aes_gcm_init_impl(struct aead_aes_gcm_ctx *gcm_ctx,

if (key_bits != 128 && key_bits != 256) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; /* EVP_AEAD_CTX_init should catch this. */
return 0; // EVP_AEAD_CTX_init should catch this.
}

if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {


+ 12
- 12
crypto/fipsmodule/cipher/internal.h

@@ -70,10 +70,10 @@ extern "C" {
#endif


/* EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. */
// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode.
#define EVP_CIPH_MODE_MASK 0x3f

/* EVP_AEAD represents a specific AEAD algorithm. */
// EVP_AEAD represents a specific AEAD algorithm.
struct evp_aead_st {
uint8_t key_len;
uint8_t nonce_len;
@@ -81,8 +81,8 @@ struct evp_aead_st {
uint8_t max_tag_len;
int seal_scatter_supports_extra_in;

/* init initialises an |EVP_AEAD_CTX|. If this call returns zero then
* |cleanup| will not be called for that context. */
// init initialises an |EVP_AEAD_CTX|. If this call returns zero then
// |cleanup| will not be called for that context.
int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
size_t tag_len);
int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
@@ -112,18 +112,18 @@ struct evp_aead_st {
size_t extra_in_len);
};
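
This function table is what the public EVP_AEAD_CTX entry points dispatch through. A hedged sketch of the caller-visible flow (init, seal, cleanup) against <openssl/aead.h>, with a hypothetical helper name and error handling trimmed:

/* Per the contract documented above, if init fails, cleanup is not called. */
#include <openssl/aead.h>

static int aead_seal_once(const uint8_t key[16], const uint8_t nonce[12],
                          const uint8_t *msg, size_t msg_len,
                          uint8_t *out, size_t max_out, size_t *out_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, 16,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;                     /* init failed: do not call cleanup */
  }
  int ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out, nonce, 12,
                             msg, msg_len, NULL, 0);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}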

/* aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
* where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
* set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is
* initialised to do GHASH with the given key. It returns a function for
* optimised CTR-mode, or NULL if CTR-mode should be built using
* |*out_block|. */
// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
// set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is
// initialised to do GHASH with the given key. It returns a function for
// optimised CTR-mode, or NULL if CTR-mode should be built using
// |*out_block|.
ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx,
block128_f *out_block, const uint8_t *key,
size_t key_bytes);
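
A hedged sketch of how a caller might consume the return value: use the optimised ctr128_f when one is provided, otherwise build CTR mode from the block function. It assumes the CRYPTO_ctr128_* helpers from crypto/fipsmodule/modes/internal.h and leaves in/out/len/ivec and the raw key to the surrounding code; it is an illustration, not library code:

AES_KEY ks;
block128_f block = NULL;
uint8_t ecount[16] = {0};
unsigned num = 0;
ctr128_f ctr = aes_ctr_set_key(&ks, NULL /* no GHASH */, &block, key, 16);
if (ctr != NULL) {
  /* Hardware/bitsliced CTR path. */
  CRYPTO_ctr128_encrypt_ctr32(in, out, len, &ks, ivec, ecount, &num, ctr);
} else {
  /* Generic CTR built from the single-block function. */
  CRYPTO_ctr128_encrypt(in, out, len, &ks, ivec, ecount, &num, block);
}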

#if defined(__cplusplus)
} /* extern C */
} // extern C
#endif

#endif /* OPENSSL_HEADER_CIPHER_INTERNAL_H */
#endif // OPENSSL_HEADER_CIPHER_INTERNAL_H

Some files were not shown because the diff is too large.
