diff --git a/crypto/base64/base64.c b/crypto/base64/base64.c index d5450fca..b701b0d1 100644 --- a/crypto/base64/base64.c +++ b/crypto/base64/base64.c @@ -65,29 +65,29 @@ #include "../internal.h" -/* constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t| - * arguments for a slightly simpler implementation. */ +// constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t| +// arguments for a slightly simpler implementation. static inline uint8_t constant_time_lt_args_8(uint8_t a, uint8_t b) { crypto_word_t aw = a; crypto_word_t bw = b; - /* |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same - * MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1. */ + // |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same + // MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1. return constant_time_msb_w(aw - bw); } -/* constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max| - * and |CONSTTIME_FALSE_8| otherwise. */ +// constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max| +// and |CONSTTIME_FALSE_8| otherwise. static inline uint8_t constant_time_in_range_8(uint8_t a, uint8_t min, uint8_t max) { a -= min; return constant_time_lt_args_8(a, max - min + 1); } -/* Encoding. */ +// Encoding. static uint8_t conv_bin2ascii(uint8_t a) { - /* Since PEM is sometimes used to carry private keys, we encode base64 data - * itself in constant-time. */ + // Since PEM is sometimes used to carry private keys, we encode base64 data + // itself in constant-time. a &= 0x3f; uint8_t ret = constant_time_select_8(constant_time_eq_8(a, 62), '+', '/'); ret = @@ -183,8 +183,8 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, ctx->data_used = (unsigned)in_len; if (total > INT_MAX) { - /* We cannot signal an error, but we can at least avoid making *out_len - * negative. */ + // We cannot signal an error, but we can at least avoid making *out_len + // negative. 
total = 0; } *out_len = (int)total; @@ -201,8 +201,8 @@ void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len) { out[encoded] = '\0'; ctx->data_used = 0; - /* ctx->data_used is bounded by sizeof(ctx->data), so this does not - * overflow. */ + // ctx->data_used is bounded by sizeof(ctx->data), so this does not + // overflow. assert(encoded <= INT_MAX); *out_len = (int)encoded; } @@ -240,7 +240,7 @@ size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { } -/* Decoding. */ +// Decoding. int EVP_DecodedLength(size_t *out_len, size_t len) { if (len % 4 != 0) { @@ -256,8 +256,8 @@ void EVP_DecodeInit(EVP_ENCODE_CTX *ctx) { } static uint8_t base64_ascii_to_bin(uint8_t a) { - /* Since PEM is sometimes used to carry private keys, we decode base64 data - * itself in constant-time. */ + // Since PEM is sometimes used to carry private keys, we decode base64 data + // itself in constant-time. const uint8_t is_upper = constant_time_in_range_8(a, 'A', 'Z'); const uint8_t is_lower = constant_time_in_range_8(a, 'a', 'z'); const uint8_t is_digit = constant_time_in_range_8(a, '0', '9'); @@ -265,21 +265,21 @@ static uint8_t base64_ascii_to_bin(uint8_t a) { const uint8_t is_slash = constant_time_eq_8(a, '/'); const uint8_t is_equals = constant_time_eq_8(a, '='); - uint8_t ret = 0xff; /* 0xff signals invalid. */ - ret = constant_time_select_8(is_upper, a - 'A', ret); /* [0,26) */ - ret = constant_time_select_8(is_lower, a - 'a' + 26, ret); /* [26,52) */ - ret = constant_time_select_8(is_digit, a - '0' + 52, ret); /* [52,62) */ + uint8_t ret = 0xff; // 0xff signals invalid. + ret = constant_time_select_8(is_upper, a - 'A', ret); // [0,26) + ret = constant_time_select_8(is_lower, a - 'a' + 26, ret); // [26,52) + ret = constant_time_select_8(is_digit, a - '0' + 52, ret); // [52,62) ret = constant_time_select_8(is_plus, 62, ret); ret = constant_time_select_8(is_slash, 63, ret); - /* Padding maps to zero, to be further handled by the caller. 
*/ + // Padding maps to zero, to be further handled by the caller. ret = constant_time_select_8(is_equals, 0, ret); return ret; } -/* base64_decode_quad decodes a single “quad” (i.e. four characters) of base64 - * data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the - * number of bytes written, which will be less than three if the quad ended - * with padding. It returns one on success or zero on error. */ +// base64_decode_quad decodes a single “quad” (i.e. four characters) of base64 +// data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the +// number of bytes written, which will be less than three if the quad ended +// with padding. It returns one on success or zero on error. static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes, const uint8_t *in) { const uint8_t a = base64_ascii_to_bin(in[0]); @@ -300,20 +300,20 @@ static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes, switch (padding_pattern) { case 0: - /* The common case of no padding. */ + // The common case of no padding. *out_num_bytes = 3; out[0] = v >> 16; out[1] = v >> 8; out[2] = v; break; - case 1: /* xxx= */ + case 1: // xxx= *out_num_bytes = 2; out[0] = v >> 16; out[1] = v >> 8; break; - case 3: /* xx== */ + case 3: // xx== *out_num_bytes = 1; out[0] = v >> 16; break; @@ -424,7 +424,7 @@ int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out, } int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { - /* Trim spaces and tabs from the beginning of the input. */ + // Trim spaces and tabs from the beginning of the input. while (src_len > 0) { if (src[0] != ' ' && src[0] != '\t') { break; @@ -434,7 +434,7 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { src_len--; } - /* Trim newlines, spaces and tabs from the end of the line. */ + // Trim newlines, spaces and tabs from the end of the line. 
while (src_len > 0) { switch (src[src_len-1]) { case ' ': @@ -455,8 +455,8 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { return -1; } - /* EVP_DecodeBlock does not take padding into account, so put the - * NULs back in... so the caller can strip them back out. */ + // EVP_DecodeBlock does not take padding into account, so put the + // NULs back in... so the caller can strip them back out. while (dst_len % 3 != 0) { dst[dst_len++] = '\0'; } diff --git a/crypto/base64/base64_test.cc b/crypto/base64/base64_test.cc index 108c7829..28b5c8e0 100644 --- a/crypto/base64/base64_test.cc +++ b/crypto/base64/base64_test.cc @@ -280,9 +280,9 @@ TEST_P(Base64Test, DecodeUpdateStreaming) { out_len += bytes_written; if (i == encoded_len || (i + 1 == encoded_len && t.encoded[i] == '\n') || - /* If there was an '-' in the input (which means “EOF”) then - * this loop will continue to test that |EVP_DecodeUpdate| will - * ignore the remainder of the input. */ + // If there was an '-' in the input (which means “EOF”) then + // this loop will continue to test that |EVP_DecodeUpdate| will + // ignore the remainder of the input. strchr(t.encoded, '-') != nullptr) { break; } diff --git a/crypto/bio/bio.c b/crypto/bio/bio.c index cffd05b7..4e889662 100644 --- a/crypto/bio/bio.c +++ b/crypto/bio/bio.c @@ -409,14 +409,14 @@ void ERR_print_errors(BIO *bio) { ERR_print_errors_cb(print_bio, bio); } -/* bio_read_all reads everything from |bio| and prepends |prefix| to it. On - * success, |*out| is set to an allocated buffer (which should be freed with - * |OPENSSL_free|), |*out_len| is set to its length and one is returned. The - * buffer will contain |prefix| followed by the contents of |bio|. On failure, - * zero is returned. - * - * The function will fail if the size of the output would equal or exceed - * |max_len|. */ +// bio_read_all reads everything from |bio| and prepends |prefix| to it. 
On +// success, |*out| is set to an allocated buffer (which should be freed with +// |OPENSSL_free|), |*out_len| is set to its length and one is returned. The +// buffer will contain |prefix| followed by the contents of |bio|. On failure, +// zero is returned. +// +// The function will fail if the size of the output would equal or exceed +// |max_len|. static int bio_read_all(BIO *bio, uint8_t **out, size_t *out_len, const uint8_t *prefix, size_t prefix_len, size_t max_len) { @@ -480,20 +480,20 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) { const uint8_t length_byte = header[1]; if ((tag & 0x1f) == 0x1f) { - /* Long form tags are not supported. */ + // Long form tags are not supported. return 0; } size_t len, header_len; if ((length_byte & 0x80) == 0) { - /* Short form length. */ + // Short form length. len = length_byte; header_len = kInitialHeaderLen; } else { const size_t num_bytes = length_byte & 0x7f; if ((tag & 0x20 /* constructed */) != 0 && num_bytes == 0) { - /* indefinite length. */ + // indefinite length. return bio_read_all(bio, out, out_len, header, kInitialHeaderLen, max_len); } @@ -516,12 +516,12 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) { } if (len32 < 128) { - /* Length should have used short-form encoding. */ + // Length should have used short-form encoding. return 0; } if ((len32 >> ((num_bytes-1)*8)) == 0) { - /* Length should have been at least one byte shorter. */ + // Length should have been at least one byte shorter. return 0; } diff --git a/crypto/bio/bio_mem.c b/crypto/bio/bio_mem.c index 1cba8a89..08dd6e9d 100644 --- a/crypto/bio/bio_mem.c +++ b/crypto/bio/bio_mem.c @@ -82,16 +82,16 @@ BIO *BIO_new_mem_buf(const void *buf, int len) { } b = (BUF_MEM *)ret->ptr; - /* BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. */ + // BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. 
b->data = (void *)buf; b->length = size; b->max = size; ret->flags |= BIO_FLAGS_MEM_RDONLY; - /* |num| is used to store the value that this BIO will return when it runs - * out of data. If it's negative then the retry flags will also be set. Since - * this is static data, retrying wont help */ + // |num| is used to store the value that this BIO will return when it runs + // out of data. If it's negative then the retry flags will also be set. Since + // this is static data, retrying wont help ret->num = 0; return ret; @@ -105,8 +105,8 @@ static int mem_new(BIO *bio) { return 0; } - /* |shutdown| is used to store the close flag: whether the BIO has ownership - * of the BUF_MEM. */ + // |shutdown| is used to store the close flag: whether the BIO has ownership + // of the BUF_MEM. bio->shutdown = 1; bio->init = 1; bio->num = -1; @@ -214,8 +214,8 @@ static int mem_gets(BIO *bio, char *buf, int size) { } } - /* i is now the max num of bytes to copy, either j or up to and including the - * first newline */ + // i is now the max num of bytes to copy, either j or up to and including the + // first newline i = mem_read(bio, buf, i); if (i > 0) { @@ -233,7 +233,7 @@ static long mem_ctrl(BIO *bio, int cmd, long num, void *ptr) { switch (cmd) { case BIO_CTRL_RESET: if (b->data != NULL) { - /* For read only case reset to the start again */ + // For read only case reset to the start again if (bio->flags & BIO_FLAGS_MEM_RDONLY) { b->data -= b->max - b->length; b->length = b->max; diff --git a/crypto/bio/connect.c b/crypto/bio/connect.c index d40dd530..0b60f6a9 100644 --- a/crypto/bio/connect.c +++ b/crypto/bio/connect.c @@ -98,12 +98,12 @@ typedef struct bio_connect_st { struct sockaddr_storage them; socklen_t them_length; - /* the file descriptor is kept in bio->num in order to match the socket - * BIO. */ + // the file descriptor is kept in bio->num in order to match the socket + // BIO. 
- /* info_callback is called when the connection is initially made - * callback(BIO,state,ret); The callback should return 'ret', state is for - * compatibility with the SSL info_callback. */ + // info_callback is called when the connection is initially made + // callback(BIO,state,ret); The callback should return 'ret', state is for + // compatibility with the SSL info_callback. int (*info_callback)(const BIO *bio, int state, int ret); } BIO_CONNECT; @@ -113,9 +113,9 @@ static int closesocket(int sock) { } #endif -/* split_host_and_port sets |*out_host| and |*out_port| to the host and port - * parsed from |name|. It returns one on success or zero on error. Even when - * successful, |*out_port| may be NULL on return if no port was specified. */ +// split_host_and_port sets |*out_host| and |*out_port| to the host and port +// parsed from |name|. It returns one on success or zero on error. Even when +// successful, |*out_port| may be NULL on return if no port was specified. static int split_host_and_port(char **out_host, char **out_port, const char *name) { const char *host, *port = NULL; size_t host_len = 0; @@ -123,24 +123,24 @@ static int split_host_and_port(char **out_host, char **out_port, const char *nam *out_host = NULL; *out_port = NULL; - if (name[0] == '[') { /* bracketed IPv6 address */ + if (name[0] == '[') { // bracketed IPv6 address const char *close = strchr(name, ']'); if (close == NULL) { return 0; } host = name + 1; host_len = close - host; - if (close[1] == ':') { /* [IP]:port */ + if (close[1] == ':') { // [IP]:port port = close + 2; } else if (close[1] != 0) { return 0; } } else { const char *colon = strchr(name, ':'); - if (colon == NULL || strchr(colon + 1, ':') != NULL) { /* IPv6 address */ + if (colon == NULL || strchr(colon + 1, ':') != NULL) { // IPv6 address host = name; host_len = strlen(name); - } else { /* host:port */ + } else { // host:port host = name; host_len = colon - name; port = colon + 1; @@ -175,9 +175,9 @@ static int 
conn_state(BIO *bio, BIO_CONNECT *c) { for (;;) { switch (c->state) { case BIO_CONN_S_BEFORE: - /* If there's a hostname and a port, assume that both are - * exactly what they say. If there is only a hostname, try - * (just once) to split it into a hostname and port. */ + // If there's a hostname and a port, assume that both are + // exactly what they say. If there is only a hostname, try + // (just once) to split it into a hostname and port. if (c->param_hostname == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_HOSTNAME_SPECIFIED); @@ -330,7 +330,7 @@ static void conn_close_socket(BIO *bio) { return; } - /* Only do a shutdown if things were established */ + // Only do a shutdown if things were established if (c->state == BIO_CONN_S_OK) { shutdown(bio->num, 2); } @@ -415,7 +415,7 @@ static long conn_ctrl(BIO *bio, int cmd, long num, void *ptr) { bio->flags = 0; break; case BIO_C_DO_STATE_MACHINE: - /* use this one to start the connection */ + // use this one to start the connection if (data->state != BIO_CONN_S_OK) { ret = (long)conn_state(bio, data); } else { diff --git a/crypto/bio/fd.c b/crypto/bio/fd.c index 4e9eeacf..37840a7e 100644 --- a/crypto/bio/fd.c +++ b/crypto/bio/fd.c @@ -138,7 +138,7 @@ BIO *BIO_new_fd(int fd, int close_flag) { } static int fd_new(BIO *bio) { - /* num is used to store the file descriptor. */ + // num is used to store the file descriptor. bio->num = -1; return 1; } diff --git a/crypto/bio/file.c b/crypto/bio/file.c index 3580cd1c..25c1dbdb 100644 --- a/crypto/bio/file.c +++ b/crypto/bio/file.c @@ -55,18 +55,17 @@ * [including the GNU Public Licence.] */ #if defined(__linux) || defined(__sun) || defined(__hpux) -/* Following definition aliases fopen to fopen64 on above mentioned - * platforms. This makes it possible to open and sequentially access - * files larger than 2GB from 32-bit application. 
It does not allow to - * traverse them beyond 2GB with fseek/ftell, but on the other hand *no* - * 32-bit platform permits that, not with fseek/ftell. Not to mention - * that breaking 2GB limit for seeking would require surgery to *our* - * API. But sequential access suffices for practical cases when you - * can run into large files, such as fingerprinting, so we can let API - * alone. For reference, the list of 32-bit platforms which allow for - * sequential access of large files without extra "magic" comprise *BSD, - * Darwin, IRIX... - */ +// Following definition aliases fopen to fopen64 on above mentioned +// platforms. This makes it possible to open and sequentially access +// files larger than 2GB from 32-bit application. It does not allow to +// traverse them beyond 2GB with fseek/ftell, but on the other hand *no* +// 32-bit platform permits that, not with fseek/ftell. Not to mention +// that breaking 2GB limit for seeking would require surgery to *our* +// API. But sequential access suffices for practical cases when you +// can run into large files, such as fingerprinting, so we can let API +// alone. For reference, the list of 32-bit platforms which allow for +// sequential access of large files without extra "magic" comprise *BSD, +// Darwin, IRIX... #ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif @@ -157,7 +156,7 @@ static int file_read(BIO *b, char *out, int outl) { return -1; } - /* fread reads at most |outl| bytes, so |ret| fits in an int. */ + // fread reads at most |outl| bytes, so |ret| fits in an int. return (int)ret; } @@ -232,7 +231,7 @@ static long file_ctrl(BIO *b, int cmd, long num, void *ptr) { b->init = 1; break; case BIO_C_GET_FILE_PTR: - /* the ptr parameter is actually a FILE ** in this case. */ + // the ptr parameter is actually a FILE ** in this case. 
if (ptr != NULL) { fpp = (FILE **)ptr; *fpp = (FILE *)b->ptr; diff --git a/crypto/bio/hexdump.c b/crypto/bio/hexdump.c index d55df620..6d928bc0 100644 --- a/crypto/bio/hexdump.c +++ b/crypto/bio/hexdump.c @@ -62,12 +62,12 @@ #include "../internal.h" -/* hexdump_ctx contains the state of a hexdump. */ +// hexdump_ctx contains the state of a hexdump. struct hexdump_ctx { BIO *bio; - char right_chars[18]; /* the contents of the right-hand side, ASCII dump. */ - unsigned used; /* number of bytes in the current line. */ - size_t n; /* number of bytes total. */ + char right_chars[18]; // the contents of the right-hand side, ASCII dump. + unsigned used; // number of bytes in the current line. + size_t n; // number of bytes total. unsigned indent; }; @@ -84,21 +84,20 @@ static char to_char(uint8_t b) { return b; } -/* hexdump_write adds |len| bytes of |data| to the current hex dump described by - * |ctx|. */ +// hexdump_write adds |len| bytes of |data| to the current hex dump described by +// |ctx|. static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, size_t len) { char buf[10]; unsigned l; - /* Output lines look like: - * 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=| - * ^ offset ^ extra space ^ ASCII of line - */ + // Output lines look like: + // 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=| + // ^ offset ^ extra space ^ ASCII of line for (size_t i = 0; i < len; i++) { if (ctx->used == 0) { - /* The beginning of a line. */ + // The beginning of a line. BIO_indent(ctx->bio, ctx->indent, UINT_MAX); hexbyte(&buf[0], ctx->n >> 24); @@ -115,12 +114,12 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, buf[2] = ' '; l = 3; if (ctx->used == 7) { - /* There's an additional space after the 8th byte. */ + // There's an additional space after the 8th byte. 
buf[3] = ' '; l = 4; } else if (ctx->used == 15) { - /* At the end of the line there's an extra space and the bar for the - * right column. */ + // At the end of the line there's an extra space and the bar for the + // right column. buf[3] = ' '; buf[4] = '|'; l = 5; @@ -145,9 +144,9 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, return 1; } -/* finish flushes any buffered data in |ctx|. */ +// finish flushes any buffered data in |ctx|. static int finish(struct hexdump_ctx *ctx) { - /* See the comments in |hexdump| for the details of this format. */ + // See the comments in |hexdump| for the details of this format. const unsigned n_bytes = ctx->used; unsigned l; char buf[5]; diff --git a/crypto/bio/internal.h b/crypto/bio/internal.h index 4ec77fad..8ed27dae 100644 --- a/crypto/bio/internal.h +++ b/crypto/bio/internal.h @@ -61,7 +61,7 @@ #if !defined(OPENSSL_WINDOWS) #if defined(OPENSSL_PNACL) -/* newlib uses u_short in socket.h without defining it. */ +// newlib uses u_short in socket.h without defining it. typedef unsigned short u_short; #endif #include @@ -78,34 +78,34 @@ extern "C" { #endif -/* BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr| - * and |*out_addr_length| with the correct values for connecting to |hostname| - * on |port_str|. It returns one on success or zero on error. */ +// BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr| +// and |*out_addr_length| with the correct values for connecting to |hostname| +// on |port_str|. It returns one on success or zero on error. int bio_ip_and_port_to_socket_and_addr(int *out_sock, struct sockaddr_storage *out_addr, socklen_t *out_addr_length, const char *hostname, const char *port_str); -/* BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on - * success and zero otherwise. */ +// BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on +// success and zero otherwise. 
int bio_socket_nbio(int sock, int on); -/* BIO_clear_socket_error clears the last system socket error. - * - * TODO(fork): remove all callers of this. */ +// BIO_clear_socket_error clears the last system socket error. +// +// TODO(fork): remove all callers of this. void bio_clear_socket_error(void); -/* BIO_sock_error returns the last socket error on |sock|. */ +// BIO_sock_error returns the last socket error on |sock|. int bio_sock_error(int sock); -/* BIO_fd_should_retry returns non-zero if |return_value| indicates an error - * and |errno| indicates that it's non-fatal. */ +// BIO_fd_should_retry returns non-zero if |return_value| indicates an error +// and |errno| indicates that it's non-fatal. int bio_fd_should_retry(int return_value); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BIO_INTERNAL_H */ +#endif // OPENSSL_HEADER_BIO_INTERNAL_H diff --git a/crypto/bio/pair.c b/crypto/bio/pair.c index 8ba382d1..f5057ed3 100644 --- a/crypto/bio/pair.c +++ b/crypto/bio/pair.c @@ -63,22 +63,22 @@ struct bio_bio_st { - BIO *peer; /* NULL if buf == NULL. - * If peer != NULL, then peer->ptr is also a bio_bio_st, - * and its "peer" member points back to us. - * peer != NULL iff init != 0 in the BIO. */ - - /* This is for what we write (i.e. reading uses peer's struct): */ - int closed; /* valid iff peer != NULL */ - size_t len; /* valid iff buf != NULL; 0 if peer == NULL */ - size_t offset; /* valid iff buf != NULL; 0 if len == 0 */ + BIO *peer; // NULL if buf == NULL. + // If peer != NULL, then peer->ptr is also a bio_bio_st, + // and its "peer" member points back to us. + // peer != NULL iff init != 0 in the BIO. + + // This is for what we write (i.e. 
reading uses peer's struct): + int closed; // valid iff peer != NULL + size_t len; // valid iff buf != NULL; 0 if peer == NULL + size_t offset; // valid iff buf != NULL; 0 if len == 0 size_t size; - uint8_t *buf; /* "size" elements (if != NULL) */ + uint8_t *buf; // "size" elements (if != NULL) - size_t request; /* valid iff peer != NULL; 0 if len != 0, - * otherwise set by peer to number of bytes - * it (unsuccessfully) tried to read, - * never more than buffer space (size-len) warrants. */ + size_t request; // valid iff peer != NULL; 0 if len != 0, + // otherwise set by peer to number of bytes + // it (unsuccessfully) tried to read, + // never more than buffer space (size-len) warrants. }; static int bio_new(BIO *bio) { @@ -90,7 +90,7 @@ static int bio_new(BIO *bio) { } OPENSSL_memset(b, 0, sizeof(struct bio_bio_st)); - b->size = 17 * 1024; /* enough for one TLS record (just a default) */ + b->size = 17 * 1024; // enough for one TLS record (just a default) bio->ptr = b; return 1; } @@ -165,7 +165,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { assert(peer_b != NULL); assert(peer_b->buf != NULL); - peer_b->request = 0; /* will be set in "retry_read" situation */ + peer_b->request = 0; // will be set in "retry_read" situation if (buf == NULL || size == 0) { return 0; @@ -173,30 +173,30 @@ static int bio_read(BIO *bio, char *buf, int size_) { if (peer_b->len == 0) { if (peer_b->closed) { - return 0; /* writer has closed, and no data is left */ + return 0; // writer has closed, and no data is left } else { - BIO_set_retry_read(bio); /* buffer is empty */ + BIO_set_retry_read(bio); // buffer is empty if (size <= peer_b->size) { peer_b->request = size; } else { - /* don't ask for more than the peer can - * deliver in one write */ + // don't ask for more than the peer can + // deliver in one write peer_b->request = peer_b->size; } return -1; } } - /* we can read */ + // we can read if (peer_b->len < size) { size = peer_b->len; } - /* now read "size" bytes */ 
+ // now read "size" bytes rest = size; assert(rest > 0); - /* one or two iterations */ + // one or two iterations do { size_t chunk; @@ -204,7 +204,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { if (peer_b->offset + rest <= peer_b->size) { chunk = rest; } else { - /* wrap around ring buffer */ + // wrap around ring buffer chunk = peer_b->size - peer_b->offset; } assert(peer_b->offset + chunk <= peer_b->size); @@ -220,7 +220,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { } buf += chunk; } else { - /* buffer now empty, no need to advance "buf" */ + // buffer now empty, no need to advance "buf" assert(chunk == rest); peer_b->offset = 0; } @@ -248,7 +248,7 @@ static int bio_write(BIO *bio, const char *buf, int num_) { b->request = 0; if (b->closed) { - /* we already closed */ + // we already closed OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE); return -1; } @@ -256,20 +256,20 @@ static int bio_write(BIO *bio, const char *buf, int num_) { assert(b->len <= b->size); if (b->len == b->size) { - BIO_set_retry_write(bio); /* buffer is full */ + BIO_set_retry_write(bio); // buffer is full return -1; } - /* we can write */ + // we can write if (num > b->size - b->len) { num = b->size - b->len; } - /* now write "num" bytes */ + // now write "num" bytes rest = num; assert(rest > 0); - /* one or two iterations */ + // one or two iterations do { size_t write_offset; size_t chunk; @@ -280,12 +280,12 @@ static int bio_write(BIO *bio, const char *buf, int num_) { if (write_offset >= b->size) { write_offset -= b->size; } - /* b->buf[write_offset] is the first byte we can write to. */ + // b->buf[write_offset] is the first byte we can write to. 
if (write_offset + rest <= b->size) { chunk = rest; } else { - /* wrap around ring buffer */ + // wrap around ring buffer chunk = b->size - write_offset; } @@ -363,15 +363,15 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { assert(b != NULL); switch (cmd) { - /* specific CTRL codes */ + // specific CTRL codes case BIO_C_GET_WRITE_BUF_SIZE: ret = (long)b->size; break; case BIO_C_GET_WRITE_GUARANTEE: - /* How many bytes can the caller feed to the next write - * without having to keep any? */ + // How many bytes can the caller feed to the next write + // without having to keep any? if (b->peer == NULL || b->closed) { ret = 0; } else { @@ -380,28 +380,28 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { break; case BIO_C_GET_READ_REQUEST: - /* If the peer unsuccessfully tried to read, how many bytes - * were requested? (As with BIO_CTRL_PENDING, that number - * can usually be treated as boolean.) */ + // If the peer unsuccessfully tried to read, how many bytes + // were requested? (As with BIO_CTRL_PENDING, that number + // can usually be treated as boolean.) ret = (long)b->request; break; case BIO_C_RESET_READ_REQUEST: - /* Reset request. (Can be useful after read attempts - * at the other side that are meant to be non-blocking, - * e.g. when probing SSL_read to see if any data is - * available.) */ + // Reset request. (Can be useful after read attempts + // at the other side that are meant to be non-blocking, + // e.g. when probing SSL_read to see if any data is + // available.) 
b->request = 0; ret = 1; break; case BIO_C_SHUTDOWN_WR: - /* similar to shutdown(..., SHUT_WR) */ + // similar to shutdown(..., SHUT_WR) b->closed = 1; ret = 1; break; - /* standard CTRL codes follow */ + // standard CTRL codes follow case BIO_CTRL_GET_CLOSE: ret = bio->shutdown; @@ -453,7 +453,7 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { static const BIO_METHOD methods_biop = { BIO_TYPE_BIO, "BIO pair", bio_write, bio_read, NULL /* puts */, - NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */ + NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */, }; static const BIO_METHOD *bio_s_bio(void) { return &methods_biop; } diff --git a/crypto/bio/printf.c b/crypto/bio/printf.c index f59a15f6..28162e6d 100644 --- a/crypto/bio/printf.c +++ b/crypto/bio/printf.c @@ -55,7 +55,7 @@ * [including the GNU Public Licence.] */ #if !defined(_POSIX_C_SOURCE) -#define _POSIX_C_SOURCE 201410L /* for snprintf, vprintf etc */ +#define _POSIX_C_SOURCE 201410L // for snprintf, vprintf etc #endif #include @@ -77,8 +77,8 @@ int BIO_printf(BIO *bio, const char *format, ...) { va_end(args); #if defined(OPENSSL_WINDOWS) - /* On Windows, vsnprintf returns -1 rather than the requested length on - * truncation */ + // On Windows, vsnprintf returns -1 rather than the requested length on + // truncation if (out_len < 0) { va_start(args, format); out_len = _vscprintf(format, args); @@ -93,9 +93,9 @@ int BIO_printf(BIO *bio, const char *format, ...) { if ((size_t) out_len >= sizeof(buf)) { const int requested_len = out_len; - /* The output was truncated. Note that vsnprintf's return value - * does not include a trailing NUL, but the buffer must be sized - * for it. */ + // The output was truncated. Note that vsnprintf's return value + // does not include a trailing NUL, but the buffer must be sized + // for it. 
out = OPENSSL_malloc(requested_len + 1); out_malloced = 1; if (out == NULL) { diff --git a/crypto/bn_extra/bn_asn1.c b/crypto/bn_extra/bn_asn1.c index efb23355..8b939da5 100644 --- a/crypto/bn_extra/bn_asn1.c +++ b/crypto/bn_extra/bn_asn1.c @@ -31,7 +31,7 @@ int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret) { return 0; } - /* INTEGERs must be minimal. */ + // INTEGERs must be minimal. if (CBS_data(&child)[0] == 0x00 && CBS_len(&child) > 1 && !(CBS_data(&child)[1] & 0x80)) { @@ -50,16 +50,16 @@ int BN_parse_asn1_unsigned_buggy(CBS *cbs, BIGNUM *ret) { return 0; } - /* This function intentionally does not reject negative numbers or non-minimal - * encodings. Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Remove this code and callers in March 2016. */ + // This function intentionally does not reject negative numbers or non-minimal + // encodings. Estonian IDs issued between September 2014 to September 2015 are + // broken. See https://crbug.com/532048 and https://crbug.com/534766. + // + // TODO(davidben): Remove this code and callers in March 2016. return BN_bin2bn(CBS_data(&child), CBS_len(&child), ret) != NULL; } int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) { - /* Negative numbers are unsupported. */ + // Negative numbers are unsupported. if (BN_is_negative(bn)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; @@ -67,8 +67,8 @@ int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER) || - /* The number must be padded with a leading zero if the high bit would - * otherwise be set or if |bn| is zero. */ + // The number must be padded with a leading zero if the high bit would + // otherwise be set or if |bn| is zero. 
(BN_num_bits(bn) % 8 == 0 && !CBB_add_u8(&child, 0x00)) || !BN_bn2cbb_padded(&child, BN_num_bytes(bn), bn) || !CBB_flush(cbb)) { diff --git a/crypto/bn_extra/convert.c b/crypto/bn_extra/convert.c index b48a9711..6f3a062a 100644 --- a/crypto/bn_extra/convert.c +++ b/crypto/bn_extra/convert.c @@ -96,7 +96,7 @@ char *BN_bn2hex(const BIGNUM *bn) { int z = 0; for (int i = bn->top - 1; i >= 0; i--) { for (int j = BN_BITS2 - 8; j >= 0; j -= 8) { - /* strip leading zeros */ + // strip leading zeros int v = ((int)(bn->d[i] >> (long)j)) & 0xff; if (z || v != 0) { *(p++) = hextable[v >> 4]; @@ -110,20 +110,20 @@ char *BN_bn2hex(const BIGNUM *bn) { return buf; } -/* decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. */ +// decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. static int decode_hex(BIGNUM *bn, const char *in, int in_len) { if (in_len > INT_MAX/4) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); return 0; } - /* |in_len| is the number of hex digits. */ + // |in_len| is the number of hex digits. if (!bn_expand(bn, in_len * 4)) { return 0; } int i = 0; while (in_len > 0) { - /* Decode one |BN_ULONG| at a time. */ + // Decode one |BN_ULONG| at a time. int todo = BN_BYTES * 2; if (todo > in_len) { todo = in_len; @@ -143,7 +143,7 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) { hex = c - 'A' + 10; } else { hex = 0; - /* This shouldn't happen. The caller checks |isxdigit|. */ + // This shouldn't happen. The caller checks |isxdigit|. assert(0); } word = (word << 4) | hex; @@ -157,12 +157,12 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) { return 1; } -/* decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. */ +// decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. static int decode_dec(BIGNUM *bn, const char *in, int in_len) { int i, j; BN_ULONG l = 0; - /* Decode |BN_DEC_NUM| digits at a time. */ + // Decode |BN_DEC_NUM| digits at a time. 
j = BN_DEC_NUM - (in_len % BN_DEC_NUM); if (j == BN_DEC_NUM) { j = 0; @@ -207,7 +207,7 @@ static int bn_x2bn(BIGNUM **outp, const char *in, decode_func decode, char_test_ return num; } - /* in is the start of the hex digits, and it is 'i' long */ + // in is the start of the hex digits, and it is 'i' long if (*outp == NULL) { ret = BN_new(); if (ret == NULL) { @@ -243,8 +243,8 @@ int BN_hex2bn(BIGNUM **outp, const char *in) { } char *BN_bn2dec(const BIGNUM *a) { - /* It is easier to print strings little-endian, so we assemble it in reverse - * and fix at the end. */ + // It is easier to print strings little-endian, so we assemble it in reverse + // and fix at the end. BIGNUM *copy = NULL; CBB cbb; if (!CBB_init(&cbb, 16) || @@ -290,7 +290,7 @@ char *BN_bn2dec(const BIGNUM *a) { goto cbb_err; } - /* Reverse the buffer. */ + // Reverse the buffer. for (size_t i = 0; i < len/2; i++) { uint8_t tmp = data[i]; data[i] = data[len - 1 - i]; @@ -349,7 +349,7 @@ int BN_print(BIO *bp, const BIGNUM *a) { for (i = a->top - 1; i >= 0; i--) { for (j = BN_BITS2 - 4; j >= 0; j -= 4) { - /* strip leading zeros */ + // strip leading zeros v = ((int)(a->d[i] >> (long)j)) & 0x0f; if (z || v != 0) { if (BIO_write(bp, &hextable[v], 1) != 1) { @@ -384,8 +384,8 @@ int BN_print_fp(FILE *fp, const BIGNUM *a) { size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) { const size_t bits = BN_num_bits(in); const size_t bytes = (bits + 7) / 8; - /* If the number of bits is a multiple of 8, i.e. if the MSB is set, - * prefix with a zero byte. */ + // If the number of bits is a multiple of 8, i.e. if the MSB is set, + // prefix with a zero byte. int extend = 0; if (bytes != 0 && (bits & 0x07) == 0) { extend = 1; @@ -395,8 +395,8 @@ size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) { if (len < bytes || 4 + len < len || (len & 0xffffffff) != len) { - /* If we cannot represent the number then we emit zero as the interface - * doesn't allow an error to be signalled. 
*/ + // If we cannot represent the number then we emit zero as the interface + // doesn't allow an error to be signalled. if (out) { OPENSSL_memset(out, 0, 4); } diff --git a/crypto/buf/buf.c b/crypto/buf/buf.c index f1fcae64..1305c585 100644 --- a/crypto/buf/buf.c +++ b/crypto/buf/buf.c @@ -97,14 +97,14 @@ static int buf_mem_reserve(BUF_MEM *buf, size_t cap, int clean) { size_t n = cap + 3; if (n < cap) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return 0; } n = n / 3; size_t alloc_size = n * 4; if (alloc_size / 4 != n) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return 0; } @@ -185,7 +185,7 @@ char *BUF_strndup(const char *str, size_t size) { alloc_size = size + 1; if (alloc_size < size) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return NULL; } diff --git a/crypto/bytestring/ber.c b/crypto/bytestring/ber.c index 54bac596..4dc94f6f 100644 --- a/crypto/bytestring/ber.c +++ b/crypto/bytestring/ber.c @@ -21,13 +21,13 @@ #include "../internal.h" -/* kMaxDepth is a just a sanity limit. The code should be such that the length - * of the input being processes always decreases. None the less, a very large - * input could otherwise cause the stack to overflow. */ +// kMaxDepth is just a sanity limit. The code should be such that the length +// of the input being processed always decreases. Nonetheless, a very large +// input could otherwise cause the stack to overflow. static const unsigned kMaxDepth = 2048; -/* is_string_type returns one if |tag| is a string type and zero otherwise. It - * ignores the constructed bit. */ +// is_string_type returns one if |tag| is a string type and zero otherwise. It +// ignores the constructed bit. 
static int is_string_type(unsigned tag) { if ((tag & 0xc0) != 0) { return 0; @@ -52,10 +52,10 @@ static int is_string_type(unsigned tag) { } } -/* cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found| - * depending on whether an indefinite length element or constructed string was - * found. The value of |orig_in| is not changed. It returns one on success (i.e. - * |*ber_found| was set) and zero on error. */ +// cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found| +// depending on whether an indefinite length element or constructed string was +// found. The value of |orig_in| is not changed. It returns one on success (i.e. +// |*ber_found| was set) and zero on error. static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { CBS in; @@ -77,13 +77,13 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { if (CBS_len(&contents) == header_len && header_len > 0 && CBS_data(&contents)[header_len-1] == 0x80) { - /* Found an indefinite-length element. */ + // Found an indefinite-length element. *ber_found = 1; return 1; } if (tag & CBS_ASN1_CONSTRUCTED) { if (is_string_type(tag)) { - /* Constructed strings are only legal in BER and require conversion. */ + // Constructed strings are only legal in BER and require conversion. *ber_found = 1; return 1; } @@ -97,20 +97,20 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { return 1; } -/* is_eoc returns true if |header_len| and |contents|, as returned by - * |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value. */ +// is_eoc returns true if |header_len| and |contents|, as returned by +// |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value. 
static char is_eoc(size_t header_len, CBS *contents) { return header_len == 2 && CBS_len(contents) == 2 && OPENSSL_memcmp(CBS_data(contents), "\x00\x00", 2) == 0; } -/* cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If - * |string_tag| is non-zero, then all elements must match |string_tag| up to the - * constructed bit and primitive element bodies are written to |out| without - * element headers. This is used when concatenating the fragments of a - * constructed string. If |looking_for_eoc| is set then any EOC elements found - * will cause the function to return after consuming it. It returns one on - * success and zero on error. */ +// cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If +// |string_tag| is non-zero, then all elements must match |string_tag| up to the +// constructed bit and primitive element bodies are written to |out| without +// element headers. This is used when concatenating the fragments of a +// constructed string. If |looking_for_eoc| is set then any EOC elements found +// will cause the function to return after consuming it. It returns one on +// success and zero on error. static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, char looking_for_eoc, unsigned depth) { assert(!(string_tag & CBS_ASN1_CONSTRUCTED)); @@ -134,9 +134,9 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } if (string_tag != 0) { - /* This is part of a constructed string. All elements must match - * |string_tag| up to the constructed bit and get appended to |out| - * without a child element. */ + // This is part of a constructed string. All elements must match + // |string_tag| up to the constructed bit and get appended to |out| + // without a child element. 
if ((tag & ~CBS_ASN1_CONSTRUCTED) != string_tag) { return 0; } @@ -144,8 +144,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } else { unsigned out_tag = tag; if ((tag & CBS_ASN1_CONSTRUCTED) && is_string_type(tag)) { - /* If a constructed string, clear the constructed bit and inform - * children to concatenate bodies. */ + // If a constructed string, clear the constructed bit and inform + // children to concatenate bodies. out_tag &= ~CBS_ASN1_CONSTRUCTED; child_string_tag = out_tag; } @@ -157,7 +157,7 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, if (CBS_len(&contents) == header_len && header_len > 0 && CBS_data(&contents)[header_len - 1] == 0x80) { - /* This is an indefinite length element. */ + // This is an indefinite length element. if (!cbs_convert_ber(in, out_contents, child_string_tag, 1 /* looking for eoc */, depth + 1) || !CBB_flush(out)) { @@ -171,13 +171,13 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } if (tag & CBS_ASN1_CONSTRUCTED) { - /* Recurse into children. */ + // Recurse into children. if (!cbs_convert_ber(&contents, out_contents, child_string_tag, 0 /* not looking for eoc */, depth + 1)) { return 0; } } else { - /* Copy primitive contents as-is. */ + // Copy primitive contents as-is. if (!CBB_add_bytes(out_contents, CBS_data(&contents), CBS_len(&contents))) { return 0; @@ -195,8 +195,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len) { CBB cbb; - /* First, do a quick walk to find any indefinite-length elements. Most of the - * time we hope that there aren't any and thus we can quickly return. */ + // First, do a quick walk to find any indefinite-length elements. Most of the + // time we hope that there aren't any and thus we can quickly return. 
char conversion_needed; if (!cbs_find_ber(in, &conversion_needed, 0)) { return 0; @@ -225,14 +225,14 @@ int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage, assert(is_string_type(inner_tag)); if (CBS_peek_asn1_tag(in, outer_tag)) { - /* Normal implicitly-tagged string. */ + // Normal implicitly-tagged string. *out_storage = NULL; return CBS_get_asn1(in, out, outer_tag); } - /* Otherwise, try to parse an implicitly-tagged constructed string. - * |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep - * of nesting. */ + // Otherwise, try to parse an implicitly-tagged constructed string. + // |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep + // of nesting. CBB result; CBS child; if (!CBB_init(&result, CBS_len(in)) || diff --git a/crypto/bytestring/cbb.c b/crypto/bytestring/cbb.c index 14116be5..5576fa96 100644 --- a/crypto/bytestring/cbb.c +++ b/crypto/bytestring/cbb.c @@ -27,7 +27,7 @@ void CBB_zero(CBB *cbb) { } static int cbb_init(CBB *cbb, uint8_t *buf, size_t cap) { - /* This assumes that |cbb| has already been zeroed. */ + // This assumes that |cbb| has already been zeroed. struct cbb_buffer_st *base; base = OPENSSL_malloc(sizeof(struct cbb_buffer_st)); @@ -75,8 +75,8 @@ int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len) { void CBB_cleanup(CBB *cbb) { if (cbb->base) { - /* Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They - * are implicitly discarded when the parent is flushed or cleaned up. */ + // Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They + // are implicitly discarded when the parent is flushed or cleaned up. 
assert(cbb->is_top_level); if (cbb->base->can_resize) { @@ -97,7 +97,7 @@ static int cbb_buffer_reserve(struct cbb_buffer_st *base, uint8_t **out, newlen = base->len + len; if (newlen < base->len) { - /* Overflow */ + // Overflow goto err; } @@ -137,7 +137,7 @@ static int cbb_buffer_add(struct cbb_buffer_st *base, uint8_t **out, if (!cbb_buffer_reserve(base, out, len)) { return 0; } - /* This will not overflow or |cbb_buffer_reserve| would have failed. */ + // This will not overflow or |cbb_buffer_reserve| would have failed. base->len += len; return 1; } @@ -176,7 +176,7 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) { } if (cbb->base->can_resize && (out_data == NULL || out_len == NULL)) { - /* |out_data| and |out_len| can only be NULL if the CBB is fixed. */ + // |out_data| and |out_len| can only be NULL if the CBB is fixed. return 0; } @@ -191,15 +191,15 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) { return 1; } -/* CBB_flush recurses and then writes out any pending length prefix. The - * current length of the underlying base is taken to be the length of the - * length-prefixed data. */ +// CBB_flush recurses and then writes out any pending length prefix. The +// current length of the underlying base is taken to be the length of the +// length-prefixed data. int CBB_flush(CBB *cbb) { size_t child_start, i, len; - /* If |cbb->base| has hit an error, the buffer is in an undefined state, so - * fail all following calls. In particular, |cbb->child| may point to invalid - * memory. */ + // If |cbb->base| has hit an error, the buffer is in an undefined state, so + // fail all following calls. In particular, |cbb->child| may point to invalid + // memory. if (cbb->base == NULL || cbb->base->error) { return 0; } @@ -219,16 +219,16 @@ int CBB_flush(CBB *cbb) { len = cbb->base->len - child_start; if (cbb->child->pending_is_asn1) { - /* For ASN.1 we assume that we'll only need a single byte for the length. 
- * If that turned out to be incorrect, we have to move the contents along - * in order to make space. */ + // For ASN.1 we assume that we'll only need a single byte for the length. + // If that turned out to be incorrect, we have to move the contents along + // in order to make space. uint8_t len_len; uint8_t initial_length_byte; assert (cbb->child->pending_len_len == 1); if (len > 0xfffffffe) { - /* Too large. */ + // Too large. goto err; } else if (len > 0xffffff) { len_len = 5; @@ -249,7 +249,7 @@ int CBB_flush(CBB *cbb) { } if (len_len != 1) { - /* We need to move the contents along in order to make space. */ + // We need to move the contents along in order to make space. size_t extra_bytes = len_len - 1; if (!cbb_buffer_add(cbb->base, NULL, extra_bytes)) { goto err; @@ -331,14 +331,14 @@ int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents) { int CBB_add_asn1(CBB *cbb, CBB *out_contents, unsigned tag) { if (tag > 0xff || (tag & 0x1f) == 0x1f) { - /* Long form identifier octets are not supported. Further, all current valid - * tag serializations are 8 bits. */ + // Long form identifier octets are not supported. Further, all current valid + // tag serializations are 8 bits. cbb->base->error = 1; return 0; } if (!CBB_flush(cbb) || - /* |tag|'s representation matches the DER encoding. */ + // |tag|'s representation matches the DER encoding. !CBB_add_u8(cbb, (uint8_t)tag)) { return 0; } @@ -451,11 +451,11 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) { uint8_t byte = (value >> 8*(7-i)) & 0xff; if (!started) { if (byte == 0) { - /* Don't encode leading zeros. */ + // Don't encode leading zeros. continue; } - /* If the high bit is set, add a padding byte to make it - * unsigned. */ + // If the high bit is set, add a padding byte to make it + // unsigned. if ((byte & 0x80) && !CBB_add_u8(&child, 0)) { return 0; } @@ -466,7 +466,7 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) { } } - /* 0 is encoded as a single 0, not the empty string. 
*/ + // 0 is encoded as a single 0, not the empty string. if (!started && !CBB_add_u8(&child, 0)) { return 0; } diff --git a/crypto/bytestring/cbs.c b/crypto/bytestring/cbs.c index 8328b7fe..ec495d21 100644 --- a/crypto/bytestring/cbs.c +++ b/crypto/bytestring/cbs.c @@ -190,13 +190,13 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, return 0; } - /* ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag - * number no greater than 30. - * - * If the number portion is 31 (0x1f, the largest value that fits in the - * allotted bits), then the tag is more than one byte long and the - * continuation bytes contain the tag number. This parser only supports tag - * numbers less than 31 (and thus single-byte tags). */ + // ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag + // number no greater than 30. + // + // If the number portion is 31 (0x1f, the largest value that fits in the + // allotted bits), then the tag is more than one byte long and the + // continuation bytes contain the tag number. This parser only supports tag + // numbers less than 31 (and thus single-byte tags). if ((tag & 0x1f) == 0x1f) { return 0; } @@ -206,52 +206,51 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, } size_t len; - /* The format for the length encoding is specified in ITU-T X.690 section - * 8.1.3. */ + // The format for the length encoding is specified in ITU-T X.690 section + // 8.1.3. if ((length_byte & 0x80) == 0) { - /* Short form length. */ + // Short form length. len = ((size_t) length_byte) + 2; if (out_header_len != NULL) { *out_header_len = 2; } } else { - /* The high bit indicate that this is the long form, while the next 7 bits - * encode the number of subsequent octets used to encode the length (ITU-T - * X.690 clause 8.1.3.5.b). 
*/ + // The high bit indicates that this is the long form, while the next 7 bits + // encode the number of subsequent octets used to encode the length (ITU-T + // X.690 clause 8.1.3.5.b). const size_t num_bytes = length_byte & 0x7f; uint32_t len32; if (ber_ok && (tag & CBS_ASN1_CONSTRUCTED) != 0 && num_bytes == 0) { - /* indefinite length */ + // indefinite length if (out_header_len != NULL) { *out_header_len = 2; } return CBS_get_bytes(cbs, out, 2); } - /* ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be - * used as the first byte of the length. If this parser encounters that - * value, num_bytes will be parsed as 127, which will fail the check below. - */ + // ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be + // used as the first byte of the length. If this parser encounters that + // value, num_bytes will be parsed as 127, which will fail the check below. if (num_bytes == 0 || num_bytes > 4) { return 0; } if (!cbs_get_u(&header, &len32, num_bytes)) { return 0; } - /* ITU-T X.690 section 10.1 (DER length forms) requires encoding the length - * with the minimum number of octets. */ + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. if (len32 < 128) { - /* Length should have used short-form encoding. */ + // Length should have used short-form encoding. return 0; } if ((len32 >> ((num_bytes-1)*8)) == 0) { - /* Length should have been at least one byte shorter. */ + // Length should have been at least one byte shorter. return 0; } len = len32; if (len + 2 + num_bytes < len) { - /* Overflow. */ + // Overflow. return 0; } len += 2 + num_bytes; @@ -338,23 +337,23 @@ int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) { size_t len = CBS_len(&bytes); if (len == 0) { - /* An INTEGER is encoded with at least one octet. */ + // An INTEGER is encoded with at least one octet. return 0; } if ((data[0] & 0x80) != 0) { - /* Negative number. */ + // Negative number. 
return 0; } if (data[0] == 0 && len > 1 && (data[1] & 0x80) == 0) { - /* Extra leading zeros. */ + // Extra leading zeros. return 0; } for (size_t i = 0; i < len; i++) { if ((*out >> 56) != 0) { - /* Too large to represent as a uint64_t. */ + // Too large to represent as a uint64_t. return 0; } *out <<= 8; @@ -462,7 +461,7 @@ int CBS_is_valid_asn1_bitstring(const CBS *cbs) { return 1; } - /* All num_unused_bits bits must exist and be zeros. */ + // All num_unused_bits bits must exist and be zeros. uint8_t last; if (!CBS_get_last_u8(&in, &last) || (last & ((1 << num_unused_bits) - 1)) != 0) { @@ -480,9 +479,9 @@ int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit) { const unsigned byte_num = (bit >> 3) + 1; const unsigned bit_num = 7 - (bit & 7); - /* Unused bits are zero, and this function does not distinguish between - * missing and unset bits. Thus it is sufficient to do a byte-level length - * check. */ + // Unused bits are zero, and this function does not distinguish between + // missing and unset bits. Thus it is sufficient to do a byte-level length + // check. return byte_num < CBS_len(cbs) && (CBS_data(cbs)[byte_num] & (1 << bit_num)) != 0; } diff --git a/crypto/bytestring/internal.h b/crypto/bytestring/internal.h index 2fed4139..f6ac32cd 100644 --- a/crypto/bytestring/internal.h +++ b/crypto/bytestring/internal.h @@ -22,54 +22,54 @@ extern "C" { #endif -/* CBS_asn1_ber_to_der reads a BER element from |in|. If it finds - * indefinite-length elements or constructed strings then it converts the BER - * data to DER and sets |*out| and |*out_length| to describe a malloced buffer - * containing the DER data. Additionally, |*in| will be advanced over the BER - * element. - * - * If it doesn't find any indefinite-length elements or constructed strings then - * it sets |*out| to NULL and |*in| is unmodified. - * - * This function should successfully process any valid BER input, however it - * will not convert all of BER's deviations from DER. 
BER is ambiguous between - * implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed - * strings. Implicitly-tagged strings must be parsed with - * |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller - * must also account for BER variations in the contents of a primitive. - * - * It returns one on success and zero otherwise. */ +// CBS_asn1_ber_to_der reads a BER element from |in|. If it finds +// indefinite-length elements or constructed strings then it converts the BER +// data to DER and sets |*out| and |*out_length| to describe a malloced buffer +// containing the DER data. Additionally, |*in| will be advanced over the BER +// element. +// +// If it doesn't find any indefinite-length elements or constructed strings then +// it sets |*out| to NULL and |*in| is unmodified. +// +// This function should successfully process any valid BER input, however it +// will not convert all of BER's deviations from DER. BER is ambiguous between +// implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed +// strings. Implicitly-tagged strings must be parsed with +// |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller +// must also account for BER variations in the contents of a primitive. +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len); -/* CBS_get_asn1_implicit_string parses a BER string of primitive type - * |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the - * contents. If concatenation was needed, it sets |*out_storage| to a buffer - * which the caller must release with |OPENSSL_free|. Otherwise, it sets - * |*out_storage| to NULL. - * - * This function does not parse all of BER. It requires the string be - * definite-length. Constructed strings are allowed, but all children of the - * outermost element must be primitive. 
The caller should use - * |CBS_asn1_ber_to_der| before running this function. - * - * It returns one on success and zero otherwise. */ +// CBS_get_asn1_implicit_string parses a BER string of primitive type +// |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the +// contents. If concatenation was needed, it sets |*out_storage| to a buffer +// which the caller must release with |OPENSSL_free|. Otherwise, it sets +// |*out_storage| to NULL. +// +// This function does not parse all of BER. It requires the string be +// definite-length. Constructed strings are allowed, but all children of the +// outermost element must be primitive. The caller should use +// |CBS_asn1_ber_to_der| before running this function. +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage, unsigned outer_tag, unsigned inner_tag); -/* CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized - * with |CBB_init|. If |outp| is not NULL then the result is written to |*outp| - * and |*outp| is advanced just past the output. It returns the number of bytes - * in the result, whether written or not, or a negative value on error. On - * error, it calls |CBB_cleanup| on |cbb|. - * - * This function may be used to help implement legacy i2d ASN.1 functions. */ +// CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized +// with |CBB_init|. If |outp| is not NULL then the result is written to |*outp| +// and |*outp| is advanced just past the output. It returns the number of bytes +// in the result, whether written or not, or a negative value on error. On +// error, it calls |CBB_cleanup| on |cbb|. +// +// This function may be used to help implement legacy i2d ASN.1 functions. 
int CBB_finish_i2d(CBB *cbb, uint8_t **outp); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BYTESTRING_INTERNAL_H */ +#endif // OPENSSL_HEADER_BYTESTRING_INTERNAL_H diff --git a/crypto/chacha/chacha.c b/crypto/chacha/chacha.c index fe32596a..646ef7a6 100644 --- a/crypto/chacha/chacha.c +++ b/crypto/chacha/chacha.c @@ -12,7 +12,7 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* Adapted from the public domain, estream code by D. Bernstein. */ +// Adapted from the public domain, estream code by D. Bernstein. #include @@ -32,7 +32,7 @@ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) -/* ChaCha20_ctr32 is defined in asm/chacha-*.pl. */ +// ChaCha20_ctr32 is defined in asm/chacha-*.pl. void ChaCha20_ctr32(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); @@ -48,7 +48,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t *key_ptr = (const uint32_t *)key; #if !defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) - /* The assembly expects the key to be four-byte aligned. */ + // The assembly expects the key to be four-byte aligned. uint32_t key_u32[8]; if ((((uintptr_t)key) & 3) != 0) { key_u32[0] = U8TO32_LITTLE(key + 0); @@ -69,7 +69,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, #else -/* sigma contains the ChaCha constants, which happen to be an ASCII string. */ +// sigma contains the ChaCha constants, which happen to be an ASCII string. static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' }; @@ -83,15 +83,15 @@ static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', (p)[3] = (v >> 24) & 0xff; \ } -/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. 
*/ +// QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. #define QUARTERROUND(a, b, c, d) \ x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 16); \ x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 12); \ x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 8); \ x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 7); -/* chacha_core performs 20 rounds of ChaCha on the input words in - * |input| and writes the 64 output bytes to |output|. */ +// chacha_core performs 20 rounds of ChaCha on the input words in +// |input| and writes the 64 output bytes to |output|. static void chacha_core(uint8_t output[64], const uint32_t input[16]) { uint32_t x[16]; int i; diff --git a/crypto/cipher_extra/aead_test.cc b/crypto/cipher_extra/aead_test.cc index a699890a..a40d673c 100644 --- a/crypto/cipher_extra/aead_test.cc +++ b/crypto/cipher_extra/aead_test.cc @@ -393,14 +393,14 @@ TEST_P(PerAEADTest, CleanupAfterInitFailure) { 9999 /* a silly tag length to trigger an error */, NULL /* ENGINE */)); ERR_clear_error(); - /* Running a second, failed _init should not cause a memory leak. */ + // Running a second, failed _init should not cause a memory leak. ASSERT_FALSE(EVP_AEAD_CTX_init( &ctx, aead(), key, key_len, 9999 /* a silly tag length to trigger an error */, NULL /* ENGINE */)); ERR_clear_error(); - /* Calling _cleanup on an |EVP_AEAD_CTX| after a failed _init should be a - * no-op. */ + // Calling _cleanup on an |EVP_AEAD_CTX| after a failed _init should be a + // no-op. EVP_AEAD_CTX_cleanup(&ctx); } diff --git a/crypto/cipher_extra/e_aesctrhmac.c b/crypto/cipher_extra/e_aesctrhmac.c index 3034b8ff..9c357f48 100644 --- a/crypto/cipher_extra/e_aesctrhmac.c +++ b/crypto/cipher_extra/e_aesctrhmac.c @@ -66,13 +66,13 @@ static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key, if (key_len < hmac_key_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ + return 0; // EVP_AEAD_CTX_init should catch this. 
} const size_t aes_key_len = key_len - hmac_key_len; if (aes_key_len != 16 && aes_key_len != 32) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ + return 0; // EVP_AEAD_CTX_init should catch this. } if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { @@ -131,7 +131,7 @@ static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH], SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); SHA256_Update(&sha256, ad, ad_len); - /* Pad with zeros to the end of the SHA-256 block. */ + // Pad with zeros to the end of the SHA-256 block. const unsigned num_padding = (SHA256_CBLOCK - ((sizeof(uint64_t)*2 + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) % @@ -154,8 +154,8 @@ static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH], static void aead_aes_ctr_hmac_sha256_crypt( const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out, const uint8_t *in, size_t len, const uint8_t *nonce) { - /* Since the AEAD operation is one-shot, keeping a buffer of unused keystream - * bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. */ + // Since the AEAD operation is one-shot, keeping a buffer of unused keystream + // bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. uint8_t partial_block_buffer[AES_BLOCK_SIZE]; unsigned partial_block_offset = 0; OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer)); @@ -184,7 +184,7 @@ static int aead_aes_ctr_hmac_sha256_seal_scatter( const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) { - /* This input is so large it would overflow the 32-bit block counter. */ + // This input is so large it would overflow the 32-bit block counter. 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } @@ -242,10 +242,10 @@ static int aead_aes_ctr_hmac_sha256_open_gather( static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = { 16 /* AES key */ + 32 /* HMAC key */, - 12, /* nonce length */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 12, // nonce length + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_ctr_hmac_sha256_init, NULL /* init_with_direction */, @@ -259,10 +259,10 @@ static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = { static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = { 32 /* AES key */ + 32 /* HMAC key */, - 12, /* nonce length */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 12, // nonce length + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_ctr_hmac_sha256_init, NULL /* init_with_direction */, diff --git a/crypto/cipher_extra/e_aesgcmsiv.c b/crypto/cipher_extra/e_aesgcmsiv.c index 6adcf17a..8c6589d6 100644 --- a/crypto/cipher_extra/e_aesgcmsiv.c +++ b/crypto/cipher_extra/e_aesgcmsiv.c @@ -29,20 +29,20 @@ #if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) -/* Optimised AES-GCM-SIV */ +// Optimised AES-GCM-SIV struct aead_aes_gcm_siv_asm_ctx { alignas(16) uint8_t key[16*15]; int is_128_bit; }; -/* aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to - * |out_expanded_key|. */ +// aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to +// |out_expanded_key|. 
extern void aes128gcmsiv_aes_ks( const uint8_t key[16], uint8_t out_expanded_key[16*15]); -/* aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to - * |out_expanded_key|. */ +// aes256gcmsiv_aes_ks writes an AES-256 key schedule for |key| to +// |out_expanded_key|. extern void aes256gcmsiv_aes_ks( const uint8_t key[16], uint8_t out_expanded_key[16*15]); @@ -52,7 +52,7 @@ static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, if (key_bits != 128 && key_bits != 256) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ + return 0; // EVP_AEAD_CTX_init should catch this. } if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { @@ -70,7 +70,7 @@ static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, return 0; } - /* malloc should return a 16-byte-aligned address. */ + // malloc should return a 16-byte-aligned address. assert((((uintptr_t)gcm_siv_ctx) & 15) == 0); if (key_bits == 128) { @@ -92,123 +92,123 @@ static void aead_aes_gcm_siv_asm_cleanup(EVP_AEAD_CTX *ctx) { OPENSSL_free(gcm_siv_asm_ctx); } -/* aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to - * include a number (|in_blocks|) of 16-byte blocks of data from |in|, given - * the POLYVAL key in |key|. */ +// aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to +// include a number (|in_blocks|) of 16-byte blocks of data from |in|, given +// the POLYVAL key in |key|. extern void aesgcmsiv_polyval_horner(const uint8_t in_out_poly[16], const uint8_t key[16], const uint8_t *in, size_t in_blocks); -/* aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|. */ +// aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|. extern void aesgcmsiv_htable_init(uint8_t out_htable[16 * 8], const uint8_t auth_key[16]); -/* aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|. 
 */ +// aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|. extern void aesgcmsiv_htable6_init(uint8_t out_htable[16 * 6], const uint8_t auth_key[16]); -/* aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to - * include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple - * of 16.) It uses the precomputed powers of the key given in |htable|. */ +// aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to +// include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple +// of 16.) It uses the precomputed powers of the key given in |htable|. extern void aesgcmsiv_htable_polyval(const uint8_t htable[16 * 8], const uint8_t *in, size_t in_len, uint8_t in_out_poly[16]); -/* aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to - * |in|. (The full value of |in_len| is still used to find the authentication - * tag appended to the ciphertext, however, so must not be pre-masked.) - * - * |in| and |out| may be equal, but must not otherwise overlap. - * - * While decrypting, it updates the POLYVAL value found at the beginning of - * |in_out_calculated_tag_and_scratch| and writes the updated value back before - * return. During executation, it may use the whole of this space for other - * purposes. In order to decrypt and update the POLYVAL value, it uses the - * expanded key from |key| and the table of powers in |htable|. */ +// aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to +// |in|. (The full value of |in_len| is still used to find the authentication +// tag appended to the ciphertext, however, so must not be pre-masked.) +// +// |in| and |out| may be equal, but must not otherwise overlap. +// +// While decrypting, it updates the POLYVAL value found at the beginning of +// |in_out_calculated_tag_and_scratch| and writes the updated value back before +// return. During execution, it may use the whole of this space for other +// purposes. 
In order to decrypt and update the POLYVAL value, it uses the +// expanded key from |key| and the table of powers in |htable|. extern void aes128gcmsiv_dec(const uint8_t *in, uint8_t *out, uint8_t in_out_calculated_tag_and_scratch[16 * 8], const uint8_t htable[16 * 6], const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256. */ +// aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256. extern void aes256gcmsiv_dec(const uint8_t *in, uint8_t *out, uint8_t in_out_calculated_tag_and_scratch[16 * 8], const uint8_t htable[16 * 6], const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from - * |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of - * the nonce are used, 16 bytes are read and so the value must be - * right-padded. */ +// aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from +// |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of +// the nonce are used, 16 bytes are read and so the value must be +// right-padded. extern void aes128gcmsiv_kdf(const uint8_t nonce[16], uint64_t out_key_material[8], const uint8_t *key_schedule); -/* aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256. */ +// aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256. extern void aes256gcmsiv_kdf(const uint8_t nonce[16], uint64_t out_key_material[12], const uint8_t *key_schedule); -/* aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in - * |key|, writes the expanded key to |out_expanded_key| and encrypts a single - * block from |in| to |out|. */ +// aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in +// |key|, writes the expanded key to |out_expanded_key| and encrypts a single +// block from |in| to |out|. 
extern void aes128gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16], uint8_t out_expanded_key[16 * 15], const uint64_t key[2]); -/* aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for - * AES-256. */ +// aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for +// AES-256. extern void aes256gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16], uint8_t out_expanded_key[16 * 15], const uint64_t key[4]); -/* aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using - * the expanded key in |expanded_key|. */ +// aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using +// the expanded key in |expanded_key|. extern void aes128gcmsiv_ecb_enc_block( const uint8_t in[16], uint8_t out[16], const struct aead_aes_gcm_siv_asm_ctx *expanded_key); -/* aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for - * AES-256. */ +// aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for +// AES-256. extern void aes256gcmsiv_ecb_enc_block( const uint8_t in[16], uint8_t out[16], const struct aead_aes_gcm_siv_asm_ctx *expanded_key); -/* aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the - * expanded key from |key|. (The value of |in_len| must be a multiple of 16.) - * The |in| and |out| buffers may be equal but must not otherwise overlap. The - * initial counter is constructed from the given |tag| as required by - * AES-GCM-SIV. */ +// aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the +// expanded key from |key|. (The value of |in_len| must be a multiple of 16.) +// The |in| and |out| buffers may be equal but must not otherwise overlap. The +// initial counter is constructed from the given |tag| as required by +// AES-GCM-SIV. 
extern void aes128gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out, const uint8_t *tag, const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for - * AES-256. */ +// aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for +// AES-256. extern void aes256gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out, const uint8_t *tag, const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is - * optimised for longer messages. */ +// aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is +// optimised for longer messages. extern void aes128gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out, const uint8_t *tag, const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is - * optimised for longer messages. */ +// aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is +// optimised for longer messages. extern void aes256gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out, const uint8_t *tag, const struct aead_aes_gcm_siv_asm_ctx *key, size_t in_len); -/* gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext - * and AD. The result is written to |out_tag|. */ +// gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext +// and AD. The result is written to |out_tag|. static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len, const uint8_t auth_key[16], @@ -268,10 +268,10 @@ static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in, out_tag[15] &= 0x7f; } -/* aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption - * (same thing in CTR mode) of the final block of a plaintext/ciphertext. It - * writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter - * derived from |tag|. 
*/ +// aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption +// (same thing in CTR mode) of the final block of a plaintext/ciphertext. It +// writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter +// derived from |tag|. static void aead_aes_gcm_siv_asm_crypt_last_block( int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t tag[16], @@ -299,8 +299,8 @@ static void aead_aes_gcm_siv_asm_crypt_last_block( } } -/* aead_aes_gcm_siv_kdf calculates the record encryption and authentication - * keys given the |nonce|. */ +// aead_aes_gcm_siv_kdf calculates the record encryption and authentication +// keys given the |nonce|. static void aead_aes_gcm_siv_kdf( int is_128_bit, const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx, uint64_t out_record_auth_key[2], uint64_t out_record_enc_key[4], @@ -433,8 +433,8 @@ static int aead_aes_gcm_siv_asm_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } else { aes256gcmsiv_aes_ks((const uint8_t *) record_enc_key, &expanded_key.key[0]); } - /* calculated_tag is 16*8 bytes, rather than 16 bytes, because - * aes[128|256]gcmsiv_dec uses the extra as scratch space. */ + // calculated_tag is 16*8 bytes, rather than 16 bytes, because + // aes[128|256]gcmsiv_dec uses the extra as scratch space. 
alignas(16) uint8_t calculated_tag[16 * 8] = {0}; OPENSSL_memset(calculated_tag, 0, EVP_AEAD_AES_GCM_SIV_TAG_LEN); @@ -507,11 +507,11 @@ static int aead_aes_gcm_siv_asm_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } static const EVP_AEAD aead_aes_128_gcm_siv_asm = { - 16, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 16, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_gcm_siv_asm_init, NULL /* init_with_direction */, @@ -524,11 +524,11 @@ static const EVP_AEAD aead_aes_128_gcm_siv_asm = { }; static const EVP_AEAD aead_aes_256_gcm_siv_asm = { - 32, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 32, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_gcm_siv_asm_init, NULL /* init_with_direction */, @@ -540,7 +540,7 @@ static const EVP_AEAD aead_aes_256_gcm_siv_asm = { NULL /* tag_len */, }; -#endif /* X86_64 && !NO_ASM */ +#endif // X86_64 && !NO_ASM struct aead_aes_gcm_siv_ctx { union { @@ -557,7 +557,7 @@ static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key, if (key_bits != 128 && key_bits != 256) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ + return 0; // EVP_AEAD_CTX_init should catch this. 
} if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { @@ -590,13 +590,13 @@ static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) { OPENSSL_free(gcm_siv_ctx); } -/* gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from - * |in| to |out|, using the block function |enc_block| with |key| in counter - * mode, starting at |initial_counter|. This differs from the traditional - * counter mode code in that the counter is handled little-endian, only the - * first four bytes are used and the GCM-SIV tweak to the final byte is - * applied. The |in| and |out| pointers may be equal but otherwise must not - * alias. */ +// gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from +// |in| to |out|, using the block function |enc_block| with |key| in counter +// mode, starting at |initial_counter|. This differs from the traditional +// counter mode code in that the counter is handled little-endian, only the +// first four bytes are used and the GCM-SIV tweak to the final byte is +// applied. The |in| and |out| pointers may be equal but otherwise must not +// alias. static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t initial_counter[AES_BLOCK_SIZE], block128_f enc_block, const AES_KEY *key) { @@ -626,8 +626,8 @@ static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len, } } -/* gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and - * AD. The result is written to |out_tag|. */ +// gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and +// AD. The result is written to |out_tag|. static void gcm_siv_polyval( uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len, const uint8_t auth_key[16], @@ -671,7 +671,7 @@ static void gcm_siv_polyval( out_tag[15] &= 0x7f; } -/* gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. */ +// gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. 
struct gcm_siv_record_keys { uint8_t auth_key[16]; union { @@ -681,8 +681,8 @@ struct gcm_siv_record_keys { block128_f enc_block; }; -/* gcm_siv_keys calculates the keys for a specific GCM-SIV record with the - * given nonce and writes them to |*out_keys|. */ +// gcm_siv_keys calculates the keys for a specific GCM-SIV record with the +// given nonce and writes them to |*out_keys|. static void gcm_siv_keys( const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx, struct gcm_siv_record_keys *out_keys, @@ -793,11 +793,11 @@ static int aead_aes_gcm_siv_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, } static const EVP_AEAD aead_aes_128_gcm_siv = { - 16, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 16, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_gcm_siv_init, NULL /* init_with_direction */, @@ -810,11 +810,11 @@ static const EVP_AEAD aead_aes_128_gcm_siv = { }; static const EVP_AEAD aead_aes_256_gcm_siv = { - 32, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + 32, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in aead_aes_gcm_siv_init, NULL /* init_with_direction */, @@ -859,4 +859,4 @@ const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) { return &aead_aes_256_gcm_siv; } -#endif /* X86_64 && !NO_ASM */ +#endif // X86_64 && !NO_ASM diff --git a/crypto/cipher_extra/e_chacha20poly1305.c b/crypto/cipher_extra/e_chacha20poly1305.c index 
8946f1ff..d80a9109 100644 --- a/crypto/cipher_extra/e_chacha20poly1305.c +++ b/crypto/cipher_extra/e_chacha20poly1305.c @@ -120,7 +120,7 @@ static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key, } if (key_len != sizeof(c20_ctx->key)) { - return 0; /* internal error - EVP_AEAD_CTX_init should catch this. */ + return 0; // internal error - EVP_AEAD_CTX_init should catch this. } c20_ctx = OPENSSL_malloc(sizeof(struct aead_chacha20_poly1305_ctx)); @@ -152,7 +152,7 @@ static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) { CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes)); } -/* calc_tag fills |tag| with the authentication tag for the given inputs. */ +// calc_tag fills |tag| with the authentication tag for the given inputs. static void calc_tag(uint8_t tag[POLY1305_TAG_LEN], const struct aead_chacha20_poly1305_ctx *c20_ctx, const uint8_t nonce[12], const uint8_t *ad, size_t ad_len, @@ -164,7 +164,7 @@ static void calc_tag(uint8_t tag[POLY1305_TAG_LEN], CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), c20_ctx->key, nonce, 0); - static const uint8_t padding[16] = { 0 }; /* Padding is all zeros. */ + static const uint8_t padding[16] = { 0 }; // Padding is all zeros. poly1305_state ctx; CRYPTO_poly1305_init(&ctx, poly1305_key); CRYPTO_poly1305_update(&ctx, ad, ad_len); @@ -203,12 +203,12 @@ static int aead_chacha20_poly1305_seal_scatter( return 0; } - /* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow - * individual operations that work on more than 256GB at a time. - * |in_len_64| is needed because, on 32-bit platforms, size_t is only - * 32-bits and this produces a warning because it's always false. - * Casting to uint64_t inside the conditional is not sufficient to stop - * the warning. */ + // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow + // individual operations that work on more than 256GB at a time. 
+ // |in_len_64| is needed because, on 32-bit platforms, size_t is only + // 32-bits and this produces a warning because it's always false. + // Casting to uint64_t inside the conditional is not sufficient to stop + // the warning. const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); @@ -220,8 +220,8 @@ return 0; } - /* The the extra input is given, it is expected to be very short and so is - * encrypted byte-by-byte first. */ + // If the extra input is given, it is expected to be very short and so is + // encrypted byte-by-byte first. if (extra_in_len) { static const size_t kChaChaBlockSize = 64; uint32_t block_counter = 1 + (in_len / kChaChaBlockSize); @@ -275,12 +275,12 @@ static int aead_chacha20_poly1305_open_gather( return 0; } - /* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow - * individual operations that work on more than 256GB at a time. - * |in_len_64| is needed because, on 32-bit platforms, size_t is only - * 32-bits and this produces a warning because it's always false. - * Casting to uint64_t inside the conditional is not sufficient to stop - * the warning. */ + // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow + // individual operations that work on more than 256GB at a time. + // |in_len_64| is needed because, on 32-bit platforms, size_t is only + // 32-bits and this produces a warning because it's always false. + // Casting to uint64_t inside the conditional is not sufficient to stop + // the warning. 
const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); @@ -307,20 +307,20 @@ static int aead_chacha20_poly1305_open_gather( } static const EVP_AEAD aead_chacha20_poly1305 = { - 32, /* key len */ - 12, /* nonce len */ - POLY1305_TAG_LEN, /* overhead */ - POLY1305_TAG_LEN, /* max tag length */ - 1, /* seal_scatter_supports_extra_in */ + 32, // key len + 12, // nonce len + POLY1305_TAG_LEN, // overhead + POLY1305_TAG_LEN, // max tag length + 1, // seal_scatter_supports_extra_in aead_chacha20_poly1305_init, - NULL, /* init_with_direction */ + NULL, // init_with_direction aead_chacha20_poly1305_cleanup, NULL /* open */, aead_chacha20_poly1305_seal_scatter, aead_chacha20_poly1305_open_gather, - NULL, /* get_iv */ - NULL, /* tag_len */ + NULL, // get_iv + NULL, // tag_len }; const EVP_AEAD *EVP_aead_chacha20_poly1305(void) { diff --git a/crypto/cipher_extra/e_rc2.c b/crypto/cipher_extra/e_rc2.c index a18229c9..dc42dd59 100644 --- a/crypto/cipher_extra/e_rc2.c +++ b/crypto/cipher_extra/e_rc2.c @@ -317,7 +317,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { unsigned int c, d; k = (uint8_t *)&key->data[0]; - *k = 0; /* for if there is a zero length key */ + *k = 0; // for if there is a zero length key if (len > 128) { len = 128; @@ -333,7 +333,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = data[i]; } - /* expand table */ + // expand table d = k[len - 1]; j = 0; for (i = len; i < 128; i++, j++) { @@ -341,7 +341,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = d; } - /* hmm.... key reduction to 'bits' bits */ + // hmm.... 
key reduction to 'bits' bits j = (bits + 7) >> 3; i = 128 - j; @@ -354,7 +354,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = d; } - /* copy from bytes into uint16_t's */ + // copy from bytes into uint16_t's ki = &(key->data[63]); for (i = 127; i >= 0; i -= 2) { *(ki--) = ((k[i] << 8) | k[i - 1]) & 0xffff; @@ -362,8 +362,8 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { } typedef struct { - int key_bits; /* effective key bits */ - RC2_KEY ks; /* key schedule */ + int key_bits; // effective key bits + RC2_KEY ks; // key schedule } EVP_RC2_KEY; static int rc2_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, @@ -399,8 +399,8 @@ static int rc2_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) { key->key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8; return 1; case EVP_CTRL_SET_RC2_KEY_BITS: - /* Should be overridden by later call to |EVP_CTRL_INIT|, but - * people call it, so it may as well work. */ + // Should be overridden by later call to |EVP_CTRL_INIT|, but + // people call it, so it may as well work. key->key_bits = arg; return 1; diff --git a/crypto/cipher_extra/e_ssl3.c b/crypto/cipher_extra/e_ssl3.c index dc437136..61f25cad 100644 --- a/crypto/cipher_extra/e_ssl3.c +++ b/crypto/cipher_extra/e_ssl3.c @@ -40,8 +40,8 @@ static int ssl3_mac(AEAD_SSL3_CTX *ssl3_ctx, uint8_t *out, unsigned *out_len, size_t md_size = EVP_MD_CTX_size(&ssl3_ctx->md_ctx); size_t pad_len = (md_size == 20) ? 40 : 48; - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. 
uint8_t ad_extra[2]; ad_extra[0] = (uint8_t)(in_len >> 8); ad_extra[1] = (uint8_t)(in_len & 0xff); @@ -135,8 +135,8 @@ static size_t aead_ssl3_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len, } const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx); - /* An overflow of |in_len + digest_len| doesn't affect the result mod - * |block_size|, provided that |block_size| is a smaller power of two. */ + // An overflow of |in_len + digest_len| doesn't affect the result mod + // |block_size|, provided that |block_size| is a smaller power of two. assert(block_size != 0 && (block_size & (block_size - 1)) == 0); const size_t pad_len = block_size - ((in_len + digest_len) % block_size); return digest_len + pad_len; @@ -153,13 +153,13 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state; if (!ssl3_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */ + // Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } @@ -179,15 +179,15 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* Compute the MAC. This must be first in case the operation is being done - * in-place. */ + // Compute the MAC. This must be first in case the operation is being done + // in-place. uint8_t mac[EVP_MAX_MD_SIZE]; unsigned mac_len; if (!ssl3_mac(ssl3_ctx, mac, &mac_len, ad, ad_len, in, in_len)) { return 0; } - /* Encrypt the input. */ + // Encrypt the input. 
int len; if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) { @@ -196,9 +196,9 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx); - /* Feed the MAC into the cipher in two steps. First complete the final partial - * block from encrypting the input and split the result between |out| and - * |out_tag|. Then encrypt the remainder. */ + // Feed the MAC into the cipher in two steps. First complete the final partial + // block from encrypting the input and split the result between |out| and + // |out_tag|. Then encrypt the remainder. size_t early_mac_len = (block_size - (in_len % block_size)) % block_size; if (early_mac_len != 0) { @@ -225,7 +225,7 @@ static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, assert(block_size <= 256); assert(EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); - /* Compute padding and feed that into the cipher. */ + // Compute padding and feed that into the cipher. uint8_t padding[256]; size_t padding_len = block_size - ((in_len + mac_len) % block_size); OPENSSL_memset(padding, 0, padding_len - 1); @@ -255,7 +255,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state; if (ssl3_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */ + // Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } @@ -267,8 +267,8 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (max_out_len < in_len) { - /* This requires that the caller provide space for the MAC, even though it - * will always be removed on return. */ + // This requires that the caller provide space for the MAC, even though it + // will always be removed on return. 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -284,12 +284,12 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - /* Decrypt to get the plaintext + MAC + padding. */ + // Decrypt to get the plaintext + MAC + padding. size_t total = 0; int len; if (!EVP_DecryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) { @@ -302,9 +302,9 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, total += len; assert(total == in_len); - /* Remove CBC padding and MAC. This would normally be timing-sensitive, but - * SSLv3 CBC ciphers are already broken. Support will be removed eventually. - * https://www.openssl.org/~bodo/ssl-poodle.pdf */ + // Remove CBC padding and MAC. This would normally be timing-sensitive, but + // SSLv3 CBC ciphers are already broken. Support will be removed eventually. + // https://www.openssl.org/~bodo/ssl-poodle.pdf size_t data_len; if (EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) { unsigned padding_length = out[total - 1]; @@ -312,7 +312,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } - /* The padding must be minimal. */ + // The padding must be minimal. if (padding_length + 1 > EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; @@ -322,7 +322,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, data_len = total - mac_len; } - /* Compute the MAC and compare against the one in the record. */ + // Compute the MAC and compare against the one in the record. 
uint8_t mac[EVP_MAX_MD_SIZE]; if (!ssl3_mac(ssl3_ctx, mac, NULL, ad, ad_len, out, data_len)) { return 0; @@ -378,70 +378,70 @@ static int aead_null_sha1_ssl3_init(EVP_AEAD_CTX *ctx, const uint8_t *key, } static const EVP_AEAD aead_aes_128_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_128_cbc_sha1_ssl3_init, aead_ssl3_cleanup, aead_ssl3_open, aead_ssl3_seal_scatter, - NULL, /* open_gather */ + NULL, // open_gather aead_ssl3_get_iv, aead_ssl3_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_256_cbc_sha1_ssl3_init, aead_ssl3_cleanup, aead_ssl3_open, aead_ssl3_seal_scatter, - NULL, /* open_gather */ + NULL, // open_gather aead_ssl3_get_iv, aead_ssl3_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */ - 0, /* nonce len */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV) + 0, // nonce len + 8 + 
SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_des_ede3_cbc_sha1_ssl3_init, aead_ssl3_cleanup, aead_ssl3_open, aead_ssl3_seal_scatter, - NULL, /* open_gather */ + NULL, // open_gather aead_ssl3_get_iv, aead_ssl3_tag_len, }; static const EVP_AEAD aead_null_sha1_ssl3 = { - SHA_DIGEST_LENGTH, /* key len */ - 0, /* nonce len */ - SHA_DIGEST_LENGTH, /* overhead (SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH, // key len + 0, // nonce len + SHA_DIGEST_LENGTH, // overhead (SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_null_sha1_ssl3_init, aead_ssl3_cleanup, aead_ssl3_open, aead_ssl3_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_ssl3_tag_len, }; diff --git a/crypto/cipher_extra/e_tls.c b/crypto/cipher_extra/e_tls.c index ca206abd..4b87983a 100644 --- a/crypto/cipher_extra/e_tls.c +++ b/crypto/cipher_extra/e_tls.c @@ -33,12 +33,12 @@ typedef struct { EVP_CIPHER_CTX cipher_ctx; HMAC_CTX hmac_ctx; - /* mac_key is the portion of the key used for the MAC. It is retained - * separately for the constant-time CBC code. */ + // mac_key is the portion of the key used for the MAC. It is retained + // separately for the constant-time CBC code. uint8_t mac_key[EVP_MAX_MD_SIZE]; uint8_t mac_key_len; - /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit - * IV. */ + // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit + // IV. 
char implicit_iv; } AEAD_TLS_CTX; @@ -111,8 +111,8 @@ static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len, } const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); - /* An overflow of |in_len + hmac_len| doesn't affect the result mod - * |block_size|, provided that |block_size| is a smaller power of two. */ + // An overflow of |in_len + hmac_len| doesn't affect the result mod + // |block_size|, provided that |block_size| is a smaller power of two. assert(block_size != 0 && (block_size & (block_size - 1)) == 0); const size_t pad_len = block_size - (in_len + hmac_len) % block_size; return hmac_len + pad_len; @@ -129,13 +129,13 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; if (!tls_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */ + // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } @@ -155,14 +155,14 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. uint8_t ad_extra[2]; ad_extra[0] = (uint8_t)(in_len >> 8); ad_extra[1] = (uint8_t)(in_len & 0xff); - /* Compute the MAC. This must be first in case the operation is being done - * in-place. */ + // Compute the MAC. This must be first in case the operation is being done + // in-place. 
uint8_t mac[EVP_MAX_MD_SIZE]; unsigned mac_len; if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) || @@ -173,14 +173,14 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* Configure the explicit IV. */ + // Configure the explicit IV. if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && !tls_ctx->implicit_iv && !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) { return 0; } - /* Encrypt the input. */ + // Encrypt the input. int len; if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) { return 0; @@ -188,9 +188,9 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); - /* Feed the MAC into the cipher in two steps. First complete the final partial - * block from encrypting the input and split the result between |out| and - * |out_tag|. Then feed the rest. */ + // Feed the MAC into the cipher in two steps. First complete the final partial + // block from encrypting the input and split the result between |out| and + // |out_tag|. Then feed the rest. const size_t early_mac_len = (block_size - (in_len % block_size) % block_size); @@ -218,7 +218,7 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, assert(block_size <= 256); assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); - /* Compute padding and feed that into the cipher. */ + // Compute padding and feed that into the cipher. uint8_t padding[256]; unsigned padding_len = block_size - ((in_len + mac_len) % block_size); OPENSSL_memset(padding, padding_len - 1, padding_len); @@ -232,7 +232,7 @@ static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) { return 0; } - assert(len == 0); /* Padding is explicit. */ + assert(len == 0); // Padding is explicit. 
assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len)); *out_tag_len = tag_len; @@ -246,7 +246,7 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; if (tls_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */ + // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } @@ -257,8 +257,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, } if (max_out_len < in_len) { - /* This requires that the caller provide space for the MAC, even though it - * will always be removed on return. */ + // This requires that the caller provide space for the MAC, even though it + // will always be removed on return. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -274,19 +274,19 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - /* Configure the explicit IV. */ + // Configure the explicit IV. if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && !tls_ctx->implicit_iv && !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) { return 0; } - /* Decrypt to get the plaintext + MAC + padding. */ + // Decrypt to get the plaintext + MAC + padding. size_t total = 0; int len; if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) { @@ -299,8 +299,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, total += len; assert(total == in_len); - /* Remove CBC padding. Code from here on is timing-sensitive with respect to - * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */ + // Remove CBC padding. 
Code from here on is timing-sensitive with respect to + // |padding_ok| and |data_plus_mac_len| for CBC ciphers. size_t data_plus_mac_len; crypto_word_t padding_ok; if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) { @@ -308,32 +308,32 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, &padding_ok, &data_plus_mac_len, out, total, EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx), HMAC_size(&tls_ctx->hmac_ctx))) { - /* Publicly invalid. This can be rejected in non-constant time. */ + // Publicly invalid. This can be rejected in non-constant time. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } } else { padding_ok = CONSTTIME_TRUE_W; data_plus_mac_len = total; - /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has - * already been checked against the MAC size at the top of the function. */ + // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has + // already been checked against the MAC size at the top of the function. assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx)); } size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx); - /* At this point, if the padding is valid, the first |data_plus_mac_len| bytes - * after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is - * still large enough to extract a MAC, but it will be irrelevant. */ + // At this point, if the padding is valid, the first |data_plus_mac_len| bytes + // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is + // still large enough to extract a MAC, but it will be irrelevant. - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. 
uint8_t ad_fixed[13]; OPENSSL_memcpy(ad_fixed, ad, 11); ad_fixed[11] = (uint8_t)(data_len >> 8); ad_fixed[12] = (uint8_t)(data_len & 0xff); ad_len += 2; - /* Compute the MAC and extract the one in the record. */ + // Compute the MAC and extract the one in the record. uint8_t mac[EVP_MAX_MD_SIZE]; size_t mac_len; uint8_t record_mac_tmp[EVP_MAX_MD_SIZE]; @@ -351,8 +351,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, record_mac = record_mac_tmp; EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total); } else { - /* We should support the constant-time path for all CBC-mode ciphers - * implemented. */ + // We should support the constant-time path for all CBC-mode ciphers + // implemented. assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE); unsigned mac_len_u; @@ -368,10 +368,10 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, record_mac = &out[data_len]; } - /* Perform the MAC check and the padding check in constant-time. It should be - * safe to simply perform the padding check first, but it would not be under a - * different choice of MAC location on padding failure. See - * EVP_tls_cbc_remove_padding. */ + // Perform the MAC check and the padding check in constant-time. It should be + // safe to simply perform the padding check first, but it would not be under a + // different choice of MAC location on padding failure. See + // EVP_tls_cbc_remove_padding. crypto_word_t good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0); good &= padding_ok; @@ -380,7 +380,7 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, return 0; } - /* End of timing-sensitive code. */ + // End of timing-sensitive code. 
*out_len = data_len; return 1; @@ -474,172 +474,172 @@ static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, } static const EVP_AEAD aead_aes_128_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */ - 16, /* nonce len (IV) */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 16, // key len (SHA1 + AES128) + 16, // nonce len (IV) + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_128_cbc_sha1_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_128_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - aead_tls_get_iv, /* get_iv */ + NULL, // open_gather + aead_tls_get_iv, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_128_cbc_sha256_tls = { - SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */ - 16, /* nonce len (IV) */ - 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */ - SHA256_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA256_DIGEST_LENGTH + 16, // key len (SHA256 + AES128) + 16, // nonce len (IV) + 
16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256) + SHA256_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_128_cbc_sha256_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */ - 16, /* nonce len (IV) */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 32, // key len (SHA1 + AES256) + 16, // nonce len (IV) + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_256_cbc_sha1_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_256_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - aead_tls_get_iv, /* get_iv */ + NULL, // open_gather + aead_tls_get_iv, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha256_tls = { - SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */ - 16, /* nonce len 
(IV) */ - 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */ - SHA256_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA256_DIGEST_LENGTH + 32, // key len (SHA256 + AES256) + 16, // nonce len (IV) + 16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256) + SHA256_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_256_cbc_sha256_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha384_tls = { - SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */ - 16, /* nonce len (IV) */ - 16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */ - SHA384_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA384_DIGEST_LENGTH + 32, // key len (SHA384 + AES256) + 16, // nonce len (IV) + 16 + SHA384_DIGEST_LENGTH, // overhead (padding + SHA384) + SHA384_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_aes_256_cbc_sha384_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */ - 8, /* nonce len (IV) */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 24, // key len (SHA1 + 3DES) + 8, // nonce len (IV) + 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_des_ede3_cbc_sha1_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - 
NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */ - 0, /* nonce len */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV) + 0, // nonce len + 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_des_ede3_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - aead_tls_get_iv, /* get_iv */ + NULL, // open_gather + aead_tls_get_iv, // get_iv aead_tls_tag_len, }; static const EVP_AEAD aead_null_sha1_tls = { - SHA_DIGEST_LENGTH, /* key len */ - 0, /* nonce len */ - SHA_DIGEST_LENGTH, /* overhead (SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - 0, /* seal_scatter_supports_extra_in */ + SHA_DIGEST_LENGTH, // key len + 0, // nonce len + SHA_DIGEST_LENGTH, // overhead (SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in - NULL, /* init */ + NULL, // init aead_null_sha1_tls_init, aead_tls_cleanup, aead_tls_open, aead_tls_seal_scatter, - NULL, /* open_gather */ - NULL, /* get_iv */ + NULL, // open_gather + NULL, // get_iv aead_tls_tag_len, }; diff --git a/crypto/cipher_extra/internal.h b/crypto/cipher_extra/internal.h index 71361959..1d2c4e1f 100644 --- a/crypto/cipher_extra/internal.h +++ b/crypto/cipher_extra/internal.h @@ -66,53 +66,53 @@ extern "C" { #endif -/* EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC - * record in |in|. This decrypted record should not include any "decrypted" - * explicit IV. If the record is publicly invalid, it returns zero. 
Otherwise, - * it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the - * padding is valid and zero otherwise. It then sets |*out_len| to the length - * with the padding removed or |in_len| if invalid. - * - * If the function returns one, it runs in time independent of the contents of - * |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying - * |EVP_tls_cbc_copy_mac|'s precondition. */ +// EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC +// record in |in|. This decrypted record should not include any "decrypted" +// explicit IV. If the record is publicly invalid, it returns zero. Otherwise, +// it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the +// padding is valid and zero otherwise. It then sets |*out_len| to the length +// with the padding removed or |in_len| if invalid. +// +// If the function returns one, it runs in time independent of the contents of +// |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying +// |EVP_tls_cbc_copy_mac|'s precondition. int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, const uint8_t *in, size_t in_len, size_t block_size, size_t mac_size); -/* EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first - * |in_len| bytes of |in| to |out| in constant time (independent of the concrete - * value of |in_len|, which may vary within a 256-byte window). |in| must point - * to a buffer of |orig_len| bytes. - * - * On entry: - * orig_len >= in_len >= md_size - * md_size <= EVP_MAX_MD_SIZE */ +// EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first +// |in_len| bytes of |in| to |out| in constant time (independent of the concrete +// value of |in_len|, which may vary within a 256-byte window). |in| must point +// to a buffer of |orig_len| bytes. 
+// +// On entry: +// orig_len >= in_len >= md_size +// md_size <= EVP_MAX_MD_SIZE void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, size_t in_len, size_t orig_len); -/* EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function - * which EVP_tls_cbc_digest_record supports. */ +// EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function +// which EVP_tls_cbc_digest_record supports. int EVP_tls_cbc_record_digest_supported(const EVP_MD *md); -/* EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS - * record. - * - * md: the hash function used in the HMAC. - * EVP_tls_cbc_record_digest_supported must return true for this hash. - * md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written. - * md_out_size: the number of output bytes is written here. - * header: the 13-byte, TLS record header. - * data: the record data itself - * data_plus_mac_size: the secret, reported length of the data and MAC - * once the padding has been removed. - * data_plus_mac_plus_padding_size: the public length of the whole - * record, including padding. - * - * On entry: by virtue of having been through one of the remove_padding - * functions, above, we know that data_plus_mac_size is large enough to contain - * a padding byte and MAC. (If the padding was invalid, it might contain the - * padding too. ) */ +// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS +// record. +// +// md: the hash function used in the HMAC. +// EVP_tls_cbc_record_digest_supported must return true for this hash. +// md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written. +// md_out_size: the number of output bytes is written here. +// header: the 13-byte, TLS record header. +// data: the record data itself +// data_plus_mac_size: the secret, reported length of the data and MAC +// once the padding has been removed. 
+// data_plus_mac_plus_padding_size: the public length of the whole +// record, including padding. +// +// On entry: by virtue of having been through one of the remove_padding +// functions, above, we know that data_plus_mac_size is large enough to contain +// a padding byte and MAC. (If the padding was invalid, it might contain the +// padding too. ) int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, size_t *md_out_size, const uint8_t header[13], const uint8_t *data, size_t data_plus_mac_size, @@ -122,7 +122,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H */ +#endif // OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H diff --git a/crypto/cipher_extra/tls_cbc.c b/crypto/cipher_extra/tls_cbc.c index 2372c5c0..6f95130a 100644 --- a/crypto/cipher_extra/tls_cbc.c +++ b/crypto/cipher_extra/tls_cbc.c @@ -62,13 +62,13 @@ #include "../fipsmodule/cipher/internal.h" -/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length - * field. (SHA-384/512 have 128-bit length.) */ +// MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length +// field. (SHA-384/512 have 128-bit length.) #define MAX_HASH_BIT_COUNT_BYTES 16 -/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support. - * Currently SHA-384/512 has a 128-byte block size and that's the largest - * supported by TLS.) */ +// MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support. +// Currently SHA-384/512 has a 128-byte block size and that's the largest +// supported by TLS.) 
#define MAX_HASH_BLOCK_SIZE 128 int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, @@ -76,7 +76,7 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, size_t block_size, size_t mac_size) { const size_t overhead = 1 /* padding length byte */ + mac_size; - /* These lengths are all public so we can test them in non-constant time. */ + // These lengths are all public so we can test them in non-constant time. if (overhead > in_len) { return 0; } @@ -84,16 +84,16 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, size_t padding_length = in[in_len - 1]; crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length); - /* The padding consists of a length byte at the end of the record and - * then that many bytes of padding, all with the same value as the - * length byte. Thus, with the length byte included, there are i+1 - * bytes of padding. - * - * We can't check just |padding_length+1| bytes because that leaks - * decrypted information. Therefore we always have to check the maximum - * amount of padding possible. (Again, the length of the record is - * public information so we can use it.) */ - size_t to_check = 256; /* maximum amount of padding, inc length byte. */ + // The padding consists of a length byte at the end of the record and + // then that many bytes of padding, all with the same value as the + // length byte. Thus, with the length byte included, there are i+1 + // bytes of padding. + // + // We can't check just |padding_length+1| bytes because that leaks + // decrypted information. Therefore we always have to check the maximum + // amount of padding possible. (Again, the length of the record is + // public information so we can use it.) + size_t to_check = 256; // maximum amount of padding, inc length byte. 
if (to_check > in_len) { to_check = in_len; } @@ -101,19 +101,19 @@ int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, for (size_t i = 0; i < to_check; i++) { uint8_t mask = constant_time_ge_8(padding_length, i); uint8_t b = in[in_len - 1 - i]; - /* The final |padding_length+1| bytes should all have the value - * |padding_length|. Therefore the XOR should be zero. */ + // The final |padding_length+1| bytes should all have the value + // |padding_length|. Therefore the XOR should be zero. good &= ~(mask & (padding_length ^ b)); } - /* If any of the final |padding_length+1| bytes had the wrong value, - * one or more of the lower eight bits of |good| will be cleared. */ + // If any of the final |padding_length+1| bytes had the wrong value, + // one or more of the lower eight bits of |good| will be cleared. good = constant_time_eq_w(0xff, good & 0xff); - /* Always treat |padding_length| as zero on error. If, assuming block size of - * 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16 - * and returned -1, distinguishing good MAC and bad padding from bad MAC and - * bad padding would give POODLE's padding oracle. */ + // Always treat |padding_length| as zero on error. If, assuming block size of + // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16 + // and returned -1, distinguishing good MAC and bad padding from bad MAC and + // bad padding would give POODLE's padding oracle. padding_length = good & (padding_length + 1); *out_len = in_len - padding_length; *out_padding_ok = good; @@ -126,7 +126,7 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, uint8_t *rotated_mac = rotated_mac1; uint8_t *rotated_mac_tmp = rotated_mac2; - /* mac_end is the index of |in| just after the end of the MAC. */ + // mac_end is the index of |in| just after the end of the MAC. 
size_t mac_end = in_len; size_t mac_start = mac_end - md_size; @@ -134,10 +134,10 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, assert(in_len >= md_size); assert(md_size <= EVP_MAX_MD_SIZE); - /* scan_start contains the number of bytes that we can ignore because - * the MAC's position can only vary by 255 bytes. */ + // scan_start contains the number of bytes that we can ignore because + // the MAC's position can only vary by 255 bytes. size_t scan_start = 0; - /* This information is public so it's safe to branch based on it. */ + // This information is public so it's safe to branch based on it. if (orig_len > md_size + 255 + 1) { scan_start = orig_len - (md_size + 255 + 1); } @@ -153,15 +153,15 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, mac_started |= is_mac_start; uint8_t mac_ended = constant_time_ge_8(i, mac_end); rotated_mac[j] |= in[i] & mac_started & ~mac_ended; - /* Save the offset that |mac_start| is mapped to. */ + // Save the offset that |mac_start| is mapped to. rotate_offset |= j & is_mac_start; } - /* Now rotate the MAC. We rotate in log(md_size) steps, one for each bit - * position. */ + // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit + // position. for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) { - /* Rotate by |offset| iff the corresponding bit is set in - * |rotate_offset|, placing the result in |rotated_mac_tmp|. */ + // Rotate by |offset| iff the corresponding bit is set in + // |rotate_offset|, placing the result in |rotated_mac_tmp|. const uint8_t skip_rotate = (rotate_offset & 1) - 1; for (size_t i = 0, j = offset; i < md_size; i++, j++) { if (j >= md_size) { @@ -171,9 +171,9 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]); } - /* Swap pointers so |rotated_mac| contains the (possibly) rotated value. 
- * Note the number of iterations and thus the identity of these pointers is - * public information. */ + // Swap pointers so |rotated_mac| contains the (possibly) rotated value. + // Note the number of iterations and thus the identity of these pointers is + // public information. uint8_t *tmp = rotated_mac; rotated_mac = rotated_mac_tmp; rotated_mac_tmp = tmp; @@ -182,8 +182,8 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, OPENSSL_memcpy(out, rotated_mac, md_size); } -/* u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in - * big-endian order. The value of p is advanced by four. */ +// u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in +// big-endian order. The value of p is advanced by four. #define u32toBE(n, p) \ do { \ *((p)++) = (uint8_t)((n) >> 24); \ @@ -192,8 +192,8 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, *((p)++) = (uint8_t)((n)); \ } while (0) -/* u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in - * big-endian order. The value of p is advanced by eight. */ +// u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in +// big-endian order. The value of p is advanced by eight. #define u64toBE(n, p) \ do { \ *((p)++) = (uint8_t)((n) >> 56); \ @@ -224,9 +224,9 @@ static void tls1_sha512_transform(HASH_CTX *ctx, const uint8_t *block) { SHA512_Transform(&ctx->sha512, block); } -/* These functions serialize the state of a hash and thus perform the standard - * "final" operation without adding the padding and length that such a function - * typically does. */ +// These functions serialize the state of a hash and thus perform the standard +// "final" operation without adding the padding and length that such a function +// typically does. 
static void tls1_sha1_final_raw(HASH_CTX *ctx, uint8_t *md_out) { SHA_CTX *sha1 = &ctx->sha1; u32toBE(sha1->h[0], md_out); @@ -272,13 +272,13 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out); void (*md_transform)(HASH_CTX *ctx, const uint8_t *block); unsigned md_size, md_block_size = 64; - /* md_length_size is the number of bytes in the length field that terminates - * the hash. */ + // md_length_size is the number of bytes in the length field that terminates + // the hash. unsigned md_length_size = 8; - /* Bound the acceptable input so we can forget about many possible overflows - * later in this function. This is redundant with the record size limits in - * TLS. */ + // Bound the acceptable input so we can forget about many possible overflows + // later in this function. This is redundant with the record size limits in + // TLS. if (data_plus_mac_plus_padding_size >= 1024 * 1024) { assert(0); return 0; @@ -309,8 +309,8 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, break; default: - /* EVP_tls_cbc_record_digest_supported should have been called first to - * check that the hash function is supported. */ + // EVP_tls_cbc_record_digest_supported should have been called first to + // check that the hash function is supported. assert(0); *md_out_size = 0; return 0; @@ -322,45 +322,45 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, static const size_t kHeaderLength = 13; - /* kVarianceBlocks is the number of blocks of the hash that we have to - * calculate in constant time because they could be altered by the - * padding value. - * - * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not - * required to be minimal. Therefore we say that the final six blocks - * can vary based on the padding. 
*/ + // kVarianceBlocks is the number of blocks of the hash that we have to + // calculate in constant time because they could be altered by the + // padding value. + // + // TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not + // required to be minimal. Therefore we say that the final six blocks + // can vary based on the padding. static const size_t kVarianceBlocks = 6; - /* From now on we're dealing with the MAC, which conceptually has 13 - * bytes of `header' before the start of the data. */ + // From now on we're dealing with the MAC, which conceptually has 13 + // bytes of `header' before the start of the data. size_t len = data_plus_mac_plus_padding_size + kHeaderLength; - /* max_mac_bytes contains the maximum bytes of bytes in the MAC, including - * |header|, assuming that there's no padding. */ + // max_mac_bytes contains the maximum bytes of bytes in the MAC, including + // |header|, assuming that there's no padding. size_t max_mac_bytes = len - md_size - 1; - /* num_blocks is the maximum number of hash blocks. */ + // num_blocks is the maximum number of hash blocks. size_t num_blocks = (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size; - /* In order to calculate the MAC in constant time we have to handle - * the final blocks specially because the padding value could cause the - * end to appear somewhere in the final |kVarianceBlocks| blocks and we - * can't leak where. However, |num_starting_blocks| worth of data can - * be hashed right away because no padding value can affect whether - * they are plaintext. */ + // In order to calculate the MAC in constant time we have to handle + // the final blocks specially because the padding value could cause the + // end to appear somewhere in the final |kVarianceBlocks| blocks and we + // can't leak where. However, |num_starting_blocks| worth of data can + // be hashed right away because no padding value can affect whether + // they are plaintext. 
size_t num_starting_blocks = 0; - /* k is the starting byte offset into the conceptual header||data where - * we start processing. */ + // k is the starting byte offset into the conceptual header||data where + // we start processing. size_t k = 0; - /* mac_end_offset is the index just past the end of the data to be - * MACed. */ + // mac_end_offset is the index just past the end of the data to be + // MACed. size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size; - /* c is the index of the 0x80 byte in the final hash block that - * contains application data. */ + // c is the index of the 0x80 byte in the final hash block that + // contains application data. size_t c = mac_end_offset % md_block_size; - /* index_a is the hash block number that contains the 0x80 terminating - * value. */ + // index_a is the hash block number that contains the 0x80 terminating + // value. size_t index_a = mac_end_offset / md_block_size; - /* index_b is the hash block number that contains the 64-bit hash - * length, in bits. */ + // index_b is the hash block number that contains the 64-bit hash + // length, in bits. size_t index_b = (mac_end_offset + md_length_size) / md_block_size; if (num_blocks > kVarianceBlocks) { @@ -368,13 +368,13 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, k = md_block_size * num_starting_blocks; } - /* bits is the hash-length in bits. It includes the additional hash - * block for the masked HMAC key. */ - size_t bits = 8 * mac_end_offset; /* at most 18 bits to represent */ + // bits is the hash-length in bits. It includes the additional hash + // block for the masked HMAC key. + size_t bits = 8 * mac_end_offset; // at most 18 bits to represent - /* Compute the initial HMAC block. */ + // Compute the initial HMAC block. bits += 8 * md_block_size; - /* hmac_pad is the masked HMAC key. */ + // hmac_pad is the masked HMAC key. 
uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE]; OPENSSL_memset(hmac_pad, 0, md_block_size); assert(mac_secret_length <= sizeof(hmac_pad)); @@ -385,7 +385,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, md_transform(&md_state, hmac_pad); - /* The length check means |bits| fits in four bytes. */ + // The length check means |bits| fits in four bytes. uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES]; OPENSSL_memset(length_bytes, 0, md_length_size - 4); length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24); @@ -394,7 +394,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, length_bytes[md_length_size - 1] = (uint8_t)bits; if (k > 0) { - /* k is a multiple of md_block_size. */ + // k is a multiple of md_block_size. uint8_t first_block[MAX_HASH_BLOCK_SIZE]; OPENSSL_memcpy(first_block, header, 13); OPENSSL_memcpy(first_block + 13, data, md_block_size - 13); @@ -407,10 +407,10 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, uint8_t mac_out[EVP_MAX_MD_SIZE]; OPENSSL_memset(mac_out, 0, sizeof(mac_out)); - /* We now process the final hash blocks. For each block, we construct - * it in constant time. If the |i==index_a| then we'll include the 0x80 - * bytes and zero pad etc. For each block we selectively copy it, in - * constant time, to |mac_out|. */ + // We now process the final hash blocks. For each block, we construct + // it in constant time. If the |i==index_a| then we'll include the 0x80 + // bytes and zero pad etc. For each block we selectively copy it, in + // constant time, to |mac_out|. 
for (size_t i = num_starting_blocks; i <= num_starting_blocks + kVarianceBlocks; i++) { uint8_t block[MAX_HASH_BLOCK_SIZE]; @@ -427,24 +427,24 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c); uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1); - /* If this is the block containing the end of the - * application data, and we are at the offset for the - * 0x80 value, then overwrite b with 0x80. */ + // If this is the block containing the end of the + // application data, and we are at the offset for the + // 0x80 value, then overwrite b with 0x80. b = constant_time_select_8(is_past_c, 0x80, b); - /* If this the the block containing the end of the - * application data and we're past the 0x80 value then - * just write zero. */ + // If this the the block containing the end of the + // application data and we're past the 0x80 value then + // just write zero. b = b & ~is_past_cp1; - /* If this is index_b (the final block), but not - * index_a (the end of the data), then the 64-bit - * length didn't fit into index_a and we're having to - * add an extra block of zeros. */ + // If this is index_b (the final block), but not + // index_a (the end of the data), then the 64-bit + // length didn't fit into index_a and we're having to + // add an extra block of zeros. b &= ~is_block_b | is_block_a; - /* The final bytes of one of the blocks contains the - * length. */ + // The final bytes of one of the blocks contains the + // length. if (j >= md_block_size - md_length_size) { - /* If this is index_b, write a length byte. */ + // If this is index_b, write a length byte. b = constant_time_select_8( is_block_b, length_bytes[j - (md_block_size - md_length_size)], b); } @@ -453,7 +453,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, md_transform(&md_state, block); md_final_raw(&md_state, block); - /* If this is index_b, copy the hash value to |mac_out|. 
*/ + // If this is index_b, copy the hash value to |mac_out|. for (size_t j = 0; j < md_size; j++) { mac_out[j] |= block[j] & is_block_b; } @@ -466,7 +466,7 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, return 0; } - /* Complete the HMAC in the standard manner. */ + // Complete the HMAC in the standard manner. for (size_t i = 0; i < md_block_size; i++) { hmac_pad[i] ^= 0x6a; } diff --git a/crypto/cmac/cmac.c b/crypto/cmac/cmac.c index a9a527d5..fb4e69c7 100644 --- a/crypto/cmac/cmac.c +++ b/crypto/cmac/cmac.c @@ -60,13 +60,13 @@ struct cmac_ctx_st { EVP_CIPHER_CTX cipher_ctx; - /* k1 and k2 are the CMAC subkeys. See - * https://tools.ietf.org/html/rfc4493#section-2.3 */ + // k1 and k2 are the CMAC subkeys. See + // https://tools.ietf.org/html/rfc4493#section-2.3 uint8_t k1[AES_BLOCK_SIZE]; uint8_t k2[AES_BLOCK_SIZE]; - /* Last (possibly partial) scratch */ + // Last (possibly partial) scratch uint8_t block[AES_BLOCK_SIZE]; - /* block_used contains the number of valid bytes in |block|. */ + // block_used contains the number of valid bytes in |block|. unsigned block_used; }; @@ -124,20 +124,20 @@ void CMAC_CTX_free(CMAC_CTX *ctx) { OPENSSL_free(ctx); } -/* binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸) - * with a hard-coded reduction polynomial and sets |out| as x times the - * input. - * - * See https://tools.ietf.org/html/rfc4493#section-2.3 */ +// binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸) +// with a hard-coded reduction polynomial and sets |out| as x times the +// input. +// +// See https://tools.ietf.org/html/rfc4493#section-2.3 static void binary_field_mul_x(uint8_t out[16], const uint8_t in[16]) { unsigned i; - /* Shift |in| to left, including carry. */ + // Shift |in| to left, including carry. for (i = 0; i < 15; i++) { out[i] = (in[i] << 1) | (in[i+1] >> 7); } - /* If MSB set fixup with R. */ + // If MSB set fixup with R. 
const uint8_t carry = in[0] >> 7; out[i] = (in[i] << 1) ^ ((0 - carry) & 0x87); } @@ -152,7 +152,7 @@ int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len, EVP_CIPHER_key_length(cipher) != key_len || !EVP_EncryptInit_ex(&ctx->cipher_ctx, cipher, NULL, key, kZeroIV) || !EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, AES_BLOCK_SIZE) || - /* Reset context again ready for first data. */ + // Reset context again ready for first data. !EVP_EncryptInit_ex(&ctx->cipher_ctx, NULL, NULL, NULL, kZeroIV)) { return 0; } @@ -183,11 +183,11 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) { in_len -= todo; ctx->block_used += todo; - /* If |in_len| is zero then either |ctx->block_used| is less than - * |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used| - * is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the - * latter case we don't want to process this block now because it might be - * the last block and that block is treated specially. */ + // If |in_len| is zero then either |ctx->block_used| is less than + // |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used| + // is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the + // latter case we don't want to process this block now because it might be + // the last block and that block is treated specially. if (in_len == 0) { return 1; } @@ -199,7 +199,7 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) { } } - /* Encrypt all but one of the remaining blocks. */ + // Encrypt all but one of the remaining blocks. while (in_len > AES_BLOCK_SIZE) { if (!EVP_Cipher(&ctx->cipher_ctx, scratch, in, AES_BLOCK_SIZE)) { return 0; @@ -223,8 +223,8 @@ int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len) { const uint8_t *mask = ctx->k1; if (ctx->block_used != AES_BLOCK_SIZE) { - /* If the last block is incomplete, terminate it with a single 'one' bit - * followed by zeros. 
*/ + // If the last block is incomplete, terminate it with a single 'one' bit + // followed by zeros. ctx->block[ctx->block_used] = 0x80; OPENSSL_memset(ctx->block + ctx->block_used + 1, 0, AES_BLOCK_SIZE - (ctx->block_used + 1)); diff --git a/crypto/conf/conf.c b/crypto/conf/conf.c index 00172f5b..f8ff6136 100644 --- a/crypto/conf/conf.c +++ b/crypto/conf/conf.c @@ -69,8 +69,8 @@ #include "../internal.h" -/* The maximum length we can grow a value to after variable expansion. 64k - * should be more than enough for all reasonable uses. */ +// The maximum length we can grow a value to after variable expansion. 64k +// should be more than enough for all reasonable uses. #define MAX_CONF_VALUE_LENGTH 65536 static uint32_t conf_value_hash(const CONF_VALUE *v) { @@ -263,7 +263,7 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) { } else if (IS_EOF(conf, *from)) { break; } else if (*from == '$') { - /* try to expand it */ + // try to expand it rrp = NULL; s = &(from[1]); if (*s == '{') { @@ -303,14 +303,14 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) { } e++; } - /* So at this point we have - * np which is the start of the name string which is - * '\0' terminated. - * cp which is the start of the section string which is - * '\0' terminated. - * e is the 'next point after'. - * r and rr are the chars replaced by the '\0' - * rp and rrp is where 'r' and 'rr' came from. */ + // So at this point we have + // np which is the start of the name string which is + // '\0' terminated. + // cp which is the start of the section string which is + // '\0' terminated. + // e is the 'next point after'. + // r and rr are the chars replaced by the '\0' + // rp and rrp is where 'r' and 'rr' came from. p = NCONF_get_string(conf, cp, np); if (rrp != NULL) { *rrp = rr; @@ -566,25 +566,25 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) { i--; } } - /* we removed some trailing stuff so there is a new - * line on the end. 
*/ + // we removed some trailing stuff so there is a new + // line on the end. if (ii && i == ii) { - again = 1; /* long line */ + again = 1; // long line } else { p[i] = '\0'; - eline++; /* another input line */ + eline++; // another input line } - /* we now have a line with trailing \r\n removed */ + // we now have a line with trailing \r\n removed - /* i is the number of bytes */ + // i is the number of bytes bufnum += i; v = NULL; - /* check for line continuation */ + // check for line continuation if (bufnum >= 1) { - /* If we have bytes and the last char '\\' and - * second last char is not '\\' */ + // If we have bytes and the last char '\\' and + // second last char is not '\\' p = &(buff->data[bufnum - 1]); if (IS_ESC(conf, p[0]) && ((bufnum <= 1) || !IS_ESC(conf, p[-1]))) { bufnum--; @@ -600,7 +600,7 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) { clear_comments(conf, buf); s = eat_ws(conf, buf); if (IS_EOF(conf, *s)) { - continue; /* blank line */ + continue; // blank line } if (*s == '[') { char *ss; diff --git a/crypto/conf/internal.h b/crypto/conf/internal.h index 03d1a8f3..3e0e57df 100644 --- a/crypto/conf/internal.h +++ b/crypto/conf/internal.h @@ -20,12 +20,12 @@ extern "C" { #endif -/* CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. */ +// CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. CONF_VALUE *CONF_VALUE_new(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H */ +#endif // OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H diff --git a/crypto/cpu-aarch64-linux.c b/crypto/cpu-aarch64-linux.c index 1b0f3955..f9fa6c5c 100644 --- a/crypto/cpu-aarch64-linux.c +++ b/crypto/cpu-aarch64-linux.c @@ -28,8 +28,8 @@ extern uint32_t OPENSSL_armcap_P; void OPENSSL_cpuid_setup(void) { unsigned long hwcap = getauxval(AT_HWCAP); - /* See /usr/include/asm/hwcap.h on an aarch64 installation for the source of - * these values. 
*/ + // See /usr/include/asm/hwcap.h on an aarch64 installation for the source of + // these values. static const unsigned long kNEON = 1 << 1; static const unsigned long kAES = 1 << 3; static const unsigned long kPMULL = 1 << 4; @@ -37,8 +37,8 @@ void OPENSSL_cpuid_setup(void) { static const unsigned long kSHA256 = 1 << 6; if ((hwcap & kNEON) == 0) { - /* Matching OpenSSL, if NEON is missing, don't report other features - * either. */ + // Matching OpenSSL, if NEON is missing, don't report other features + // either. return; } @@ -58,4 +58,4 @@ void OPENSSL_cpuid_setup(void) { } } -#endif /* OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP */ +#endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP diff --git a/crypto/cpu-arm-linux.c b/crypto/cpu-arm-linux.c index 95bb5ee3..a5f1f8ac 100644 --- a/crypto/cpu-arm-linux.c +++ b/crypto/cpu-arm-linux.c @@ -34,15 +34,15 @@ #define HWCAP_NEON (1 << 12) -/* See /usr/include/asm/hwcap.h on an ARM installation for the source of - * these values. */ +// See /usr/include/asm/hwcap.h on an ARM installation for the source of +// these values. #define HWCAP2_AES (1 << 0) #define HWCAP2_PMULL (1 << 1) #define HWCAP2_SHA1 (1 << 2) #define HWCAP2_SHA2 (1 << 3) -/* |getauxval| is not available on Android until API level 20. Link it as a weak - * symbol and use other methods as fallback. */ +// |getauxval| is not available on Android until API level 20. Link it as a weak +// symbol and use other methods as fallback. unsigned long getauxval(unsigned long type) __attribute__((weak)); static int open_eintr(const char *path, int flags) { @@ -61,8 +61,8 @@ static ssize_t read_eintr(int fd, void *out, size_t len) { return ret; } -/* read_full reads exactly |len| bytes from |fd| to |out|. On error or end of - * file, it returns zero. */ +// read_full reads exactly |len| bytes from |fd| to |out|. On error or end of +// file, it returns zero. 
static int read_full(int fd, void *out, size_t len) { char *outp = out; while (len > 0) { @@ -76,9 +76,9 @@ static int read_full(int fd, void *out, size_t len) { return 1; } -/* read_file opens |path| and reads until end-of-file. On success, it returns - * one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the - * contents. Otherwise, it returns zero. */ +// read_file opens |path| and reads until end-of-file. On success, it returns +// one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the +// contents. Otherwise, it returns zero. static int read_file(char **out_ptr, size_t *out_len, const char *path) { int fd = open_eintr(path, O_RDONLY); if (fd < 0) { @@ -128,7 +128,7 @@ err: return ret; } -/* getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv. */ +// getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv. static unsigned long getauxval_proc(unsigned long type) { int fd = open_eintr("/proc/self/auxv", O_RDONLY); if (fd < 0) { @@ -164,16 +164,16 @@ static int STRING_PIECE_equals(const STRING_PIECE *a, const char *b) { return a->len == b_len && OPENSSL_memcmp(a->data, b, b_len) == 0; } -/* STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found, - * sets |*out_left| and |*out_right| to |in| split before and after it. It - * returns one if |sep| was found and zero otherwise. */ +// STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found, +// sets |*out_left| and |*out_right| to |in| split before and after it. It +// returns one if |sep| was found and zero otherwise. static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right, const STRING_PIECE *in, char sep) { const char *p = OPENSSL_memchr(in->data, sep, in->len); if (p == NULL) { return 0; } - /* |out_left| or |out_right| may alias |in|, so make a copy. */ + // |out_left| or |out_right| may alias |in|, so make a copy. 
STRING_PIECE in_copy = *in; out_left->data = in_copy.data; out_left->len = p - in_copy.data; @@ -182,7 +182,7 @@ static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right, return 1; } -/* STRING_PIECE_trim removes leading and trailing whitespace from |s|. */ +// STRING_PIECE_trim removes leading and trailing whitespace from |s|. static void STRING_PIECE_trim(STRING_PIECE *s) { while (s->len != 0 && (s->data[0] == ' ' || s->data[0] == '\t')) { s->data++; @@ -194,12 +194,12 @@ static void STRING_PIECE_trim(STRING_PIECE *s) { } } -/* extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from - * |in|. If found, it sets |*out| to the value and returns one. Otherwise, it - * returns zero. */ +// extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from +// |in|. If found, it sets |*out| to the value and returns one. Otherwise, it +// returns zero. static int extract_cpuinfo_field(STRING_PIECE *out, const STRING_PIECE *in, const char *field) { - /* Process |in| one line at a time. */ + // Process |in| one line at a time. STRING_PIECE remaining = *in, line; while (STRING_PIECE_split(&line, &remaining, &remaining, '\n')) { STRING_PIECE key, value; @@ -224,8 +224,8 @@ static int cpuinfo_field_equals(const STRING_PIECE *cpuinfo, const char *field, STRING_PIECE_equals(&extracted, value); } -/* has_list_item treats |list| as a space-separated list of items and returns - * one if |item| is contained in |list| and zero otherwise. */ +// has_list_item treats |list| as a space-separated list of items and returns +// one if |item| is contained in |list| and zero otherwise. 
static int has_list_item(const STRING_PIECE *list, const char *item) { STRING_PIECE remaining = *list, feature; while (STRING_PIECE_split(&feature, &remaining, &remaining, ' ')) { @@ -238,11 +238,11 @@ static int has_list_item(const STRING_PIECE *list, const char *item) { static unsigned long get_hwcap_cpuinfo(const STRING_PIECE *cpuinfo) { if (cpuinfo_field_equals(cpuinfo, "CPU architecture", "8")) { - /* This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always - * available on ARMv8. Linux omits required features, so reading the - * "Features" line does not work. (For simplicity, use strict equality. We - * assume everything running on future ARM architectures will have a - * working |getauxval|.) */ + // This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always + // available on ARMv8. Linux omits required features, so reading the + // "Features" line does not work. (For simplicity, use strict equality. We + // assume everything running on future ARM architectures will have a + // working |getauxval|.) return HWCAP_NEON; } @@ -276,8 +276,8 @@ static unsigned long get_hwcap2_cpuinfo(const STRING_PIECE *cpuinfo) { return ret; } -/* has_broken_neon returns one if |in| matches a CPU known to have a broken - * NEON unit. See https://crbug.com/341598. */ +// has_broken_neon returns one if |in| matches a CPU known to have a broken +// NEON unit. See https://crbug.com/341598. static int has_broken_neon(const STRING_PIECE *cpuinfo) { return cpuinfo_field_equals(cpuinfo, "CPU implementer", "0x51") && cpuinfo_field_equals(cpuinfo, "CPU architecture", "7") && @@ -300,13 +300,13 @@ void OPENSSL_cpuid_setup(void) { cpuinfo.data = cpuinfo_data; cpuinfo.len = cpuinfo_len; - /* |getauxval| is not available on Android until API level 20. If it is - * unavailable, read from /proc/self/auxv as a fallback. This is unreadable - * on some versions of Android, so further fall back to /proc/cpuinfo. 
- * - * See - * https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2 - * and b/13679666 (Google-internal) for details. */ + // |getauxval| is not available on Android until API level 20. If it is + // unavailable, read from /proc/self/auxv as a fallback. This is unreadable + // on some versions of Android, so further fall back to /proc/cpuinfo. + // + // See + // https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2 + // and b/13679666 (Google-internal) for details. unsigned long hwcap = 0; if (getauxval != NULL) { hwcap = getauxval(AT_HWCAP); @@ -318,18 +318,18 @@ void OPENSSL_cpuid_setup(void) { hwcap = get_hwcap_cpuinfo(&cpuinfo); } - /* Clear NEON support if known broken. */ + // Clear NEON support if known broken. g_has_broken_neon = has_broken_neon(&cpuinfo); if (g_has_broken_neon) { hwcap &= ~HWCAP_NEON; } - /* Matching OpenSSL, only report other features if NEON is present. */ + // Matching OpenSSL, only report other features if NEON is present. if (hwcap & HWCAP_NEON) { OPENSSL_armcap_P |= ARMV7_NEON; - /* Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to - * /proc/cpuinfo. See https://crbug.com/596156. */ + // Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to + // /proc/cpuinfo. See https://crbug.com/596156. unsigned long hwcap2 = 0; if (getauxval != NULL) { hwcap2 = getauxval(AT_HWCAP2); @@ -357,4 +357,4 @@ void OPENSSL_cpuid_setup(void) { int CRYPTO_has_broken_NEON(void) { return g_has_broken_neon; } -#endif /* OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP */ +#endif // OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP diff --git a/crypto/cpu-intel.c b/crypto/cpu-intel.c index 01000894..127fa57a 100644 --- a/crypto/cpu-intel.c +++ b/crypto/cpu-intel.c @@ -78,9 +78,9 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #include "internal.h" -/* OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX - * is set to zero. 
It writes EAX, EBX, ECX, and EDX to |*out_eax| through - * |*out_edx|. */ +// OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX +// is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through +// |*out_edx|. static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, uint32_t *out_ecx, uint32_t *out_edx, uint32_t leaf) { #if defined(_MSC_VER) @@ -91,8 +91,8 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, *out_ecx = (uint32_t)tmp[2]; *out_edx = (uint32_t)tmp[3]; #elif defined(__pic__) && defined(OPENSSL_32_BIT) - /* Inline assembly may not clobber the PIC register. For 32-bit, this is EBX. - * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. */ + // Inline assembly may not clobber the PIC register. For 32-bit, this is EBX. + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. __asm__ volatile ( "xor %%ecx, %%ecx\n" "mov %%ebx, %%edi\n" @@ -111,8 +111,8 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, #endif } -/* OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR). - * Currently only XCR0 is defined by Intel so |xcr| should always be zero. */ +// OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR). +// Currently only XCR0 is defined by Intel so |xcr| should always be zero. static uint64_t OPENSSL_xgetbv(uint32_t xcr) { #if defined(_MSC_VER) return (uint64_t)_xgetbv(xcr); @@ -123,8 +123,8 @@ static uint64_t OPENSSL_xgetbv(uint32_t xcr) { #endif } -/* handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| - * and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. */ +// handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| +// and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. 
static void handle_cpu_env(uint32_t *out, const char *in) { const int invert = in[0] == '~'; uint64_t v; @@ -143,7 +143,7 @@ static void handle_cpu_env(uint32_t *out, const char *in) { } void OPENSSL_cpuid_setup(void) { - /* Determine the vendor and maximum input value. */ + // Determine the vendor and maximum input value. uint32_t eax, ebx, ecx, edx; OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0); @@ -158,8 +158,8 @@ void OPENSSL_cpuid_setup(void) { int has_amd_xop = 0; if (is_amd) { - /* AMD-specific logic. - * See http://developer.amd.com/wordpress/media/2012/10/254811.pdf */ + // AMD-specific logic. + // See http://developer.amd.com/wordpress/media/2012/10/254811.pdf OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0x80000000); uint32_t num_extended_ids = eax; if (num_extended_ids >= 0x80000001) { @@ -176,23 +176,23 @@ void OPENSSL_cpuid_setup(void) { extended_features = ebx; } - /* Determine the number of cores sharing an L1 data cache to adjust the - * hyper-threading bit. */ + // Determine the number of cores sharing an L1 data cache to adjust the + // hyper-threading bit. uint32_t cores_per_cache = 0; if (is_amd) { - /* AMD CPUs never share an L1 data cache between threads but do set the HTT - * bit on multi-core CPUs. */ + // AMD CPUs never share an L1 data cache between threads but do set the HTT + // bit on multi-core CPUs. cores_per_cache = 1; } else if (num_ids >= 4) { - /* TODO(davidben): The Intel manual says this CPUID leaf enumerates all - * caches using ECX and doesn't say which is first. Does this matter? */ + // TODO(davidben): The Intel manual says this CPUID leaf enumerates all + // caches using ECX and doesn't say which is first. Does this matter? OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 4); cores_per_cache = 1 + ((eax >> 14) & 0xfff); } OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 1); - /* Adjust the hyper-threading bit. */ + // Adjust the hyper-threading bit. 
if (edx & (1 << 28)) { uint32_t num_logical_cores = (ebx >> 16) & 0xff; if (cores_per_cache == 1 || num_logical_cores <= 1) { @@ -200,17 +200,17 @@ void OPENSSL_cpuid_setup(void) { } } - /* Reserved bit #20 was historically repurposed to control the in-memory - * representation of RC4 state. Always set it to zero. */ + // Reserved bit #20 was historically repurposed to control the in-memory + // representation of RC4 state. Always set it to zero. edx &= ~(1 << 20); - /* Reserved bit #30 is repurposed to signal an Intel CPU. */ + // Reserved bit #30 is repurposed to signal an Intel CPU. if (is_intel) { edx |= (1 << 30); - /* Clear the XSAVE bit on Knights Landing to mimic Silvermont. This enables - * some Silvermont-specific codepaths which perform better. See OpenSSL - * commit 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. */ + // Clear the XSAVE bit on Knights Landing to mimic Silvermont. This enables + // some Silvermont-specific codepaths which perform better. See OpenSSL + // commit 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. if ((eax & 0x0fff0ff0) == 0x00050670 /* Knights Landing */ || (eax & 0x0fff0ff0) == 0x00080650 /* Knights Mill (per SDE) */) { ecx &= ~(1 << 26); @@ -219,7 +219,7 @@ void OPENSSL_cpuid_setup(void) { edx &= ~(1 << 30); } - /* The SDBG bit is repurposed to denote AMD XOP support. */ + // The SDBG bit is repurposed to denote AMD XOP support. if (has_amd_xop) { ecx |= (1 << 11); } else { @@ -228,31 +228,31 @@ void OPENSSL_cpuid_setup(void) { uint64_t xcr0 = 0; if (ecx & (1 << 27)) { - /* XCR0 may only be queried if the OSXSAVE bit is set. */ + // XCR0 may only be queried if the OSXSAVE bit is set. xcr0 = OPENSSL_xgetbv(0); } - /* See Intel manual, volume 1, section 14.3. */ + // See Intel manual, volume 1, section 14.3. if ((xcr0 & 6) != 6) { - /* YMM registers cannot be used. */ - ecx &= ~(1 << 28); /* AVX */ - ecx &= ~(1 << 12); /* FMA */ - ecx &= ~(1 << 11); /* AMD XOP */ - /* Clear AVX2 and AVX512* bits. 
- * - * TODO(davidben): Should bits 17 and 26-28 also be cleared? Upstream - * doesn't clear those. */ + // YMM registers cannot be used. + ecx &= ~(1 << 28); // AVX + ecx &= ~(1 << 12); // FMA + ecx &= ~(1 << 11); // AMD XOP + // Clear AVX2 and AVX512* bits. + // + // TODO(davidben): Should bits 17 and 26-28 also be cleared? Upstream + // doesn't clear those. extended_features &= ~((1 << 5) | (1 << 16) | (1 << 21) | (1 << 30) | (1 << 31)); } - /* See Intel manual, volume 1, section 15.2. */ + // See Intel manual, volume 1, section 15.2. if ((xcr0 & 0xe6) != 0xe6) { - /* Clear AVX512F. Note we don't touch other AVX512 extensions because they - * can be used with YMM. */ + // Clear AVX512F. Note we don't touch other AVX512 extensions because they + // can be used with YMM. extended_features &= ~(1 << 16); } - /* Disable ADX instructions on Knights Landing. See OpenSSL commit - * 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. */ + // Disable ADX instructions on Knights Landing. See OpenSSL commit + // 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. if ((ecx & (1 << 26)) == 0) { extended_features &= ~(1 << 19); } @@ -268,15 +268,15 @@ void OPENSSL_cpuid_setup(void) { return; } - /* OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'. - * Each value is a 64-bit, unsigned value which may start with "0x" to - * indicate a hex value. Prior to the 64-bit value, a '~' may be given. - * - * If '~' isn't present, then the value is taken as the result of the CPUID. - * Otherwise the value is inverted and ANDed with the probed CPUID result. - * - * The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2] - * and [3]. */ + // OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'. + // Each value is a 64-bit, unsigned value which may start with "0x" to + // indicate a hex value. Prior to the 64-bit value, a '~' may be given. + // + // If '~' isn't present, then the value is taken as the result of the CPUID. 
+ // Otherwise the value is inverted and ANDed with the probed CPUID result. + // + // The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2] + // and [3]. handle_cpu_env(&OPENSSL_ia32cap_P[0], env1); env2 = strchr(env1, ':'); @@ -285,4 +285,4 @@ void OPENSSL_cpuid_setup(void) { } } -#endif /* !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) */ +#endif // !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) diff --git a/crypto/cpu-ppc64le.c b/crypto/cpu-ppc64le.c index 54571bdb..6cc8aee5 100644 --- a/crypto/cpu-ppc64le.c +++ b/crypto/cpu-ppc64le.c @@ -22,8 +22,8 @@ #if !defined(PPC_FEATURE2_HAS_VCRYPTO) -/* PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER - * ABI for Linux Supplement”. */ +// PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER +// ABI for Linux Supplement”. #define PPC_FEATURE2_HAS_VCRYPTO 0x02000000 #endif @@ -35,4 +35,4 @@ int CRYPTO_is_PPC64LE_vcrypto_capable(void) { return (OPENSSL_ppc64le_hwcap2 & PPC_FEATURE2_HAS_VCRYPTO) != 0; } -#endif /* OPENSSL_PPC64LE */ +#endif // OPENSSL_PPC64LE diff --git a/crypto/crypto.c b/crypto/crypto.c index f74ef66c..aee35210 100644 --- a/crypto/crypto.c +++ b/crypto/crypto.c @@ -23,14 +23,14 @@ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \ defined(OPENSSL_PPC64LE)) -/* x86, x86_64, the ARMs and ppc64le need to record the result of a - * cpuid/getauxval call for the asm to work correctly, unless compiled without - * asm code. */ +// x86, x86_64, the ARMs and ppc64le need to record the result of a +// cpuid/getauxval call for the asm to work correctly, unless compiled without +// asm code. #define NEED_CPUID #else -/* Otherwise, don't emit a static initialiser. */ +// Otherwise, don't emit a static initialiser. 
#if !defined(BORINGSSL_NO_STATIC_INITIALIZER) #define BORINGSSL_NO_STATIC_INITIALIZER @@ -40,23 +40,23 @@ OPENSSL_ARM || OPENSSL_AARCH64) */ -/* The capability variables are defined in this file in order to work around a - * linker bug. When linking with a .a, if no symbols in a .o are referenced - * then the .o is discarded, even if it has constructor functions. - * - * This still means that any binaries that don't include some functionality - * that tests the capability values will still skip the constructor but, so - * far, the init constructor function only sets the capability variables. */ +// The capability variables are defined in this file in order to work around a +// linker bug. When linking with a .a, if no symbols in a .o are referenced +// then the .o is discarded, even if it has constructor functions. +// +// This still means that any binaries that don't include some functionality +// that tests the capability values will still skip the constructor but, so +// far, the init constructor function only sets the capability variables. #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) -/* This value must be explicitly initialised to zero in order to work around a - * bug in libtool or the linker on OS X. - * - * If not initialised then it becomes a "common symbol". When put into an - * archive, linking on OS X will fail to resolve common symbols. By - * initialising it to zero, it becomes a "data symbol", which isn't so - * affected. */ +// This value must be explicitly initialised to zero in order to work around a +// bug in libtool or the linker on OS X. +// +// If not initialised then it becomes a "common symbol". When put into an +// archive, linking on OS X will fail to resolve common symbols. By +// initialising it to zero, it becomes a "data symbol", which isn't so +// affected. 
uint32_t OPENSSL_ia32cap_P[4] = {0}; #elif defined(OPENSSL_PPC64LE) @@ -94,8 +94,8 @@ uint32_t OPENSSL_armcap_P = 0; #endif #if defined(BORINGSSL_FIPS) -/* In FIPS mode, the power-on self-test function calls |CRYPTO_library_init| - * because we have to ensure that CPUID detection occurs first. */ +// In FIPS mode, the power-on self-test function calls |CRYPTO_library_init| +// because we have to ensure that CPUID detection occurs first. #define BORINGSSL_NO_STATIC_INITIALIZER #endif @@ -116,21 +116,21 @@ __declspec(allocate(".CRT$XCU")) void(*library_init_constructor)(void) = static void do_library_init(void) __attribute__ ((constructor)); #endif -/* do_library_init is the actual initialization function. If - * BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static - * initializer. Otherwise, it is called by CRYPTO_library_init. */ +// do_library_init is the actual initialization function. If +// BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static +// initializer. Otherwise, it is called by CRYPTO_library_init. static void OPENSSL_CDECL do_library_init(void) { - /* WARNING: this function may only configure the capability variables. See the - * note above about the linker bug. */ + // WARNING: this function may only configure the capability variables. See the + // note above about the linker bug. #if defined(NEED_CPUID) OPENSSL_cpuid_setup(); #endif } void CRYPTO_library_init(void) { - /* TODO(davidben): It would be tidier if this build knob could be replaced - * with an internal lazy-init mechanism that would handle things correctly - * in-library. https://crbug.com/542879 */ + // TODO(davidben): It would be tidier if this build knob could be replaced + // with an internal lazy-init mechanism that would handle things correctly + // in-library. 
https://crbug.com/542879 #if defined(BORINGSSL_NO_STATIC_INITIALIZER) CRYPTO_once(&once, do_library_init); #endif diff --git a/crypto/curve25519/curve25519.c b/crypto/curve25519/curve25519.c index c91e78ea..e49a8b3f 100644 --- a/crypto/curve25519/curve25519.c +++ b/crypto/curve25519/curve25519.c @@ -12,12 +12,12 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP - * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as - * public domain but this file has the ISC license just to keep licencing - * simple. - * - * The field functions are shared by Ed25519 and X25519 where possible. */ +// This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP +// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as +// public domain but this file has the ISC license just to keep licencing +// simple. +// +// The field functions are shared by Ed25519 and X25519 where possible. #include @@ -55,7 +55,7 @@ static uint64_t load_4(const uint8_t *in) { } static void fe_frombytes(fe h, const uint8_t *s) { - /* Ignores top bit of h. */ + // Ignores top bit of h. int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; int64_t h2 = load_3(s + 7) << 5; @@ -101,28 +101,28 @@ static void fe_frombytes(fe h, const uint8_t *s) { h[9] = h9; } -/* Preconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. - * - * Write p=2^255-19; q=floor(h/p). - * Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). - * - * Proof: - * Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. - * Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4. - * - * Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). - * Then 0> 26; q = (h9 + q) >> 25; - /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */ + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. 
h0 += 19 * q; - /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */ + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. h1 += h0 >> 26; h0 &= kBottom26Bits; h2 += h1 >> 25; h1 &= kBottom25Bits; @@ -162,12 +162,12 @@ static void fe_tobytes(uint8_t *s, const fe h) { h8 += h7 >> 25; h7 &= kBottom25Bits; h9 += h8 >> 26; h8 &= kBottom26Bits; h9 &= kBottom25Bits; - /* h10 = carry9 */ + // h10 = carry9 - /* Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - * Have h0+...+2^230 h9 between 0 and 2^255-1; - * evidently 2^255 h10-2^255 q = 0. - * Goal: Output h0+...+2^230 h9. */ + // Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h0+...+2^230 h9 between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h0+...+2^230 h9. s[0] = h0 >> 0; s[1] = h0 >> 8; @@ -203,29 +203,29 @@ static void fe_tobytes(uint8_t *s, const fe h) { s[31] = h9 >> 18; } -/* h = f */ +// h = f static void fe_copy(fe h, const fe f) { OPENSSL_memmove(h, f, sizeof(int32_t) * 10); } -/* h = 0 */ +// h = 0 static void fe_0(fe h) { OPENSSL_memset(h, 0, sizeof(int32_t) * 10); } -/* h = 1 */ +// h = 1 static void fe_1(fe h) { OPENSSL_memset(h, 0, sizeof(int32_t) * 10); h[0] = 1; } -/* h = f + g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ +// h = f + g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
static void fe_add(fe h, const fe f, const fe g) { unsigned i; for (i = 0; i < 10; i++) { @@ -233,15 +233,15 @@ static void fe_add(fe h, const fe f, const fe g) { } } -/* h = f - g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ +// h = f - g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. static void fe_sub(fe h, const fe f, const fe g) { unsigned i; for (i = 0; i < 10; i++) { @@ -249,33 +249,33 @@ static void fe_sub(fe h, const fe f, const fe g) { } } -/* h = f * g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. - * - * Notes on implementation strategy: - * - * Using schoolbook multiplication. - * Karatsuba would save a little in some cost models. - * - * Most multiplications by 2 and 19 are 32-bit precomputations; - * cheaper than 64-bit postcomputations. - * - * There is one remaining multiplication by 19 in the carry chain; - * one *19 precomputation can be merged into this, - * but the resulting data flow is considerably less clean. - * - * There are 12 carries below. - * 10 of them are 2-way parallelizable and vectorizable. - * Can get away with 11 carries, but then data flow is much deeper. - * - * With tighter constraints on inputs can squeeze carries into int32. */ +// h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. 
+// |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. static void fe_mul(fe h, const fe f, const fe g) { int32_t f0 = f[0]; int32_t f1 = f[1]; @@ -297,8 +297,8 @@ static void fe_mul(fe h, const fe f, const fe g) { int32_t g7 = g[7]; int32_t g8 = g[8]; int32_t g9 = g[9]; - int32_t g1_19 = 19 * g1; /* 1.959375*2^29 */ - int32_t g2_19 = 19 * g2; /* 1.959375*2^30; still ok */ + int32_t g1_19 = 19 * g1; // 1.959375*2^29 + int32_t g2_19 = 19 * g2; // 1.959375*2^30; still ok int32_t g3_19 = 19 * g3; int32_t g4_19 = 19 * g4; int32_t g5_19 = 19 * g5; @@ -432,53 +432,53 @@ static void fe_mul(fe h, const fe f, const fe g) { int64_t carry8; int64_t carry9; - /* |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38)) - * i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8 - * |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19)) - * i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 */ + // |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. 
|h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.71*2^59 */ - /* |h5| <= 1.71*2^59 */ + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.71*2^59 + // |h5| <= 1.71*2^59 carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.41*2^60 */ - /* |h6| <= 1.41*2^60 */ + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.41*2^60 + // |h6| <= 1.41*2^60 carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.71*2^59 */ - /* |h7| <= 1.71*2^59 */ + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.71*2^59 + // |h7| <= 1.71*2^59 carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.72*2^34 */ - /* |h8| <= 1.41*2^60 */ + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.72*2^34 + // |h8| <= 1.41*2^60 carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on 
fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.71*2^59 */ + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.71*2^59 carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.1*2^39 */ + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.1*2^39 carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 1.01*2^24 h[0] = h0; h[1] = h1; @@ -492,16 +492,16 @@ static void fe_mul(fe h, const fe f, const fe g) { h[9] = h9; } -/* h = f * f - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. - * - * See fe_mul.c for discussion of implementation strategy. */ +// h = f * f +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// +// See fe_mul.c for discussion of implementation strategy. 
static void fe_sq(fe h, const fe f) { int32_t f0 = f[0]; int32_t f1 = f[1]; @@ -521,11 +521,11 @@ static void fe_sq(fe h, const fe f) { int32_t f5_2 = 2 * f5; int32_t f6_2 = 2 * f6; int32_t f7_2 = 2 * f7; - int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */ - int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */ - int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */ - int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */ - int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */ + int32_t f5_38 = 38 * f5; // 1.959375*2^30 + int32_t f6_19 = 19 * f6; // 1.959375*2^30 + int32_t f7_38 = 38 * f7; // 1.959375*2^30 + int32_t f8_19 = 19 * f8; // 1.959375*2^30 + int32_t f9_38 = 38 * f9; // 1.959375*2^30 int64_t f0f0 = f0 * (int64_t) f0; int64_t f0f1_2 = f0_2 * (int64_t) f1; int64_t f0f2_2 = f0_2 * (int64_t) f2; @@ -691,13 +691,13 @@ static void fe_invert(fe out, const fe z) { fe_mul(out, t1, t0); } -/* h = -f - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */ +// h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. static void fe_neg(fe h, const fe f) { unsigned i; for (i = 0; i < 10; i++) { @@ -705,10 +705,10 @@ static void fe_neg(fe h, const fe f) { } } -/* Replace (f,g) with (g,g) if b == 1; - * replace (f,g) with (f,g) if b == 0. - * - * Preconditions: b in {0,1}. */ +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. static void fe_cmov(fe f, const fe g, unsigned b) { b = 0-b; unsigned i; @@ -719,11 +719,11 @@ static void fe_cmov(fe f, const fe g, unsigned b) { } } -/* return 0 if f == 0 - * return 1 if f != 0 - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
*/ +// return 0 if f == 0 +// return 1 if f != 0 +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. static int fe_isnonzero(const fe f) { uint8_t s[32]; fe_tobytes(s, f); @@ -732,27 +732,27 @@ static int fe_isnonzero(const fe f) { return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0; } -/* return 1 if f is in {1,3,5,...,q-2} - * return 0 if f is in {0,2,4,...,q-1} - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ +// return 1 if f is in {1,3,5,...,q-2} +// return 0 if f is in {0,2,4,...,q-1} +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. static int fe_isnegative(const fe f) { uint8_t s[32]; fe_tobytes(s, f); return s[0] & 1; } -/* h = 2 * f * f - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. - * - * See fe_mul.c for discussion of implementation strategy. */ +// h = 2 * f * f +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// +// See fe_mul.c for discussion of implementation strategy. 
static void fe_sq2(fe h, const fe f) { int32_t f0 = f[0]; int32_t f1 = f[1]; @@ -772,11 +772,11 @@ static void fe_sq2(fe h, const fe f) { int32_t f5_2 = 2 * f5; int32_t f6_2 = 2 * f6; int32_t f7_2 = 2 * f7; - int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */ - int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */ - int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */ - int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */ - int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */ + int32_t f5_38 = 38 * f5; // 1.959375*2^30 + int32_t f6_19 = 19 * f6; // 1.959375*2^30 + int32_t f7_38 = 38 * f7; // 1.959375*2^30 + int32_t f8_19 = 19 * f8; // 1.959375*2^30 + int32_t f9_38 = 38 * f9; // 1.959375*2^30 int64_t f0f0 = f0 * (int64_t) f0; int64_t f0f1_2 = f0_2 * (int64_t) f1; int64_t f0f2_2 = f0_2 * (int64_t) f2; @@ -993,24 +993,24 @@ int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s) { fe_1(h->Z); fe_sq(u, h->Y); fe_mul(v, u, d); - fe_sub(u, u, h->Z); /* u = y^2-1 */ - fe_add(v, v, h->Z); /* v = dy^2+1 */ + fe_sub(u, u, h->Z); // u = y^2-1 + fe_add(v, v, h->Z); // v = dy^2+1 fe_sq(v3, v); - fe_mul(v3, v3, v); /* v3 = v^3 */ + fe_mul(v3, v3, v); // v3 = v^3 fe_sq(h->X, v3); fe_mul(h->X, h->X, v); - fe_mul(h->X, h->X, u); /* x = uv^7 */ + fe_mul(h->X, h->X, u); // x = uv^7 - fe_pow22523(h->X, h->X); /* x = (uv^7)^((q-5)/8) */ + fe_pow22523(h->X, h->X); // x = (uv^7)^((q-5)/8) fe_mul(h->X, h->X, v3); - fe_mul(h->X, h->X, u); /* x = uv^3(uv^7)^((q-5)/8) */ + fe_mul(h->X, h->X, u); // x = uv^3(uv^7)^((q-5)/8) fe_sq(vxx, h->X); fe_mul(vxx, vxx, v); - fe_sub(check, vxx, u); /* vx^2-u */ + fe_sub(check, vxx, u); // vx^2-u if (fe_isnonzero(check)) { - fe_add(check, vxx, u); /* vx^2+u */ + fe_add(check, vxx, u); // vx^2+u if (fe_isnonzero(check)) { return -1; } @@ -1051,7 +1051,7 @@ static void ge_precomp_0(ge_precomp *h) { fe_0(h->xy2d); } -/* r = p */ +// r = p static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) { fe_copy(r->X, p->X); fe_copy(r->Y, p->Y); @@ -1061,7 +1061,7 @@ static void ge_p3_to_p2(ge_p2 *r, const 
ge_p3 *p) { static const fe d2 = {-21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199}; -/* r = p */ +// r = p void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) { fe_add(r->YplusX, p->Y, p->X); fe_sub(r->YminusX, p->Y, p->X); @@ -1069,14 +1069,14 @@ void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) { fe_mul(r->T2d, p->T, d2); } -/* r = p */ +// r = p void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) { fe_mul(r->X, p->X, p->T); fe_mul(r->Y, p->Y, p->Z); fe_mul(r->Z, p->Z, p->T); } -/* r = p */ +// r = p void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) { fe_mul(r->X, p->X, p->T); fe_mul(r->Y, p->Y, p->Z); @@ -1084,14 +1084,14 @@ void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) { fe_mul(r->T, p->X, p->Y); } -/* r = p */ +// r = p static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) { ge_p3 t; x25519_ge_p1p1_to_p3(&t, p); x25519_ge_p3_to_cached(r, &t); } -/* r = 2 * p */ +// r = 2 * p static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) { fe t0; @@ -1106,14 +1106,14 @@ static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) { fe_sub(r->T, r->T, r->Z); } -/* r = 2 * p */ +// r = 2 * p static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) { ge_p2 q; ge_p3_to_p2(&q, p); ge_p2_dbl(r, &q); } -/* r = p + q */ +// r = p + q static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { fe t0; @@ -1129,7 +1129,7 @@ static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { fe_sub(r->T, t0, r->T); } -/* r = p - q */ +// r = p - q static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { fe t0; @@ -1145,7 +1145,7 @@ static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { fe_add(r->T, t0, r->T); } -/* r = p + q */ +// r = p + q void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { fe t0; @@ -1162,7 +1162,7 @@ void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { fe_sub(r->T, t0, r->T); } -/* r = p - q */ +// r = p - q void 
x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { fe t0; @@ -1182,10 +1182,10 @@ void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { static uint8_t equal(signed char b, signed char c) { uint8_t ub = b; uint8_t uc = c; - uint8_t x = ub ^ uc; /* 0: yes; 1..255: no */ - uint32_t y = x; /* 0: yes; 1..255: no */ - y -= 1; /* 4294967295: yes; 0..254: no */ - y >>= 31; /* 1: yes; 0: no */ + uint8_t x = ub ^ uc; // 0: yes; 1..255: no + uint32_t y = x; // 0: yes; 1..255: no + y -= 1; // 4294967295: yes; 0..254: no + y >>= 31; // 1: yes; 0: no return y; } @@ -1197,8 +1197,8 @@ static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) { void x25519_ge_scalarmult_small_precomp( ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) { - /* precomp_table is first expanded into matching |ge_precomp| - * elements. */ + // precomp_table is first expanded into matching |ge_precomp| + // elements. ge_precomp multiples[15]; unsigned i; @@ -1215,9 +1215,9 @@ void x25519_ge_scalarmult_small_precomp( fe_mul(out->xy2d, out->xy2d, d2); } - /* See the comment above |k25519SmallPrecomp| about the structure of the - * precomputed elements. This loop does 64 additions and 64 doublings to - * calculate the result. */ + // See the comment above |k25519SmallPrecomp| about the structure of the + // precomputed elements. This loop does 64 additions and 64 doublings to + // calculate the result. ge_p3_0(h); for (i = 63; i < 64; i--) { @@ -1249,14 +1249,14 @@ void x25519_ge_scalarmult_small_precomp( #if defined(OPENSSL_SMALL) -/* This block of code replaces the standard base-point table with a much smaller - * one. The standard table is 30,720 bytes while this one is just 960. - * - * This table contains 15 pairs of group elements, (x, y), where each field - * element is serialised with |fe_tobytes|. If |i| is the index of the group - * element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀ - * is the most significant bit). 
The value of the group element is then: - * (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. */ +// This block of code replaces the standard base-point table with a much smaller +// one. The standard table is 30,720 bytes while this one is just 960. +// +// This table contains 15 pairs of group elements, (x, y), where each field +// element is serialised with |fe_tobytes|. If |i| is the index of the group +// element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀ +// is the most significant bit). The value of the group element is then: +// (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = { 0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95, 0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0, @@ -1346,7 +1346,7 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) { #else -/* k25519Precomp[i][j] = (j+1)*256^i*B */ +// k25519Precomp[i][j] = (j+1)*256^i*B static const ge_precomp k25519Precomp[32][8] = { { { @@ -3464,7 +3464,7 @@ static const ge_precomp k25519Precomp[32][8] = { static uint8_t negative(signed char b) { uint32_t x = b; - x >>= 31; /* 1: yes; 0: no */ + x >>= 31; // 1: yes; 0: no return x; } @@ -3488,12 +3488,12 @@ static void table_select(ge_precomp *t, int pos, signed char b) { cmov(t, &minust, bnegative); } -/* h = a * B - * where a = a[0]+256*a[1]+...+256^31 a[31] - * B is the Ed25519 base point (x,4/5) with x positive. - * - * Preconditions: - * a[31] <= 127 */ +// h = a * B +// where a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. 
+// +// Preconditions: +// a[31] <= 127 void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) { signed char e[64]; signed char carry; @@ -3506,8 +3506,8 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) { e[2 * i + 0] = (a[i] >> 0) & 15; e[2 * i + 1] = (a[i] >> 4) & 15; } - /* each e[i] is between 0 and 15 */ - /* e[63] is between 0 and 7 */ + // each e[i] is between 0 and 15 + // e[63] is between 0 and 7 carry = 0; for (i = 0; i < 63; ++i) { @@ -3517,7 +3517,7 @@ void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) { e[i] -= carry << 4; } e[63] += carry; - /* each e[i] is between -8 and 8 */ + // each e[i] is between -8 and 8 ge_p3_0(h); for (i = 1; i < 64; i += 2) { @@ -3551,8 +3551,8 @@ static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) { fe_cmov(t->T2d, u->T2d, b); } -/* r = scalar * A. - * where a = a[0]+256*a[1]+...+256^31 a[31]. */ +// r = scalar * A. +// where a = a[0]+256*a[1]+...+256^31 a[31]. void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) { ge_p2 Ai_p2[8]; ge_cached Ai[16]; @@ -3706,15 +3706,15 @@ static const ge_precomp Bi[8] = { }, }; -/* r = a * A + b * B - * where a = a[0]+256*a[1]+...+256^31 a[31]. - * and b = b[0]+256*b[1]+...+256^31 b[31]. - * B is the Ed25519 base point (x,4/5) with x positive. */ +// r = a * A + b * B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a, const ge_p3 *A, const uint8_t *b) { signed char aslide[256]; signed char bslide[256]; - ge_cached Ai[8]; /* A,3A,5A,7A,9A,11A,13A,15A */ + ge_cached Ai[8]; // A,3A,5A,7A,9A,11A,13A,15A ge_p1p1 t; ge_p3 u; ge_p3 A2; @@ -3779,16 +3779,16 @@ static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a, } } -/* The set of scalars is \Z/l - * where l = 2^252 + 27742317777372353535851937790883648493. 
*/ +// The set of scalars is \Z/l +// where l = 2^252 + 27742317777372353535851937790883648493. -/* Input: - * s[0]+256*s[1]+...+256^63*s[63] = s - * - * Output: - * s[0]+256*s[1]+...+256^31*s[31] = s mod l - * where l = 2^252 + 27742317777372353535851937790883648493. - * Overwrites s in place. */ +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +// Overwrites s in place. void x25519_sc_reduce(uint8_t *s) { int64_t s0 = 2097151 & load_3(s); int64_t s1 = 2097151 & (load_4(s + 2) >> 5); @@ -4122,14 +4122,14 @@ void x25519_sc_reduce(uint8_t *s) { s[31] = s11 >> 17; } -/* Input: - * a[0]+256*a[1]+...+256^31*a[31] = a - * b[0]+256*b[1]+...+256^31*b[31] = b - * c[0]+256*c[1]+...+256^31*c[31] = c - * - * Output: - * s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l - * where l = 2^252 + 27742317777372353535851937790883648493. */ +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b, const uint8_t *c) { int64_t a0 = 2097151 & load_3(a); @@ -4716,10 +4716,10 @@ static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], #else -/* Replace (f,g) with (g,f) if b == 1; - * replace (f,g) with (f,g) if b == 0. - * - * Preconditions: b in {0,1}. */ +// Replace (f,g) with (g,f) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. static void fe_cswap(fe f, fe g, unsigned int b) { b = 0-b; unsigned i; @@ -4731,14 +4731,14 @@ static void fe_cswap(fe f, fe g, unsigned int b) { } } -/* h = f * 121666 - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
- * - * Postconditions: - * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */ +// h = f * 121666 +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. static void fe_mul121666(fe h, fe f) { int32_t f0 = f[0]; int32_t f1 = f[1]; @@ -4858,25 +4858,25 @@ static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], x25519_scalar_mult_generic(out, scalar, point); } -#endif /* BORINGSSL_X25519_X86_64 */ +#endif // BORINGSSL_X25519_X86_64 void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) { RAND_bytes(out_private_key, 32); - /* All X25519 implementations should decode scalars correctly (see - * https://tools.ietf.org/html/rfc7748#section-5). However, if an - * implementation doesn't then it might interoperate with random keys a - * fraction of the time because they'll, randomly, happen to be correctly - * formed. - * - * Thus we do the opposite of the masking here to make sure that our private - * keys are never correctly masked and so, hopefully, any incorrect - * implementations are deterministically broken. - * - * This does not affect security because, although we're throwing away - * entropy, a valid implementation of scalarmult should throw away the exact - * same bits anyway. */ + // All X25519 implementations should decode scalars correctly (see + // https://tools.ietf.org/html/rfc7748#section-5). However, if an + // implementation doesn't then it might interoperate with random keys a + // fraction of the time because they'll, randomly, happen to be correctly + // formed. + // + // Thus we do the opposite of the masking here to make sure that our private + // keys are never correctly masked and so, hopefully, any incorrect + // implementations are deterministically broken. 
+ // + // This does not affect security because, although we're throwing away + // entropy, a valid implementation of scalarmult should throw away the exact + // same bits anyway. out_private_key[0] |= 7; out_private_key[31] &= 63; out_private_key[31] |= 128; @@ -4888,15 +4888,15 @@ int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32], const uint8_t peer_public_value[32]) { static const uint8_t kZeros[32] = {0}; x25519_scalar_mult(out_shared_key, private_key, peer_public_value); - /* The all-zero output results when the input is a point of small order. */ + // The all-zero output results when the input is a point of small order. return CRYPTO_memcmp(kZeros, out_shared_key, 32) != 0; } #if defined(BORINGSSL_X25519_X86_64) -/* When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with - * the Montgomery ladder because it's faster. Otherwise it's done using the - * Ed25519 tables. */ +// When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with +// the Montgomery ladder because it's faster. Otherwise it's done using the +// Ed25519 tables. void X25519_public_from_private(uint8_t out_public_value[32], const uint8_t private_key[32]) { @@ -4925,8 +4925,8 @@ void X25519_public_from_private(uint8_t out_public_value[32], ge_p3 A; x25519_ge_scalarmult_base(&A, e); - /* We only need the u-coordinate of the curve25519 point. The map is - * u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). */ + // We only need the u-coordinate of the curve25519 point. The map is + // u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). 
fe zplusy, zminusy, zminusy_inv; fe_add(zplusy, A.Z, A.Y); fe_sub(zminusy, A.Z, A.Y); @@ -4935,4 +4935,4 @@ void X25519_public_from_private(uint8_t out_public_value[32], fe_tobytes(out_public_value, zplusy); } -#endif /* BORINGSSL_X25519_X86_64 */ +#endif // BORINGSSL_X25519_X86_64 diff --git a/crypto/curve25519/internal.h b/crypto/curve25519/internal.h index ee865bdd..9487a6c1 100644 --- a/crypto/curve25519/internal.h +++ b/crypto/curve25519/internal.h @@ -32,15 +32,15 @@ void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32], #if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE) #define BORINGSSL_X25519_NEON -/* x25519_NEON is defined in asm/x25519-arm.S. */ +// x25519_NEON is defined in asm/x25519-arm.S. void x25519_NEON(uint8_t out[32], const uint8_t scalar[32], const uint8_t point[32]); #endif -/* fe means field element. Here the field is \Z/(2^255-19). An element t, - * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 - * t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on - * context. */ +// fe means field element. Here the field is \Z/(2^255-19). An element t, +// entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. typedef int32_t fe[10]; /* ge means group element. @@ -103,7 +103,7 @@ void x25519_sc_reduce(uint8_t *s); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CURVE25519_INTERNAL_H */ +#endif // OPENSSL_HEADER_CURVE25519_INTERNAL_H diff --git a/crypto/curve25519/spake25519.c b/crypto/curve25519/spake25519.c index 5b794b37..8ebedf99 100644 --- a/crypto/curve25519/spake25519.c +++ b/crypto/curve25519/spake25519.c @@ -25,80 +25,82 @@ #include "../internal.h" -/* The following precomputation tables are for the following - * points used in the SPAKE2 protocol. 
- * - * N: - * x: 49918732221787544735331783592030787422991506689877079631459872391322455579424 - * y: 54629554431565467720832445949441049581317094546788069926228343916274969994000 - * encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778 - * - * M: - * x: 31406539342727633121250288103050113562375374900226415211311216773867585644232 - * y: 21177308356423958466833845032658859666296341766942662650232962324899758529114 - * encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e - * - * These points and their precomputation tables are generated with the - * following Python code. For a description of the precomputation table, - * see curve25519.c in this directory. - * - * Exact copies of the source code are kept in bug 27296743. - * - * import hashlib - * import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py - * - * SEED_N = 'edwards25519 point generation seed (N)' - * SEED_M = 'edwards25519 point generation seed (M)' - * - * def genpoint(seed): - * v = hashlib.sha256(seed).digest() - * it = 1 - * while True: - * try: - * x,y = E.decodepoint(v) - * except Exception, e: - * print e - * it += 1 - * v = hashlib.sha256(v).digest() - * continue - * print "Found in %d iterations:" % it - * print " x = %d" % x - * print " y = %d" % y - * print " Encoded (hex)" - * print E.encodepoint((x,y)).encode('hex') - * return (x,y) - * - * def gentable(P): - * t = [] - * for i in range(1,16): - * k = (i >> 3 & 1) * (1 << 192) + \ - * (i >> 2 & 1) * (1 << 128) + \ - * (i >> 1 & 1) * (1 << 64) + \ - * (i & 1) - * t.append(E.scalarmult(P, k)) - * return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t) - * - * def printtable(table, name): - * print "static const uint8_t %s[15 * 2 * 32] = {" % name, - * for i in range(15 * 2 * 32): - * if i % 12 == 0: - * print "\n ", - * print " 0x%02x," % ord(table[i]), - * print "\n};" - * - * if __name__ == "__main__": - * print "Searching for N" - * N = genpoint(SEED_N) - * print "Generating 
precomputation table for N" - * Ntable = gentable(N) - * printtable(Ntable, "kSpakeNSmallPrecomp") - * - * print "Searching for M" - * M = genpoint(SEED_M) - * print "Generating precomputation table for M" - * Mtable = gentable(M) - * printtable(Mtable, "kSpakeMSmallPrecomp") - */ +// The following precomputation tables are for the following +// points used in the SPAKE2 protocol. +// +// N: +// x: 49918732221787544735331783592030787422991506689877079631459872391322455579424 +// y: 54629554431565467720832445949441049581317094546788069926228343916274969994000 +// encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778 +// +// M: +// x: 31406539342727633121250288103050113562375374900226415211311216773867585644232 +// y: 21177308356423958466833845032658859666296341766942662650232962324899758529114 +// encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e +// +// These points and their precomputation tables are generated with the +// following Python code. For a description of the precomputation table, +// see curve25519.c in this directory. +// +// Exact copies of the source code are kept in bug 27296743. 
+// +// import hashlib +// import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py +// +// SEED_N = 'edwards25519 point generation seed (N)' +// SEED_M = 'edwards25519 point generation seed (M)' + +/* +def genpoint(seed): + v = hashlib.sha256(seed).digest() + it = 1 + while True: + try: + x,y = E.decodepoint(v) + except Exception, e: + print e + it += 1 + v = hashlib.sha256(v).digest() + continue + print "Found in %d iterations:" % it + print " x = %d" % x + print " y = %d" % y + print " Encoded (hex)" + print E.encodepoint((x,y)).encode('hex') + return (x,y) + +def gentable(P): + t = [] + for i in range(1,16): + k = (i >> 3 & 1) * (1 << 192) + \ + (i >> 2 & 1) * (1 << 128) + \ + (i >> 1 & 1) * (1 << 64) + \ + (i & 1) + t.append(E.scalarmult(P, k)) + return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t) + +def printtable(table, name): + print "static const uint8_t %s[15 * 2 * 32] = {" % name, + for i in range(15 * 2 * 32): + if i % 12 == 0: + print "\n ", + print " 0x%02x," % ord(table[i]), + print "\n};" + +if __name__ == "__main__": + print "Searching for N" + N = genpoint(SEED_N) + print "Generating precomputation table for N" + Ntable = gentable(N) + printtable(Ntable, "kSpakeNSmallPrecomp") + + print "Searching for M" + M = genpoint(SEED_M) + print "Generating precomputation table for M" + Mtable = gentable(M) + printtable(Mtable, "kSpakeMSmallPrecomp") +*/ + static const uint8_t kSpakeNSmallPrecomp[15 * 2 * 32] = { 0x20, 0x1b, 0xc5, 0xb3, 0x43, 0x17, 0x71, 0x10, 0x44, 0x1e, 0x73, 0xb3, 0xae, 0x3f, 0xbf, 0x9f, 0xf5, 0x44, 0xc8, 0x13, 0x8f, 0xd1, 0x01, 0xc2, @@ -317,8 +319,8 @@ void SPAKE2_CTX_free(SPAKE2_CTX *ctx) { OPENSSL_free(ctx); } -/* left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian - * order. */ +// left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian +// order. 
static void left_shift_3(uint8_t n[32]) { uint8_t carry = 0; unsigned i; @@ -344,15 +346,15 @@ int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, uint8_t private_tmp[64]; RAND_bytes(private_tmp, sizeof(private_tmp)); x25519_sc_reduce(private_tmp); - /* Multiply by the cofactor (eight) so that we'll clear it when operating on - * the peer's point later in the protocol. */ + // Multiply by the cofactor (eight) so that we'll clear it when operating on + // the peer's point later in the protocol. left_shift_3(private_tmp); OPENSSL_memcpy(ctx->private_key, private_tmp, sizeof(ctx->private_key)); ge_p3 P; x25519_ge_scalarmult_base(&P, ctx->private_key); - /* mask = h(password) * . */ + // mask = h(password) * . uint8_t password_tmp[SHA512_DIGEST_LENGTH]; SHA512(password, password_len, password_tmp); OPENSSL_memcpy(ctx->password_hash, password_tmp, sizeof(ctx->password_hash)); @@ -365,13 +367,13 @@ int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, ? kSpakeMSmallPrecomp : kSpakeNSmallPrecomp); - /* P* = P + mask. */ + // P* = P + mask. ge_cached mask_cached; x25519_ge_p3_to_cached(&mask_cached, &mask); ge_p1p1 Pstar; x25519_ge_add(&Pstar, &P, &mask_cached); - /* Encode P* */ + // Encode P* ge_p2 Pstar_proj; x25519_ge_p1p1_to_p2(&Pstar_proj, &Pstar); x25519_ge_tobytes(ctx->my_msg, &Pstar_proj); @@ -408,11 +410,11 @@ int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len, ge_p3 Qstar; if (0 != x25519_ge_frombytes_vartime(&Qstar, their_msg)) { - /* Point received from peer was not on the curve. */ + // Point received from peer was not on the curve. return 0; } - /* Unmask peer's value. */ + // Unmask peer's value. 
ge_p3 peers_mask; x25519_ge_scalarmult_small_precomp(&peers_mask, ctx->password_scalar, ctx->my_role == spake2_role_alice diff --git a/crypto/curve25519/spake25519_test.cc b/crypto/curve25519/spake25519_test.cc index 97f17b7a..cdf4ff58 100644 --- a/crypto/curve25519/spake25519_test.cc +++ b/crypto/curve25519/spake25519_test.cc @@ -25,7 +25,7 @@ #include "../internal.h" -/* TODO(agl): add tests with fixed vectors once SPAKE2 is nailed down. */ +// TODO(agl): add tests with fixed vectors once SPAKE2 is nailed down. struct SPAKE2Run { bool Run() { diff --git a/crypto/curve25519/x25519-x86_64.c b/crypto/curve25519/x25519-x86_64.c index 9c3d4144..d677b52e 100644 --- a/crypto/curve25519/x25519-x86_64.c +++ b/crypto/curve25519/x25519-x86_64.c @@ -12,12 +12,12 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP - * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as - * public domain but this file has the ISC license just to keep licencing - * simple. - * - * The field functions are shared by Ed25519 and X25519 where possible. */ +// This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP +// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as +// public domain but this file has the ISC license just to keep licencing +// simple. +// +// The field functions are shared by Ed25519 and X25519 where possible. 
#include @@ -31,7 +31,7 @@ typedef struct { uint64_t v[5]; } fe25519; -/* These functions are defined in asm/x25519-x86_64.S */ +// These functions are defined in asm/x25519-x86_64.S void x25519_x86_64_work_cswap(fe25519 *, uint64_t); void x25519_x86_64_mul(fe25519 *out, const fe25519 *a, const fe25519 *b); void x25519_x86_64_square(fe25519 *out, const fe25519 *a); @@ -46,7 +46,7 @@ static void fe25519_setint(fe25519 *r, unsigned v) { r->v[4] = 0; } -/* Assumes input x being reduced below 2^255 */ +// Assumes input x being reduced below 2^255 static void fe25519_pack(unsigned char r[32], const fe25519 *x) { fe25519 t; t = *x; @@ -244,4 +244,4 @@ void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32], fe25519_pack(out, &t); } -#endif /* BORINGSSL_X25519_X86_64 */ +#endif // BORINGSSL_X25519_X86_64 diff --git a/crypto/dh/check.c b/crypto/dh/check.c index 55fc1c30..454ad44a 100644 --- a/crypto/dh/check.c +++ b/crypto/dh/check.c @@ -70,7 +70,7 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) { int ok = 0; - /* Check |pub_key| is greater than 1. */ + // Check |pub_key| is greater than 1. BIGNUM *tmp = BN_CTX_get(ctx); if (tmp == NULL || !BN_set_word(tmp, 1)) { @@ -80,7 +80,7 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) { *out_flags |= DH_CHECK_PUBKEY_TOO_SMALL; } - /* Check |pub_key| is less than |dh->p| - 1. */ + // Check |pub_key| is less than |dh->p| - 1. if (!BN_copy(tmp, dh->p) || !BN_sub_word(tmp, 1)) { goto err; @@ -90,9 +90,9 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) { } if (dh->q != NULL) { - /* Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114 - * groups which are not safe primes but pick a generator on a prime-order - * subgroup of size |dh->q|. */ + // Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114 + // groups which are not safe primes but pick a generator on a prime-order + // subgroup of size |dh->q|. 
if (!BN_mod_exp_mont(tmp, pub_key, dh->q, dh->p, ctx, NULL)) { goto err; } @@ -111,13 +111,12 @@ err: int DH_check(const DH *dh, int *out_flags) { - /* Check that p is a safe prime and if g is 2, 3 or 5, check that it is a - * suitable generator where: - * for 2, p mod 24 == 11 - * for 3, p mod 12 == 5 - * for 5, p mod 10 == 3 or 7 - * should hold. - */ + // Check that p is a safe prime and if g is 2, 3 or 5, check that it is a + // suitable generator where: + // for 2, p mod 24 == 11 + // for 3, p mod 12 == 5 + // for 5, p mod 10 == 3 or 7 + // should hold. int ok = 0, r; BN_CTX *ctx = NULL; BN_ULONG l; @@ -144,7 +143,7 @@ int DH_check(const DH *dh, int *out_flags) { } else if (BN_cmp(dh->g, dh->p) >= 0) { *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } else { - /* Check g^q == 1 mod p */ + // Check g^q == 1 mod p if (!BN_mod_exp_mont(t1, dh->g, dh->q, dh->p, ctx, NULL)) { goto err; } @@ -159,7 +158,7 @@ int DH_check(const DH *dh, int *out_flags) { if (!r) { *out_flags |= DH_CHECK_Q_NOT_PRIME; } - /* Check p == 1 mod q i.e. q divides p - 1 */ + // Check p == 1 mod q i.e. q divides p - 1 if (!BN_div(t1, t2, dh->p, dh->q, ctx)) { goto err; } diff --git a/crypto/dh/dh.c b/crypto/dh/dh.c index c884ae34..3356776f 100644 --- a/crypto/dh/dh.c +++ b/crypto/dh/dh.c @@ -138,32 +138,30 @@ void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, } int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb) { - /* We generate DH parameters as follows - * find a prime q which is prime_bits/2 bits long. - * p=(2*q)+1 or (p-1)/2 = q - * For this case, g is a generator if - * g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1. - * Since the factors of p-1 are q and 2, we just need to check - * g^2 mod p != 1 and g^q mod p != 1. - * - * Having said all that, - * there is another special case method for the generators 2, 3 and 5. - * for 2, p mod 24 == 11 - * for 3, p mod 12 == 5 <<<<< does not work for safe primes. 
- * for 5, p mod 10 == 3 or 7 - * - * Thanks to Phil Karn for the pointers about the - * special generators and for answering some of my questions. - * - * I've implemented the second simple method :-). - * Since DH should be using a safe prime (both p and q are prime), - * this generator function can take a very very long time to run. - */ - - /* Actually there is no reason to insist that 'generator' be a generator. - * It's just as OK (and in some sense better) to use a generator of the - * order-q subgroup. - */ + // We generate DH parameters as follows + // find a prime q which is prime_bits/2 bits long. + // p=(2*q)+1 or (p-1)/2 = q + // For this case, g is a generator if + // g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1. + // Since the factors of p-1 are q and 2, we just need to check + // g^2 mod p != 1 and g^q mod p != 1. + // + // Having said all that, + // there is another special case method for the generators 2, 3 and 5. + // for 2, p mod 24 == 11 + // for 3, p mod 12 == 5 <<<<< does not work for safe primes. + // for 5, p mod 10 == 3 or 7 + // + // Thanks to Phil Karn for the pointers about the + // special generators and for answering some of my questions. + // + // I've implemented the second simple method :-). + // Since DH should be using a safe prime (both p and q are prime), + // this generator function can take a very very long time to run. + + // Actually there is no reason to insist that 'generator' be a generator. + // It's just as OK (and in some sense better) to use a generator of the + // order-q subgroup. 
BIGNUM *t1, *t2; int g, ok = 0; @@ -180,7 +178,7 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c goto err; } - /* Make sure |dh| has the necessary elements */ + // Make sure |dh| has the necessary elements if (dh->p == NULL) { dh->p = BN_new(); if (dh->p == NULL) { @@ -213,14 +211,14 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c if (!BN_set_word(t2, 3)) { goto err; } - /* BN_set_word(t3,7); just have to miss - * out on these ones :-( */ + // BN_set_word(t3,7); just have to miss + // out on these ones :-( g = 5; } else { - /* in the general case, don't worry if 'generator' is a - * generator or not: since we are using safe primes, - * it will generate either an order-q or an order-2q group, - * which both is OK */ + // in the general case, don't worry if 'generator' is a + // generator or not: since we are using safe primes, + // it will generate either an order-q or an order-2q group, + // which both is OK if (!BN_set_word(t1, 2)) { goto err; } @@ -299,7 +297,7 @@ int DH_generate_key(DH *dh) { goto err; } } else { - /* secret exponent length */ + // secret exponent length unsigned priv_bits = dh->priv_length; if (priv_bits == 0) { const unsigned p_bits = BN_num_bits(dh->p); diff --git a/crypto/dh/dh_asn1.c b/crypto/dh/dh_asn1.c index 1a147eea..9d321807 100644 --- a/crypto/dh/dh_asn1.c +++ b/crypto/dh/dh_asn1.c @@ -76,7 +76,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) { static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* A DH object may be missing some components. */ + // A DH object may be missing some components. 
OPENSSL_PUT_ERROR(DH, ERR_R_PASSED_NULL_PARAMETER); return 0; } diff --git a/crypto/digest_extra/digest_extra.c b/crypto/digest_extra/digest_extra.c index b18759af..ab7ff593 100644 --- a/crypto/digest_extra/digest_extra.c +++ b/crypto/digest_extra/digest_extra.c @@ -82,11 +82,11 @@ static const struct nid_to_digest nid_to_digest_mapping[] = { {NID_sha384, EVP_sha384, SN_sha384, LN_sha384}, {NID_sha512, EVP_sha512, SN_sha512, LN_sha512}, {NID_md5_sha1, EVP_md5_sha1, SN_md5_sha1, LN_md5_sha1}, - /* As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding - * hash function when given a signature OID. To avoid unintended lax parsing - * of hash OIDs, this is no longer supported for lookup by OID or NID. - * Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to - * consumers so we retain it there. */ + // As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding + // hash function when given a signature OID. To avoid unintended lax parsing + // of hash OIDs, this is no longer supported for lookup by OID or NID. + // Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to + // consumers so we retain it there. {NID_undef, EVP_sha1, SN_dsaWithSHA, LN_dsaWithSHA}, {NID_undef, EVP_sha1, SN_dsaWithSHA1, LN_dsaWithSHA1}, {NID_undef, EVP_sha1, SN_ecdsa_with_SHA1, NULL}, @@ -104,7 +104,7 @@ static const struct nid_to_digest nid_to_digest_mapping[] = { const EVP_MD* EVP_get_digestbynid(int nid) { if (nid == NID_undef) { - /* Skip the |NID_undef| entries in |nid_to_digest_mapping|. */ + // Skip the |NID_undef| entries in |nid_to_digest_mapping|. 
return NULL; } @@ -122,19 +122,19 @@ static const struct { uint8_t oid_len; const EVP_MD *(*md_func) (void); } kMDOIDs[] = { - /* 1.2.840.113549.2.4 */ + // 1.2.840.113549.2.4 { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04}, 8, EVP_md4 }, - /* 1.2.840.113549.2.5 */ + // 1.2.840.113549.2.5 { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05}, 8, EVP_md5 }, - /* 1.3.14.3.2.26 */ + // 1.3.14.3.2.26 { {0x2b, 0x0e, 0x03, 0x02, 0x1a}, 5, EVP_sha1 }, - /* 2.16.840.1.101.3.4.2.1 */ + // 2.16.840.1.101.3.4.2.1 { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01}, 9, EVP_sha256 }, - /* 2.16.840.1.101.3.4.2.2 */ + // 2.16.840.1.101.3.4.2.2 { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02}, 9, EVP_sha384 }, - /* 2.16.840.1.101.3.4.2.3 */ + // 2.16.840.1.101.3.4.2.3 { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03}, 9, EVP_sha512 }, - /* 2.16.840.1.101.3.4.2.4 */ + // 2.16.840.1.101.3.4.2.4 { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04}, 9, EVP_sha224 }, }; @@ -151,7 +151,7 @@ static const EVP_MD *cbs_to_md(const CBS *cbs) { } const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) { - /* Handle objects with no corresponding OID. */ + // Handle objects with no corresponding OID. if (obj->nid != NID_undef) { return EVP_get_digestbynid(obj->nid); } @@ -175,10 +175,10 @@ const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs) { return NULL; } - /* The parameters, if present, must be NULL. Historically, whether the NULL - * was included or omitted was not well-specified. When parsing an - * AlgorithmIdentifier, we allow both. (Note this code is not used when - * verifying RSASSA-PKCS1-v1_5 signatures.) */ + // The parameters, if present, must be NULL. Historically, whether the NULL + // was included or omitted was not well-specified. When parsing an + // AlgorithmIdentifier, we allow both. (Note this code is not used when + // verifying RSASSA-PKCS1-v1_5 signatures.) 
if (CBS_len(&algorithm) > 0) { CBS param; if (!CBS_get_asn1(&algorithm, ¶m, CBS_ASN1_NULL) || diff --git a/crypto/digest_extra/internal.h b/crypto/digest_extra/internal.h index 264405f4..1df200ec 100644 --- a/crypto/digest_extra/internal.h +++ b/crypto/digest_extra/internal.h @@ -26,7 +26,7 @@ const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DIGEST_EXTRA_INTERNAL */ +#endif // OPENSSL_HEADER_DIGEST_EXTRA_INTERNAL diff --git a/crypto/dsa/dsa.c b/crypto/dsa/dsa.c index d445f148..1dfc567f 100644 --- a/crypto/dsa/dsa.c +++ b/crypto/dsa/dsa.c @@ -78,8 +78,8 @@ #define OPENSSL_DSA_MAX_MODULUS_BITS 10000 -/* Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of - * Rabin-Miller */ +// Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of +// Rabin-Miller #define DSS_prime_checks 50 static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; @@ -186,7 +186,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, return 0; } if (seed_len > (size_t)qsize) { - /* Only consume as much seed as is expected. */ + // Only consume as much seed as is expected. seed_len = qsize; } OPENSSL_memcpy(seed, seed_in, seed_len); @@ -217,9 +217,9 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } for (;;) { - /* Find q. */ + // Find q. for (;;) { - /* step 1 */ + // step 1 if (!BN_GENCB_call(cb, 0, m++)) { goto err; } @@ -230,12 +230,12 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } } else { - /* If we come back through, use random seed next time. */ + // If we come back through, use random seed next time. 
seed_in = NULL; } OPENSSL_memcpy(buf, seed, qsize); OPENSSL_memcpy(buf2, seed, qsize); - /* precompute "SEED + 1" for step 7: */ + // precompute "SEED + 1" for step 7: for (i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { @@ -243,7 +243,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } } - /* step 2 */ + // step 2 if (!EVP_Digest(seed, qsize, md, NULL, evpmd, NULL) || !EVP_Digest(buf, qsize, buf2, NULL, evpmd, NULL)) { goto err; @@ -252,14 +252,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, md[i] ^= buf2[i]; } - /* step 3 */ + // step 3 md[0] |= 0x80; md[qsize - 1] |= 0x01; if (!BN_bin2bn(md, qsize, q)) { goto err; } - /* step 4 */ + // step 4 r = BN_is_prime_fasttest_ex(q, DSS_prime_checks, ctx, use_random_seed, cb); if (r > 0) { break; @@ -268,17 +268,17 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* do a callback call */ - /* step 5 */ + // do a callback call + // step 5 } if (!BN_GENCB_call(cb, 2, 0) || !BN_GENCB_call(cb, 3, 0)) { goto err; } - /* step 6 */ + // step 6 counter = 0; - /* "offset = 2" */ + // "offset = 2" n = (bits - 1) / 160; @@ -287,11 +287,11 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 7 */ + // step 7 BN_zero(W); - /* now 'buf' contains "SEED + offset - 1" */ + // now 'buf' contains "SEED + offset - 1" for (k = 0; k <= n; k++) { - /* obtain "SEED + offset + k" by incrementing: */ + // obtain "SEED + offset + k" by incrementing: for (i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { @@ -303,7 +303,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 8 */ + // step 8 if (!BN_bin2bn(md, qsize, r0) || !BN_lshift(r0, r0, (qsize << 3) * k) || !BN_add(W, W, r0)) { @@ -311,14 +311,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } } - /* more of step 
8 */ + // more of step 8 if (!BN_mask_bits(W, bits - 1) || !BN_copy(X, W) || !BN_add(X, X, test)) { goto err; } - /* step 9 */ + // step 9 if (!BN_lshift1(r0, q) || !BN_mod(c, X, r0, ctx) || !BN_sub(r0, c, BN_value_one()) || @@ -326,23 +326,23 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 10 */ + // step 10 if (BN_cmp(p, test) >= 0) { - /* step 11 */ + // step 11 r = BN_is_prime_fasttest_ex(p, DSS_prime_checks, ctx, 1, cb); if (r > 0) { - goto end; /* found it */ + goto end; // found it } if (r != 0) { goto err; } } - /* step 13 */ + // step 13 counter++; - /* "offset = offset + n + 1" */ + // "offset = offset + n + 1" - /* step 14 */ + // step 14 if (counter >= 4096) { break; } @@ -353,8 +353,8 @@ end: goto err; } - /* We now need to generate g */ - /* Set r0=(p-1)/q */ + // We now need to generate g + // Set r0=(p-1)/q if (!BN_sub(test, p, BN_value_one()) || !BN_div(r0, NULL, test, q, ctx)) { goto err; @@ -366,7 +366,7 @@ end: } for (;;) { - /* g=test^r0%p */ + // g=test^r0%p if (!BN_mod_exp_mont(g, test, r0, p, ctx, mont)) { goto err; } @@ -544,9 +544,9 @@ redo: } if (digest_len > BN_num_bytes(dsa->q)) { - /* if the digest length is greater than the size of q use the - * BN_num_bits(dsa->q) leftmost bits of the digest, see - * fips 186-3, 4.2 */ + // if the digest length is greater than the size of q use the + // BN_num_bits(dsa->q) leftmost bits of the digest, see + // fips 186-3, 4.2 digest_len = BN_num_bytes(dsa->q); } @@ -554,12 +554,12 @@ redo: goto err; } - /* Compute s = inv(k) (m + xr) mod q */ + // Compute s = inv(k) (m + xr) mod q if (!BN_mod_mul(&xr, dsa->priv_key, r, dsa->q, ctx)) { - goto err; /* s = xr */ + goto err; // s = xr } if (!BN_add(s, &xr, &m)) { - goto err; /* s = m + xr */ + goto err; // s = m + xr } if (BN_cmp(s, dsa->q) > 0) { if (!BN_sub(s, s, dsa->q)) { @@ -570,8 +570,8 @@ redo: goto err; } - /* Redo if r or s is zero as required by FIPS 186-3: this is - * very unlikely. 
*/ + // Redo if r or s is zero as required by FIPS 186-3: this is + // very unlikely. if (BN_is_zero(r) || BN_is_zero(s)) { if (noredo) { reason = DSA_R_NEED_NEW_SETUP_VALUES; @@ -624,7 +624,7 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, } i = BN_num_bits(dsa->q); - /* fips 186-3 allows only different sizes for q */ + // fips 186-3 allows only different sizes for q if (i != 160 && i != 224 && i != 256) { OPENSSL_PUT_ERROR(DSA, DSA_R_BAD_Q_VALUE); return 0; @@ -655,17 +655,17 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* Calculate W = inv(S) mod Q - * save W in u2 */ + // Calculate W = inv(S) mod Q + // save W in u2 if (BN_mod_inverse(&u2, sig->s, dsa->q, ctx) == NULL) { goto err; } - /* save M in u1 */ + // save M in u1 if (digest_len > (i >> 3)) { - /* if the digest length is greater than the size of q use the - * BN_num_bits(dsa->q) leftmost bits of the digest, see - * fips 186-3, 4.2 */ + // if the digest length is greater than the size of q use the + // BN_num_bits(dsa->q) leftmost bits of the digest, see + // fips 186-3, 4.2 digest_len = (i >> 3); } @@ -673,12 +673,12 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* u1 = M * w mod q */ + // u1 = M * w mod q if (!BN_mod_mul(&u1, &u1, &u2, dsa->q, ctx)) { goto err; } - /* u2 = r * w mod q */ + // u2 = r * w mod q if (!BN_mod_mul(&u2, sig->r, &u2, dsa->q, ctx)) { goto err; } @@ -694,14 +694,14 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* BN_copy(&u1,&t1); */ - /* let u1 = u1 mod q */ + // BN_copy(&u1,&t1); + // let u1 = u1 mod q if (!BN_mod(&u1, &t1, dsa->q, ctx)) { goto err; } - /* V is now in u1. If the signature is correct, it will be - * equal to R. */ + // V is now in u1. If the signature is correct, it will be + // equal to R. 
*out_valid = BN_ucmp(&u1, sig->r) == 0; ret = 1; @@ -758,7 +758,7 @@ int DSA_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* Ensure that the signature uses DER and doesn't have trailing garbage. */ + // Ensure that the signature uses DER and doesn't have trailing garbage. int der_len = i2d_DSA_SIG(s, &der); if (der_len < 0 || (size_t)der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len)) { @@ -773,8 +773,8 @@ err: return ret; } -/* der_len_len returns the number of bytes needed to represent a length of |len| - * in DER. */ +// der_len_len returns the number of bytes needed to represent a length of |len| +// in DER. static size_t der_len_len(size_t len) { if (len < 0x80) { return 1; @@ -789,18 +789,18 @@ static size_t der_len_len(size_t len) { int DSA_size(const DSA *dsa) { size_t order_len = BN_num_bytes(dsa->q); - /* Compute the maximum length of an |order_len| byte integer. Defensively - * assume that the leading 0x00 is included. */ + // Compute the maximum length of an |order_len| byte integer. Defensively + // assume that the leading 0x00 is included. size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len; if (integer_len < order_len) { return 0; } - /* A DSA signature is two INTEGERs. */ + // A DSA signature is two INTEGERs. size_t value_len = 2 * integer_len; if (value_len < integer_len) { return 0; } - /* Add the header. */ + // Add the header. 
size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len; if (ret < value_len) { return 0; @@ -835,7 +835,7 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Get random k */ + // Get random k if (!BN_rand_range_ex(&k, 1, dsa->q)) { goto err; } @@ -849,16 +849,16 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Compute r = (g^k mod p) mod q */ + // Compute r = (g^k mod p) mod q if (!BN_copy(&kq, &k)) { goto err; } - /* We do not want timing information to leak the length of k, - * so we compute g^k using an equivalent exponent of fixed length. - * - * (This is a kludge that we need because the BN_mod_exp_mont() - * does not let us specify the desired timing behaviour.) */ + // We do not want timing information to leak the length of k, + // so we compute g^k using an equivalent exponent of fixed length. + // + // (This is a kludge that we need because the BN_mod_exp_mont() + // does not let us specify the desired timing behaviour.) if (!BN_add(&kq, &kq, dsa->q)) { goto err; @@ -875,8 +875,8 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little - * Theorem. */ + // Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little + // Theorem. kinv = BN_new(); if (kinv == NULL || !bn_mod_inverse_prime(kinv, &k, dsa->q, ctx, dsa->method_mont_q)) { diff --git a/crypto/dsa/dsa_asn1.c b/crypto/dsa/dsa_asn1.c index ff5ee003..97fd07fe 100644 --- a/crypto/dsa/dsa_asn1.c +++ b/crypto/dsa/dsa_asn1.c @@ -75,7 +75,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) { static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* A DSA object may be missing some components. */ + // A DSA object may be missing some components. 
OPENSSL_PUT_ERROR(DSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } diff --git a/crypto/dsa/dsa_test.cc b/crypto/dsa/dsa_test.cc index 63b78036..295a7fd1 100644 --- a/crypto/dsa/dsa_test.cc +++ b/crypto/dsa/dsa_test.cc @@ -71,8 +71,8 @@ #include "../internal.h" -/* The following values are taken from the updated Appendix 5 to FIPS PUB 186 - * and also appear in Appendix 5 to FIPS PUB 186-1. */ +// The following values are taken from the updated Appendix 5 to FIPS PUB 186 +// and also appear in Appendix 5 to FIPS PUB 186-1. static const uint8_t seed[20] = { 0xd5, 0x01, 0x4e, 0x4b, 0x60, 0xef, 0x2b, 0xa8, 0xb6, 0x21, 0x1b, @@ -121,7 +121,7 @@ static const uint8_t fips_digest[] = { 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d, }; -/* fips_sig is a DER-encoded version of the r and s values in FIPS PUB 186-1. */ +// fips_sig is a DER-encoded version of the r and s values in FIPS PUB 186-1. static const uint8_t fips_sig[] = { 0x30, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92, @@ -130,7 +130,7 @@ static const uint8_t fips_sig[] = { 0xdc, 0xd8, 0xc8, }; -/* fips_sig_negative is fips_sig with r encoded as a negative number. */ +// fips_sig_negative is fips_sig with r encoded as a negative number. static const uint8_t fips_sig_negative[] = { 0x30, 0x2c, 0x02, 0x14, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92, 0xb3, @@ -139,7 +139,7 @@ static const uint8_t fips_sig_negative[] = { 0xd8, 0xc8, }; -/* fip_sig_extra is fips_sig with trailing data. */ +// fip_sig_extra is fips_sig with trailing data. static const uint8_t fips_sig_extra[] = { 0x30, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92, @@ -148,7 +148,7 @@ static const uint8_t fips_sig_extra[] = { 0xdc, 0xd8, 0xc8, 0x00, }; -/* fips_sig_lengths is fips_sig with a non-minimally encoded length. 
*/ +// fips_sig_lengths is fips_sig with a non-minimally encoded length. static const uint8_t fips_sig_bad_length[] = { 0x30, 0x81, 0x2d, 0x02, 0x15, 0x00, 0x8b, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, @@ -157,7 +157,7 @@ static const uint8_t fips_sig_bad_length[] = { 0xb6, 0xdc, 0xd8, 0xc8, 0x00, }; -/* fips_sig_bad_r is fips_sig with a bad r value. */ +// fips_sig_bad_r is fips_sig with a bad r value. static const uint8_t fips_sig_bad_r[] = { 0x30, 0x2d, 0x02, 0x15, 0x00, 0x8c, 0xac, 0x1a, 0xb6, 0x64, 0x10, 0x43, 0x5c, 0xb7, 0x18, 0x1f, 0x95, 0xb1, 0x6a, 0xb9, 0x7c, 0x92, @@ -299,7 +299,7 @@ static bool TestVerify(const uint8_t *sig, size_t sig_len, int expect) { return false; } - /* Clear any errors from a test with expected failure. */ + // Clear any errors from a test with expected failure. ERR_clear_error(); return true; } diff --git a/crypto/ec_extra/ec_asn1.c b/crypto/ec_extra/ec_asn1.c index 0772506b..dc710a86 100644 --- a/crypto/ec_extra/ec_asn1.c +++ b/crypto/ec_extra/ec_asn1.c @@ -83,14 +83,14 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { return NULL; } - /* Parse the optional parameters field. */ + // Parse the optional parameters field. EC_GROUP *inner_group = NULL; EC_KEY *ret = NULL; if (CBS_peek_asn1_tag(&ec_private_key, kParametersTag)) { - /* Per SEC 1, as an alternative to omitting it, one is allowed to specify - * this field and put in a NULL to mean inheriting this value. This was - * omitted in a previous version of this logic without problems, so leave it - * unimplemented. */ + // Per SEC 1, as an alternative to omitting it, one is allowed to specify + // this field and put in a NULL to mean inheriting this value. This was + // omitted in a previous version of this logic without problems, so leave it + // unimplemented. 
CBS child; if (!CBS_get_asn1(&ec_private_key, &child, kParametersTag)) { OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR); @@ -103,7 +103,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { if (group == NULL) { group = inner_group; } else if (EC_GROUP_cmp(group, inner_group, NULL) != 0) { - /* If a group was supplied externally, it must match. */ + // If a group was supplied externally, it must match. OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); goto err; } @@ -123,9 +123,9 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Although RFC 5915 specifies the length of the key, OpenSSL historically - * got this wrong, so accept any length. See upstream's - * 30cd4ff294252c4b6a4b69cbef6a5b4117705d22. */ + // Although RFC 5915 specifies the length of the key, OpenSSL historically + // got this wrong, so accept any length. See upstream's + // 30cd4ff294252c4b6a4b69cbef6a5b4117705d22. ret->priv_key = BN_bin2bn(CBS_data(&private_key), CBS_len(&private_key), NULL); ret->pub_key = EC_POINT_new(group); @@ -143,12 +143,12 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { uint8_t padding; if (!CBS_get_asn1(&ec_private_key, &child, kPublicKeyTag) || !CBS_get_asn1(&child, &public_key, CBS_ASN1_BITSTRING) || - /* As in a SubjectPublicKeyInfo, the byte-encoded public key is then - * encoded as a BIT STRING with bits ordered as in the DER encoding. */ + // As in a SubjectPublicKeyInfo, the byte-encoded public key is then + // encoded as a BIT STRING with bits ordered as in the DER encoding. !CBS_get_u8(&public_key, &padding) || padding != 0 || - /* Explicitly check |public_key| is non-empty to save the conversion - * form later. */ + // Explicitly check |public_key| is non-empty to save the conversion + // form later. 
CBS_len(&public_key) == 0 || !EC_POINT_oct2point(group, ret->pub_key, CBS_data(&public_key), CBS_len(&public_key), NULL) || @@ -157,17 +157,17 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Save the point conversion form. - * TODO(davidben): Consider removing this. */ + // Save the point conversion form. + // TODO(davidben): Consider removing this. ret->conv_form = (point_conversion_form_t)(CBS_data(&public_key)[0] & ~0x01); } else { - /* Compute the public key instead. */ + // Compute the public key instead. if (!EC_POINT_mul(group, ret->pub_key, ret->priv_key, NULL, NULL, NULL)) { goto err; } - /* Remember the original private-key-only encoding. - * TODO(davidben): Consider removing this. */ + // Remember the original private-key-only encoding. + // TODO(davidben): Consider removing this. ret->enc_flag |= EC_PKEY_NO_PUBKEY; } @@ -176,7 +176,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Ensure the resulting key is valid. */ + // Ensure the resulting key is valid. if (!EC_KEY_check_key(ret)) { goto err; } @@ -218,13 +218,13 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, } } - /* TODO(fork): replace this flexibility with sensible default? */ + // TODO(fork): replace this flexibility with sensible default? if (!(enc_flags & EC_PKEY_NO_PUBKEY) && key->pub_key != NULL) { CBB child, public_key; if (!CBB_add_asn1(&ec_private_key, &child, kPublicKeyTag) || !CBB_add_asn1(&child, &public_key, CBS_ASN1_BITSTRING) || - /* As in a SubjectPublicKeyInfo, the byte-encoded public key is then - * encoded as a BIT STRING with bits ordered as in the DER encoding. */ + // As in a SubjectPublicKeyInfo, the byte-encoded public key is then + // encoded as a BIT STRING with bits ordered as in the DER encoding. 
!CBB_add_u8(&public_key, 0 /* padding */) || !EC_POINT_point2cbb(&public_key, key->group, key->pub_key, key->conv_form, NULL) || @@ -242,8 +242,8 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, return 1; } -/* is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and - * zero otherwise. */ +// is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and +// zero otherwise. static int is_unsigned_integer(const CBS *cbs) { if (CBS_len(cbs) == 0) { return 0; @@ -251,20 +251,20 @@ static int is_unsigned_integer(const CBS *cbs) { uint8_t byte = CBS_data(cbs)[0]; if ((byte & 0x80) || (byte == 0 && CBS_len(cbs) > 1 && (CBS_data(cbs)[1] & 0x80) == 0)) { - /* Negative or not minimally-encoded. */ + // Negative or not minimally-encoded. return 0; } return 1; } -/* kPrimeFieldOID is the encoding of 1.2.840.10045.1.1. */ +// kPrimeFieldOID is the encoding of 1.2.840.10045.1.1. static const uint8_t kPrimeField[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x01}; static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, CBS *out_b, CBS *out_base_x, CBS *out_base_y, CBS *out_order) { - /* See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an - * ECParameters while RFC 5480 calls it a SpecifiedECDomain. */ + // See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an + // ECParameters while RFC 5480 calls it a SpecifiedECDomain. CBS params, field_id, field_type, curve, base; uint64_t version; if (!CBS_get_asn1(in, ¶ms, CBS_ASN1_SEQUENCE) || @@ -280,7 +280,7 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, !CBS_get_asn1(¶ms, &curve, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&curve, out_a, CBS_ASN1_OCTETSTRING) || !CBS_get_asn1(&curve, out_b, CBS_ASN1_OCTETSTRING) || - /* |curve| has an optional BIT STRING seed which we ignore. */ + // |curve| has an optional BIT STRING seed which we ignore. 
!CBS_get_asn1(¶ms, &base, CBS_ASN1_OCTETSTRING) || !CBS_get_asn1(¶ms, out_order, CBS_ASN1_INTEGER) || !is_unsigned_integer(out_order)) { @@ -288,11 +288,11 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, return 0; } - /* |params| has an optional cofactor which we ignore. With the optional seed - * in |curve|, a group already has arbitrarily many encodings. Parse enough to - * uniquely determine the curve. */ + // |params| has an optional cofactor which we ignore. With the optional seed + // in |curve|, a group already has arbitrarily many encodings. Parse enough to + // uniquely determine the curve. - /* Require that the base point use uncompressed form. */ + // Require that the base point use uncompressed form. uint8_t form; if (!CBS_get_u8(&base, &form) || form != POINT_CONVERSION_UNCOMPRESSED) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FORM); @@ -310,10 +310,10 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, return 1; } -/* integers_equal returns one if |a| and |b| are equal, up to leading zeros, and - * zero otherwise. */ +// integers_equal returns one if |a| and |b| are equal, up to leading zeros, and +// zero otherwise. static int integers_equal(const CBS *a, const uint8_t *b, size_t b_len) { - /* Remove leading zeros from |a| and |b|. */ + // Remove leading zeros from |a| and |b|. CBS a_copy = *a; while (CBS_len(&a_copy) > 0 && CBS_data(&a_copy)[0] == 0) { CBS_skip(&a_copy, 1); @@ -332,7 +332,7 @@ EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs) { return NULL; } - /* Look for a matching curve. */ + // Look for a matching curve. 
const struct built_in_curves *const curves = OPENSSL_built_in_curves(); for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { const struct built_in_curve *curve = &curves->curves[i]; @@ -374,26 +374,26 @@ EC_GROUP *EC_KEY_parse_parameters(CBS *cbs) { return EC_KEY_parse_curve_name(cbs); } - /* OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions - * of named curves. - * - * TODO(davidben): Remove support for this. */ + // OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions + // of named curves. + // + // TODO(davidben): Remove support for this. CBS prime, a, b, base_x, base_y, order; if (!parse_explicit_prime_curve(cbs, &prime, &a, &b, &base_x, &base_y, &order)) { return NULL; } - /* Look for a matching prime curve. */ + // Look for a matching prime curve. const struct built_in_curves *const curves = OPENSSL_built_in_curves(); for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { const struct built_in_curve *curve = &curves->curves[i]; const unsigned param_len = curve->param_len; - /* |curve->params| is ordered p, a, b, x, y, order, each component - * zero-padded up to the field length. Although SEC 1 states that the - * Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes - * |a| and |b|, so this comparison must allow omitting leading zeros. (This - * is relevant for P-521 whose |b| has a leading 0.) */ + // |curve->params| is ordered p, a, b, x, y, order, each component + // zero-padded up to the field length. Although SEC 1 states that the + // Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes + // |a| and |b|, so this comparison must allow omitting leading zeros. (This + // is relevant for P-521 whose |b| has a leading 0.) 
if (integers_equal(&prime, curve->params, param_len) && integers_equal(&a, curve->params + param_len, param_len) && integers_equal(&b, curve->params + param_len * 2, param_len) && @@ -420,8 +420,8 @@ int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point, } EC_KEY *d2i_ECPrivateKey(EC_KEY **out, const uint8_t **inp, long len) { - /* This function treats its |out| parameter differently from other |d2i| - * functions. If supplied, take the group from |*out|. */ + // This function treats its |out| parameter differently from other |d2i| + // functions. If supplied, take the group from |*out|. const EC_GROUP *group = NULL; if (out != NULL && *out != NULL) { group = EC_KEY_get0_group(*out); @@ -515,7 +515,7 @@ EC_KEY *o2i_ECPublicKey(EC_KEY **keyp, const uint8_t **inp, long len) { OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); return NULL; } - /* save the point conversion form */ + // save the point conversion form ret->conv_form = (point_conversion_form_t)(*inp[0] & ~0x01); *inp += len; return ret; @@ -534,7 +534,7 @@ int i2o_ECPublicKey(const EC_KEY *key, uint8_t **outp) { 0, NULL); if (outp == NULL || buf_len == 0) { - /* out == NULL => just return the length of the octet string */ + // out == NULL => just return the length of the octet string return buf_len; } diff --git a/crypto/ecdh/ecdh.c b/crypto/ecdh/ecdh.c index 22b216ef..f38de2f1 100644 --- a/crypto/ecdh/ecdh.c +++ b/crypto/ecdh/ecdh.c @@ -138,7 +138,7 @@ int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key, goto err; } } else { - /* no KDF, just copy as much as we can */ + // no KDF, just copy as much as we can if (buflen < outlen) { outlen = buflen; } diff --git a/crypto/ecdsa_extra/ecdsa_asn1.c b/crypto/ecdsa_extra/ecdsa_asn1.c index 5d827dc8..8d0bc41f 100644 --- a/crypto/ecdsa_extra/ecdsa_asn1.c +++ b/crypto/ecdsa_extra/ecdsa_asn1.c @@ -120,17 +120,17 @@ int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, int ret = 0; uint8_t *der = NULL; - /* Decode the 
ECDSA signature. */ + // Decode the ECDSA signature. s = ECDSA_SIG_from_bytes(sig, sig_len); if (s == NULL) { goto err; } - /* Defend against potential laxness in the DER parser. */ + // Defend against potential laxness in the DER parser. size_t der_len; if (!ECDSA_SIG_to_bytes(&der, &der_len, s) || der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len) != 0) { - /* This should never happen. crypto/bytestring is strictly DER. */ + // This should never happen. crypto/bytestring is strictly DER. OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR); goto err; } @@ -219,8 +219,8 @@ int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len, return 1; } -/* der_len_len returns the number of bytes needed to represent a length of |len| - * in DER. */ +// der_len_len returns the number of bytes needed to represent a length of |len| +// in DER. static size_t der_len_len(size_t len) { if (len < 0x80) { return 1; @@ -234,18 +234,18 @@ static size_t der_len_len(size_t len) { } size_t ECDSA_SIG_max_len(size_t order_len) { - /* Compute the maximum length of an |order_len| byte integer. Defensively - * assume that the leading 0x00 is included. */ + // Compute the maximum length of an |order_len| byte integer. Defensively + // assume that the leading 0x00 is included. size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len; if (integer_len < order_len) { return 0; } - /* An ECDSA signature is two INTEGERs. */ + // An ECDSA signature is two INTEGERs. size_t value_len = 2 * integer_len; if (value_len < integer_len) { return 0; } - /* Add the header. */ + // Add the header. 
size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len; if (ret < value_len) { return 0; diff --git a/crypto/engine/engine.c b/crypto/engine/engine.c index 141ed230..875f1480 100644 --- a/crypto/engine/engine.c +++ b/crypto/engine/engine.c @@ -42,15 +42,15 @@ ENGINE *ENGINE_new(void) { } void ENGINE_free(ENGINE *engine) { - /* Methods are currently required to be static so are not unref'ed. */ + // Methods are currently required to be static so are not unref'ed. OPENSSL_free(engine); } -/* set_method takes a pointer to a method and its given size and sets - * |*out_member| to point to it. This function might want to be extended in the - * future to support making a copy of the method so that a stable ABI for - * ENGINEs can be supported. But, for the moment, all *_METHODS must be - * static. */ +// set_method takes a pointer to a method and its given size and sets +// |*out_member| to point to it. This function might want to be extended in the +// future to support making a copy of the method so that a stable ABI for +// ENGINEs can be supported. But, for the moment, all *_METHODS must be +// static. static int set_method(void **out_member, const void *method, size_t method_size, size_t compiled_size) { const struct openssl_method_common_st *common = method; diff --git a/crypto/err/err.c b/crypto/err/err.c index cbb1260e..2c567cec 100644 --- a/crypto/err/err.c +++ b/crypto/err/err.c @@ -129,7 +129,7 @@ extern const uint32_t kOpenSSLReasonValues[]; extern const size_t kOpenSSLReasonValuesLen; extern const char kOpenSSLReasonStringData[]; -/* err_clear_data frees the optional |data| member of the given error. */ +// err_clear_data frees the optional |data| member of the given error. 
static void err_clear_data(struct err_error_st *error) { if ((error->flags & ERR_FLAG_MALLOCED) != 0) { OPENSSL_free(error->data); @@ -138,17 +138,17 @@ static void err_clear_data(struct err_error_st *error) { error->flags &= ~ERR_FLAG_MALLOCED; } -/* err_clear clears the given queued error. */ +// err_clear clears the given queued error. static void err_clear(struct err_error_st *error) { err_clear_data(error); OPENSSL_memset(error, 0, sizeof(struct err_error_st)); } -/* global_next_library contains the next custom library value to return. */ +// global_next_library contains the next custom library value to return. static int global_next_library = ERR_NUM_LIBS; -/* global_next_library_mutex protects |global_next_library| from concurrent - * updates. */ +// global_next_library_mutex protects |global_next_library| from concurrent +// updates. static struct CRYPTO_STATIC_MUTEX global_next_library_mutex = CRYPTO_STATIC_MUTEX_INIT; @@ -167,7 +167,7 @@ static void err_state_free(void *statep) { OPENSSL_free(state); } -/* err_get_state gets the ERR_STATE object for the current thread. */ +// err_get_state gets the ERR_STATE object for the current thread. static ERR_STATE *err_get_state(void) { ERR_STATE *state = CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_ERR); if (state == NULL) { @@ -199,7 +199,7 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line, if (top) { assert(!inc); - /* last error */ + // last error i = state->top; } else { i = (state->bottom + 1) % ERR_NUM_ERRORS; @@ -229,11 +229,11 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line, if (flags != NULL) { *flags = error->flags & ERR_FLAG_PUBLIC_MASK; } - /* If this error is being removed, take ownership of data from - * the error. The semantics are such that the caller doesn't - * take ownership either. Instead the error system takes - * ownership and retains it until the next call that affects the - * error queue. 
*/ + // If this error is being removed, take ownership of data from + // the error. The semantics are such that the caller doesn't + // take ownership either. Instead the error system takes + // ownership and retains it until the next call that affects the + // error queue. if (inc) { if (error->flags & ERR_FLAG_MALLOCED) { OPENSSL_free(state->to_free); @@ -342,13 +342,13 @@ char *ERR_error_string(uint32_t packed_error, char *ret) { static char buf[ERR_ERROR_STRING_BUF_LEN]; if (ret == NULL) { - /* TODO(fork): remove this. */ + // TODO(fork): remove this. ret = buf; } #if !defined(NDEBUG) - /* This is aimed to help catch callers who don't provide - * |ERR_ERROR_STRING_BUF_LEN| bytes of space. */ + // This is aimed to help catch callers who don't provide + // |ERR_ERROR_STRING_BUF_LEN| bytes of space. OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN); #endif @@ -386,15 +386,15 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { packed_error, lib_str, reason_str); if (strlen(buf) == len - 1) { - /* output may be truncated; make sure we always have 5 colon-separated - * fields, i.e. 4 colons. */ + // output may be truncated; make sure we always have 5 colon-separated + // fields, i.e. 4 colons. static const unsigned num_colons = 4; unsigned i; char *s = buf; if (len <= num_colons) { - /* In this situation it's not possible to ensure that the correct number - * of colons are included in the output. */ + // In this situation it's not possible to ensure that the correct number + // of colons are included in the output. return; } @@ -403,10 +403,10 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { char *last_pos = &buf[len - 1] - num_colons + i; if (colon == NULL || colon > last_pos) { - /* set colon |i| at last possible position (buf[len-1] is the - * terminating 0). If we're setting this colon, then all whole of the - * rest of the string must be colons in order to have the correct - * number. 
*/ + // set colon |i| at last possible position (buf[len-1] is the + // terminating 0). If we're setting this colon, then all whole of the + // rest of the string must be colons in order to have the correct + // number. OPENSSL_memset(last_pos, ':', num_colons - i); break; } @@ -431,25 +431,25 @@ static int err_string_cmp(const void *a, const void *b) { } } -/* err_string_lookup looks up the string associated with |lib| and |key| in - * |values| and |string_data|. It returns the string or NULL if not found. */ +// err_string_lookup looks up the string associated with |lib| and |key| in +// |values| and |string_data|. It returns the string or NULL if not found. static const char *err_string_lookup(uint32_t lib, uint32_t key, const uint32_t *values, size_t num_values, const char *string_data) { - /* |values| points to data in err_data.h, which is generated by - * err_data_generate.go. It's an array of uint32_t values. Each value has the - * following structure: - * | lib | key | offset | - * |6 bits| 11 bits | 15 bits | - * - * The |lib| value is a library identifier: one of the |ERR_LIB_*| values. - * The |key| is a reason code, depending on the context. - * The |offset| is the number of bytes from the start of |string_data| where - * the (NUL terminated) string for this value can be found. - * - * Values are sorted based on treating the |lib| and |key| part as an - * unsigned integer. */ + // |values| points to data in err_data.h, which is generated by + // err_data_generate.go. It's an array of uint32_t values. Each value has the + // following structure: + // | lib | key | offset | + // |6 bits| 11 bits | 15 bits | + // + // The |lib| value is a library identifier: one of the |ERR_LIB_*| values. + // The |key| is a reason code, depending on the context. + // The |offset| is the number of bytes from the start of |string_data| where + // the (NUL terminated) string for this value can be found. 
+ // + // Values are sorted based on treating the |lib| and |key| part as an + // unsigned integer. if (lib >= (1 << 6) || key >= (1 << 11)) { return NULL; } @@ -465,38 +465,38 @@ static const char *err_string_lookup(uint32_t lib, uint32_t key, static const char *const kLibraryNames[ERR_NUM_LIBS] = { "invalid library (0)", - "unknown library", /* ERR_LIB_NONE */ - "system library", /* ERR_LIB_SYS */ - "bignum routines", /* ERR_LIB_BN */ - "RSA routines", /* ERR_LIB_RSA */ - "Diffie-Hellman routines", /* ERR_LIB_DH */ - "public key routines", /* ERR_LIB_EVP */ - "memory buffer routines", /* ERR_LIB_BUF */ - "object identifier routines", /* ERR_LIB_OBJ */ - "PEM routines", /* ERR_LIB_PEM */ - "DSA routines", /* ERR_LIB_DSA */ - "X.509 certificate routines", /* ERR_LIB_X509 */ - "ASN.1 encoding routines", /* ERR_LIB_ASN1 */ - "configuration file routines", /* ERR_LIB_CONF */ - "common libcrypto routines", /* ERR_LIB_CRYPTO */ - "elliptic curve routines", /* ERR_LIB_EC */ - "SSL routines", /* ERR_LIB_SSL */ - "BIO routines", /* ERR_LIB_BIO */ - "PKCS7 routines", /* ERR_LIB_PKCS7 */ - "PKCS8 routines", /* ERR_LIB_PKCS8 */ - "X509 V3 routines", /* ERR_LIB_X509V3 */ - "random number generator", /* ERR_LIB_RAND */ - "ENGINE routines", /* ERR_LIB_ENGINE */ - "OCSP routines", /* ERR_LIB_OCSP */ - "UI routines", /* ERR_LIB_UI */ - "COMP routines", /* ERR_LIB_COMP */ - "ECDSA routines", /* ERR_LIB_ECDSA */ - "ECDH routines", /* ERR_LIB_ECDH */ - "HMAC routines", /* ERR_LIB_HMAC */ - "Digest functions", /* ERR_LIB_DIGEST */ - "Cipher functions", /* ERR_LIB_CIPHER */ - "HKDF functions", /* ERR_LIB_HKDF */ - "User defined functions", /* ERR_LIB_USER */ + "unknown library", // ERR_LIB_NONE + "system library", // ERR_LIB_SYS + "bignum routines", // ERR_LIB_BN + "RSA routines", // ERR_LIB_RSA + "Diffie-Hellman routines", // ERR_LIB_DH + "public key routines", // ERR_LIB_EVP + "memory buffer routines", // ERR_LIB_BUF + "object identifier routines", // ERR_LIB_OBJ + "PEM routines", // 
ERR_LIB_PEM + "DSA routines", // ERR_LIB_DSA + "X.509 certificate routines", // ERR_LIB_X509 + "ASN.1 encoding routines", // ERR_LIB_ASN1 + "configuration file routines", // ERR_LIB_CONF + "common libcrypto routines", // ERR_LIB_CRYPTO + "elliptic curve routines", // ERR_LIB_EC + "SSL routines", // ERR_LIB_SSL + "BIO routines", // ERR_LIB_BIO + "PKCS7 routines", // ERR_LIB_PKCS7 + "PKCS8 routines", // ERR_LIB_PKCS8 + "X509 V3 routines", // ERR_LIB_X509V3 + "random number generator", // ERR_LIB_RAND + "ENGINE routines", // ERR_LIB_ENGINE + "OCSP routines", // ERR_LIB_OCSP + "UI routines", // ERR_LIB_UI + "COMP routines", // ERR_LIB_COMP + "ECDSA routines", // ERR_LIB_ECDSA + "ECDH routines", // ERR_LIB_ECDH + "HMAC routines", // ERR_LIB_HMAC + "Digest functions", // ERR_LIB_DIGEST + "Cipher functions", // ERR_LIB_CIPHER + "HKDF functions", // ERR_LIB_HKDF + "User defined functions", // ERR_LIB_USER }; const char *ERR_lib_error_string(uint32_t packed_error) { @@ -555,8 +555,8 @@ void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) { int line, flags; uint32_t packed_error; - /* thread_hash is the least-significant bits of the |ERR_STATE| pointer value - * for this thread. */ + // thread_hash is the least-significant bits of the |ERR_STATE| pointer value + // for this thread. const unsigned long thread_hash = (uintptr_t) err_get_state(); for (;;) { @@ -585,8 +585,8 @@ void ERR_print_errors_fp(FILE *file) { ERR_print_errors_cb(print_errors_to_file, file); } -/* err_set_error_data sets the data on the most recent error. The |flags| - * argument is a combination of the |ERR_FLAG_*| values. */ +// err_set_error_data sets the data on the most recent error. The |flags| +// argument is a combination of the |ERR_FLAG_*| values. 
static void err_set_error_data(char *data, int flags) { ERR_STATE *const state = err_get_state(); struct err_error_st *error; @@ -634,9 +634,9 @@ void ERR_put_error(int library, int unused, int reason, const char *file, error->packed = ERR_PACK(library, reason); } -/* ERR_add_error_data_vdata takes a variable number of const char* pointers, - * concatenates them and sets the result as the data on the most recent - * error. */ +// ERR_add_error_data_vdata takes a variable number of const char* pointers, +// concatenates them and sets the result as the data on the most recent +// error. static void err_add_error_vdata(unsigned num, va_list args) { size_t alloced, new_len, len = 0, substr_len; char *buf; @@ -661,7 +661,7 @@ static void err_add_error_vdata(unsigned num, va_list args) { char *new_buf; if (alloced + 20 + 1 < alloced) { - /* overflow. */ + // overflow. OPENSSL_free(buf); return; } @@ -695,9 +695,9 @@ void ERR_add_error_dataf(const char *format, ...) { char *buf; static const unsigned buf_len = 256; - /* A fixed-size buffer is used because va_copy (which would be needed in - * order to call vsnprintf twice and measure the buffer) wasn't defined until - * C99. */ + // A fixed-size buffer is used because va_copy (which would be needed in + // order to call vsnprintf twice and measure the buffer) wasn't defined until + // C99. buf = OPENSSL_malloc(buf_len + 1); if (buf == NULL) { return; diff --git a/crypto/err/err_test.cc b/crypto/err/err_test.cc index 8e820b85..5d04ae2a 100644 --- a/crypto/err/err_test.cc +++ b/crypto/err/err_test.cc @@ -30,9 +30,9 @@ TEST(ErrTest, Overflow) { for (unsigned i = 0; i < ERR_NUM_ERRORS - 1; i++) { SCOPED_TRACE(i); uint32_t err = ERR_get_error(); - /* Errors are returned in order they were pushed, with the least recent ones - * removed, up to |ERR_NUM_ERRORS - 1| errors. So the errors returned are - * |ERR_NUM_ERRORS + 2| through |ERR_NUM_ERRORS * 2|, inclusive. 
*/ + // Errors are returned in order they were pushed, with the least recent ones + // removed, up to |ERR_NUM_ERRORS - 1| errors. So the errors returned are + // |ERR_NUM_ERRORS + 2| through |ERR_NUM_ERRORS * 2|, inclusive. EXPECT_NE(0u, err); EXPECT_EQ(static_cast(i + ERR_NUM_ERRORS + 2), ERR_GET_REASON(err)); } diff --git a/crypto/evp/digestsign.c b/crypto/evp/digestsign.c index de41f879..6e4d305f 100644 --- a/crypto/evp/digestsign.c +++ b/crypto/evp/digestsign.c @@ -196,8 +196,8 @@ int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len, const uint8_t *data, size_t data_len) { if (uses_prehash(ctx, evp_sign)) { - /* If |out_sig| is NULL, the caller is only querying the maximum output - * length. |data| should only be incorporated in the final call. */ + // If |out_sig| is NULL, the caller is only querying the maximum output + // length. |data| should only be incorporated in the final call. if (out_sig != NULL && !EVP_DigestSignUpdate(ctx, data, data_len)) { return 0; diff --git a/crypto/evp/evp.c b/crypto/evp/evp.c index 117e774f..ad5f85bf 100644 --- a/crypto/evp/evp.c +++ b/crypto/evp/evp.c @@ -127,7 +127,7 @@ int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (a->ameth) { int ret; - /* Compare parameters if the algorithm has them */ + // Compare parameters if the algorithm has them if (a->ameth->param_cmp) { ret = a->ameth->param_cmp(a, b); if (ret <= 0) { @@ -187,9 +187,9 @@ int EVP_PKEY_id(const EVP_PKEY *pkey) { return pkey->type; } -/* evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which - * should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is - * unknown. */ +// evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which +// should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is +// unknown. 
static const EVP_PKEY_ASN1_METHOD *evp_pkey_asn1_find(int nid) { switch (nid) { case EVP_PKEY_RSA: diff --git a/crypto/evp/evp_asn1.c b/crypto/evp/evp_asn1.c index 1f8d3eb0..bcb86d76 100644 --- a/crypto/evp/evp_asn1.c +++ b/crypto/evp/evp_asn1.c @@ -94,7 +94,7 @@ static int parse_key_type(CBS *cbs, int *out_type) { } EVP_PKEY *EVP_parse_public_key(CBS *cbs) { - /* Parse the SubjectPublicKeyInfo. */ + // Parse the SubjectPublicKeyInfo. CBS spki, algorithm, key; int type; uint8_t padding; @@ -103,22 +103,22 @@ EVP_PKEY *EVP_parse_public_key(CBS *cbs) { !parse_key_type(&algorithm, &type) || !CBS_get_asn1(&spki, &key, CBS_ASN1_BITSTRING) || CBS_len(&spki) != 0 || - /* Every key type defined encodes the key as a byte string with the same - * conversion to BIT STRING. */ + // Every key type defined encodes the key as a byte string with the same + // conversion to BIT STRING. !CBS_get_u8(&key, &padding) || padding != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return NULL; } - /* Set up an |EVP_PKEY| of the appropriate type. */ + // Set up an |EVP_PKEY| of the appropriate type. EVP_PKEY *ret = EVP_PKEY_new(); if (ret == NULL || !EVP_PKEY_set_type(ret, type)) { goto err; } - /* Call into the type-specific SPKI decoding function. */ + // Call into the type-specific SPKI decoding function. if (ret->ameth->pub_decode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); goto err; @@ -144,7 +144,7 @@ int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key) { } EVP_PKEY *EVP_parse_private_key(CBS *cbs) { - /* Parse the PrivateKeyInfo. */ + // Parse the PrivateKeyInfo. CBS pkcs8, algorithm, key; uint64_t version; int type; @@ -158,16 +158,16 @@ EVP_PKEY *EVP_parse_private_key(CBS *cbs) { return NULL; } - /* A PrivateKeyInfo ends with a SET of Attributes which we ignore. */ + // A PrivateKeyInfo ends with a SET of Attributes which we ignore. - /* Set up an |EVP_PKEY| of the appropriate type. */ + // Set up an |EVP_PKEY| of the appropriate type. 
EVP_PKEY *ret = EVP_PKEY_new(); if (ret == NULL || !EVP_PKEY_set_type(ret, type)) { goto err; } - /* Call into the type-specific PrivateKeyInfo decoding function. */ + // Call into the type-specific PrivateKeyInfo decoding function. if (ret->ameth->priv_decode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); goto err; @@ -240,12 +240,12 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, return NULL; } - /* Parse with the legacy format. */ + // Parse with the legacy format. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *ret = old_priv_decode(&cbs, type); if (ret == NULL) { - /* Try again with PKCS#8. */ + // Try again with PKCS#8. ERR_clear_error(); CBS_init(&cbs, *inp, (size_t)len); ret = EVP_parse_private_key(&cbs); @@ -267,8 +267,8 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, return ret; } -/* num_elements parses one SEQUENCE from |in| and returns the number of elements - * in it. On parse error, it returns zero. */ +// num_elements parses one SEQUENCE from |in| and returns the number of elements +// in it. On parse error, it returns zero. static size_t num_elements(const uint8_t *in, size_t in_len) { CBS cbs, sequence; CBS_init(&cbs, in, (size_t)in_len); @@ -295,7 +295,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) { return NULL; } - /* Parse the input as a PKCS#8 PrivateKeyInfo. */ + // Parse the input as a PKCS#8 PrivateKeyInfo. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *ret = EVP_parse_private_key(&cbs); @@ -309,7 +309,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) { } ERR_clear_error(); - /* Count the elements to determine the legacy key format. */ + // Count the elements to determine the legacy key format. 
switch (num_elements(*inp, (size_t)len)) { case 4: return d2i_PrivateKey(EVP_PKEY_EC, out, inp, len); diff --git a/crypto/evp/evp_ctx.c b/crypto/evp/evp_ctx.c index 8d092ee7..3599f778 100644 --- a/crypto/evp/evp_ctx.c +++ b/crypto/evp/evp_ctx.c @@ -369,11 +369,11 @@ int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer) { return 0; } - /* ran@cryptocom.ru: For clarity. The error is if parameters in peer are - * present (!missing) but don't match. EVP_PKEY_cmp_parameters may return - * 1 (match), 0 (don't match) and -2 (comparison is not defined). -1 - * (different key types) is impossible here because it is checked earlier. - * -2 is OK for us here, as well as 1, so we can check for 0 only. */ + // ran@cryptocom.ru: For clarity. The error is if parameters in peer are + // present (!missing) but don't match. EVP_PKEY_cmp_parameters may return + // 1 (match), 0 (don't match) and -2 (comparison is not defined). -1 + // (different key types) is impossible here because it is checked earlier. + // -2 is OK for us here, as well as 1, so we can check for 0 only. if (!EVP_PKEY_missing_parameters(peer) && !EVP_PKEY_cmp_parameters(ctx->pkey, peer)) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_PARAMETERS); diff --git a/crypto/evp/internal.h b/crypto/evp/internal.h index 5e9aab02..4aefa352 100644 --- a/crypto/evp/internal.h +++ b/crypto/evp/internal.h @@ -71,33 +71,33 @@ struct evp_pkey_asn1_method_st { uint8_t oid[9]; uint8_t oid_len; - /* pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo - * and writes the result into |out|. It returns one on success and zero on - * error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER - * type field, and |key| is the contents of the subjectPublicKey with the - * leading padding byte checked and removed. Although X.509 uses BIT STRINGs - * to represent SubjectPublicKeyInfo, every key type defined encodes the key - * as a byte string with the same conversion to BIT STRING. 
*/ + // pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo + // and writes the result into |out|. It returns one on success and zero on + // error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER + // type field, and |key| is the contents of the subjectPublicKey with the + // leading padding byte checked and removed. Although X.509 uses BIT STRINGs + // to represent SubjectPublicKeyInfo, every key type defined encodes the key + // as a byte string with the same conversion to BIT STRING. int (*pub_decode)(EVP_PKEY *out, CBS *params, CBS *key); - /* pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result - * to |out|. It returns one on success and zero on error. */ + // pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result + // to |out|. It returns one on success and zero on error. int (*pub_encode)(CBB *out, const EVP_PKEY *key); int (*pub_cmp)(const EVP_PKEY *a, const EVP_PKEY *b); - /* priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the - * result into |out|. It returns one on success and zero on error. |params| is - * the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key| - * is the contents of the OCTET STRING privateKey field. */ + // priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the + // result into |out|. It returns one on success and zero on error. |params| is + // the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key| + // is the contents of the OCTET STRING privateKey field. int (*priv_decode)(EVP_PKEY *out, CBS *params, CBS *key); - /* priv_encode encodes |key| as a PrivateKeyInfo and appends the result to - * |out|. It returns one on success and zero on error. */ + // priv_encode encodes |key| as a PrivateKeyInfo and appends the result to + // |out|. It returns one on success and zero on error. int (*priv_encode)(CBB *out, const EVP_PKEY *key); - /* pkey_opaque returns 1 if the |pk| is opaque. 
Opaque keys are backed by - * custom implementations which do not expose key material and parameters.*/ + // pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by + // custom implementations which do not expose key material and parameters. int (*pkey_opaque)(const EVP_PKEY *pk); int (*pkey_size)(const EVP_PKEY *pk); @@ -130,33 +130,33 @@ struct evp_pkey_asn1_method_st { #define EVP_PKEY_OP_TYPE_GEN EVP_PKEY_OP_KEYGEN -/* EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype| - * arguments can be -1 to specify that any type and operation are acceptable, - * otherwise |keytype| must match the type of |ctx| and the bits of |optype| - * must intersect the operation flags set on |ctx|. - * - * The |p1| and |p2| arguments depend on the value of |cmd|. - * - * It returns one on success and zero on error. */ +// EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype| +// arguments can be -1 to specify that any type and operation are acceptable, +// otherwise |keytype| must match the type of |ctx| and the bits of |optype| +// must intersect the operation flags set on |ctx|. +// +// The |p1| and |p2| arguments depend on the value of |cmd|. +// +// It returns one on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2); #define EVP_PKEY_CTRL_MD 1 #define EVP_PKEY_CTRL_GET_MD 2 -/* EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|: - * 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key. - * If the return value is <= 0, the key is rejected. - * 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a - * peer key. If the return value is <= 0, the key is rejected. - * 2: Is called with |p2| == NULL to test whether the peer's key was used. - * (EC)DH always return one in this case. - * 3: Is called with |p2| == NULL to set whether the peer's key was used. - * (EC)DH always return one in this case. 
This was only used for GOST. */ +// EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|: +// 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key. +// If the return value is <= 0, the key is rejected. +// 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a +// peer key. If the return value is <= 0, the key is rejected. +// 2: Is called with |p2| == NULL to test whether the peer's key was used. +// (EC)DH always return one in this case. +// 3: Is called with |p2| == NULL to set whether the peer's key was used. +// (EC)DH always return one in this case. This was only used for GOST. #define EVP_PKEY_CTRL_PEER_KEY 3 -/* EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl - * commands are numbered. */ +// EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl +// commands are numbered. #define EVP_PKEY_ALG_CTRL 0x1000 #define EVP_PKEY_CTRL_RSA_PADDING (EVP_PKEY_ALG_CTRL + 1) @@ -173,17 +173,17 @@ OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, #define EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 12) struct evp_pkey_ctx_st { - /* Method associated with this operation */ + // Method associated with this operation const EVP_PKEY_METHOD *pmeth; - /* Engine that implements this method or NULL if builtin */ + // Engine that implements this method or NULL if builtin ENGINE *engine; - /* Key: may be NULL */ + // Key: may be NULL EVP_PKEY *pkey; - /* Peer key for key agreement, may be NULL */ + // Peer key for key agreement, may be NULL EVP_PKEY *peerkey; - /* operation contains one of the |EVP_PKEY_OP_*| values. */ + // operation contains one of the |EVP_PKEY_OP_*| values. 
int operation; - /* Algorithm specific data */ + // Algorithm specific data void *data; } /* EVP_PKEY_CTX */; @@ -226,8 +226,8 @@ typedef struct { union { uint8_t priv[64]; struct { - /* Shift the location of the public key to align with where it is in the - * private key representation. */ + // Shift the location of the public key to align with where it is in the + // private key representation. uint8_t pad[32]; uint8_t value[32]; } pub; @@ -246,7 +246,7 @@ extern const EVP_PKEY_METHOD ed25519_pkey_meth; #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_EVP_INTERNAL_H */ +#endif // OPENSSL_HEADER_EVP_INTERNAL_H diff --git a/crypto/evp/p_dsa_asn1.c b/crypto/evp/p_dsa_asn1.c index 0e5cdeec..16d78b72 100644 --- a/crypto/evp/p_dsa_asn1.c +++ b/crypto/evp/p_dsa_asn1.c @@ -65,9 +65,9 @@ static int dsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 3279, section 2.3.2. */ + // See RFC 3279, section 2.3.2. - /* Parameters may or may not be present. */ + // Parameters may or may not be present. DSA *dsa; if (CBS_len(params) == 0) { dsa = DSA_new(); @@ -105,7 +105,7 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) { const DSA *dsa = key->pkey.dsa; const int has_params = dsa->p != NULL && dsa->q != NULL && dsa->g != NULL; - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -125,9 +125,9 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) { } static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See PKCS#11, v2.40, section 2.5. */ + // See PKCS#11, v2.40, section 2.5. - /* Decode parameters. */ + // Decode parameters. 
BN_CTX *ctx = NULL; DSA *dsa = DSA_parse_parameters(params); if (dsa == NULL || CBS_len(params) != 0) { @@ -141,14 +141,14 @@ static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { goto err; } - /* Decode the key. */ + // Decode the key. if (!BN_parse_asn1_unsigned(key, dsa->priv_key) || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); goto err; } - /* Calculate the public key. */ + // Calculate the public key. ctx = BN_CTX_new(); if (ctx == NULL || !BN_mod_exp_mont(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, ctx, @@ -173,7 +173,7 @@ static int dsa_priv_encode(CBB *out, const EVP_PKEY *key) { return 0; } - /* See PKCS#11, v2.40, section 2.5. */ + // See PKCS#11, v2.40, section 2.5. CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || @@ -245,7 +245,7 @@ static void int_dsa_free(EVP_PKEY *pkey) { DSA_free(pkey->pkey.dsa); } const EVP_PKEY_ASN1_METHOD dsa_asn1_meth = { EVP_PKEY_DSA, - /* 1.2.840.10040.4.1 */ + // 1.2.840.10040.4.1 {0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x01}, 7, dsa_pub_decode, diff --git a/crypto/evp/p_ec.c b/crypto/evp/p_ec.c index e2502a34..d311d220 100644 --- a/crypto/evp/p_ec.c +++ b/crypto/evp/p_ec.c @@ -74,7 +74,7 @@ typedef struct { - /* message digest */ + // message digest const EVP_MD *md; } EC_PKEY_CTX; @@ -161,8 +161,8 @@ static int pkey_ec_derive(EVP_PKEY_CTX *ctx, uint8_t *key, } pubkey = EC_KEY_get0_public_key(ctx->peerkey->pkey.ec); - /* NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is - * not an error, the result is truncated. */ + // NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is + // not an error, the result is truncated. 
outlen = *keylen; @@ -196,7 +196,7 @@ static int pkey_ec_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) { return 1; case EVP_PKEY_CTRL_PEER_KEY: - /* Default behaviour is OK */ + // Default behaviour is OK return 1; default: diff --git a/crypto/evp/p_ec_asn1.c b/crypto/evp/p_ec_asn1.c index 1f1bf2fc..c5828d93 100644 --- a/crypto/evp/p_ec_asn1.c +++ b/crypto/evp/p_ec_asn1.c @@ -70,7 +70,7 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) { const EC_GROUP *group = EC_KEY_get0_group(ec_key); const EC_POINT *public_key = EC_KEY_get0_public_key(ec_key); - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -90,9 +90,9 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) { } static int eckey_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. - /* The parameters are a named curve. */ + // The parameters are a named curve. EC_POINT *point = NULL; EC_KEY *eckey = NULL; EC_GROUP *group = EC_KEY_parse_curve_name(params); @@ -141,7 +141,7 @@ static int eckey_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { } static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 5915. */ + // See RFC 5915. EC_GROUP *group = EC_KEY_parse_parameters(params); if (group == NULL || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); @@ -164,13 +164,13 @@ static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { static int eckey_priv_encode(CBB *out, const EVP_PKEY *key) { const EC_KEY *ec_key = key->pkey.ec; - /* Omit the redundant copy of the curve name. This contradicts RFC 5915 but - * aligns with PKCS #11. SEC 1 only says they may be omitted if known by other - * means. Both OpenSSL and NSS omit the redundant parameters, so we omit them - * as well. 
*/ + // Omit the redundant copy of the curve name. This contradicts RFC 5915 but + // aligns with PKCS #11. SEC 1 only says they may be omitted if known by other + // means. Both OpenSSL and NSS omit the redundant parameters, so we omit them + // as well. unsigned enc_flags = EC_KEY_get_enc_flags(ec_key) | EC_PKEY_NO_PARAMETERS; - /* See RFC 5915. */ + // See RFC 5915. CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || @@ -219,7 +219,7 @@ static int ec_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) { const EC_GROUP *group_a = EC_KEY_get0_group(a->pkey.ec), *group_b = EC_KEY_get0_group(b->pkey.ec); if (EC_GROUP_cmp(group_a, group_b, NULL) != 0) { - /* mismatch */ + // mismatch return 0; } return 1; @@ -233,7 +233,7 @@ static int eckey_opaque(const EVP_PKEY *pkey) { const EVP_PKEY_ASN1_METHOD ec_asn1_meth = { EVP_PKEY_EC, - /* 1.2.840.10045.2.1 */ + // 1.2.840.10045.2.1 {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01}, 7, eckey_pub_decode, diff --git a/crypto/evp/p_ed25519.c b/crypto/evp/p_ed25519.c index 07226248..554a379c 100644 --- a/crypto/evp/p_ed25519.c +++ b/crypto/evp/p_ed25519.c @@ -20,7 +20,7 @@ #include "internal.h" -/* Ed25519 has no parameters to copy. */ +// Ed25519 has no parameters to copy. static int pkey_ed25519_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { return 1; } static int pkey_ed25519_sign_message(EVP_PKEY_CTX *ctx, uint8_t *sig, diff --git a/crypto/evp/p_ed25519_asn1.c b/crypto/evp/p_ed25519_asn1.c index 8cb359e1..37aebe00 100644 --- a/crypto/evp/p_ed25519_asn1.c +++ b/crypto/evp/p_ed25519_asn1.c @@ -61,9 +61,9 @@ static int set_privkey(EVP_PKEY *pkey, const uint8_t privkey[64]) { } static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See draft-ietf-curdle-pkix-04, section 4. */ + // See draft-ietf-curdle-pkix-04, section 4. - /* The parameters must be omitted. Public keys have length 32. */ + // The parameters must be omitted. 
Public keys have length 32. if (CBS_len(params) != 0 || CBS_len(key) != 32) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); @@ -76,7 +76,7 @@ static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { static int ed25519_pub_encode(CBB *out, const EVP_PKEY *pkey) { const ED25519_KEY *key = pkey->pkey.ptr; - /* See draft-ietf-curdle-pkix-04, section 4. */ + // See draft-ietf-curdle-pkix-04, section 4. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -100,10 +100,10 @@ static int ed25519_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { } static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See draft-ietf-curdle-pkix-04, section 7. */ + // See draft-ietf-curdle-pkix-04, section 7. - /* Parameters must be empty. The key is a 32-byte value wrapped in an extra - * OCTET STRING layer. */ + // Parameters must be empty. The key is a 32-byte value wrapped in an extra + // OCTET STRING layer. CBS inner; if (CBS_len(params) != 0 || !CBS_get_asn1(key, &inner, CBS_ASN1_OCTETSTRING) || @@ -113,8 +113,8 @@ static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { return 0; } - /* The PKCS#8 encoding stores only the 32-byte seed, so we must recover the - * full representation which we use from it. */ + // The PKCS#8 encoding stores only the 32-byte seed, so we must recover the + // full representation which we use from it. uint8_t pubkey[32], privkey[64]; ED25519_keypair_from_seed(pubkey, privkey, CBS_data(&inner)); return set_privkey(out, privkey); @@ -127,7 +127,7 @@ static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) { return 0; } - /* See draft-ietf-curdle-pkix-04, section 7. */ + // See draft-ietf-curdle-pkix-04, section 7. 
CBB pkcs8, algorithm, oid, private_key, inner; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || @@ -136,8 +136,8 @@ static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) { !CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) || !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) || !CBB_add_asn1(&private_key, &inner, CBS_ASN1_OCTETSTRING) || - /* The PKCS#8 encoding stores only the 32-byte seed which is the first 32 - * bytes of the private key. */ + // The PKCS#8 encoding stores only the 32-byte seed which is the first 32 + // bytes of the private key. !CBB_add_bytes(&inner, key->key.priv, 32) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); diff --git a/crypto/evp/p_rsa.c b/crypto/evp/p_rsa.c index 36aa5246..08c01c2e 100644 --- a/crypto/evp/p_rsa.c +++ b/crypto/evp/p_rsa.c @@ -73,21 +73,21 @@ typedef struct { - /* Key gen parameters */ + // Key gen parameters int nbits; BIGNUM *pub_exp; - /* RSA padding mode */ + // RSA padding mode int pad_mode; - /* message digest */ + // message digest const EVP_MD *md; - /* message digest for MGF1 */ + // message digest for MGF1 const EVP_MD *mgf1md; - /* PSS salt length */ + // PSS salt length int saltlen; - /* tbuf is a buffer which is either NULL, or is the size of the RSA modulus. - * It's used to store the output of RSA operations. */ + // tbuf is a buffer which is either NULL, or is the size of the RSA modulus. + // It's used to store the output of RSA operations. uint8_t *tbuf; - /* OAEP label */ + // OAEP label uint8_t *oaep_label; size_t oaep_labellen; } RSA_PKEY_CTX; @@ -260,7 +260,7 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, return 0; } - /* Assemble the encoded hash, using a placeholder hash value. */ + // Assemble the encoded hash, using a placeholder hash value. 
static const uint8_t kDummyHash[EVP_MAX_MD_SIZE] = {0}; const size_t hash_len = EVP_MD_size(rctx->md); uint8_t *asn1_prefix; @@ -278,7 +278,7 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, if (!RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, sig_len, RSA_PKCS1_PADDING) || rslen != asn1_prefix_len || - /* Compare all but the hash suffix. */ + // Compare all but the hash suffix. CRYPTO_memcmp(rctx->tbuf, asn1_prefix, asn1_prefix_len - hash_len) != 0) { ok = 0; } diff --git a/crypto/evp/p_rsa_asn1.c b/crypto/evp/p_rsa_asn1.c index 866fc594..3231ffb6 100644 --- a/crypto/evp/p_rsa_asn1.c +++ b/crypto/evp/p_rsa_asn1.c @@ -77,7 +77,7 @@ void EVP_set_buggy_rsa_parser(int buggy) { } static int rsa_pub_encode(CBB *out, const EVP_PKEY *key) { - /* See RFC 3279, section 2.3.1. */ + // See RFC 3279, section 2.3.1. CBB spki, algorithm, oid, null, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -101,9 +101,9 @@ static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { buggy = g_buggy; CRYPTO_STATIC_MUTEX_unlock_read(&g_buggy_lock); - /* See RFC 3279, section 2.3.1. */ + // See RFC 3279, section 2.3.1. - /* The parameters must be NULL. */ + // The parameters must be NULL. CBS null; if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 || @@ -112,12 +112,12 @@ static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { return 0; } - /* Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Switch this to the strict version in March 2016 or when - * Chromium can force client certificates down a different codepath, whichever - * comes first. */ + // Estonian IDs issued between September 2014 to September 2015 are + // broken. See https://crbug.com/532048 and https://crbug.com/534766. 
+ // + // TODO(davidben): Switch this to the strict version in March 2016 or when + // Chromium can force client certificates down a different codepath, whichever + // comes first. RSA *rsa = buggy ? RSA_parse_public_key_buggy(key) : RSA_parse_public_key(key); if (rsa == NULL || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); @@ -153,7 +153,7 @@ static int rsa_priv_encode(CBB *out, const EVP_PKEY *key) { } static int rsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* Per RFC 3447, A.1, the parameters have type NULL. */ + // Per RFC 3447, A.1, the parameters have type NULL. CBS null; if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 || @@ -189,7 +189,7 @@ static void int_rsa_free(EVP_PKEY *pkey) { RSA_free(pkey->pkey.rsa); } const EVP_PKEY_ASN1_METHOD rsa_asn1_meth = { EVP_PKEY_RSA, - /* 1.2.840.113549.1.1.1 */ + // 1.2.840.113549.1.1.1 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01}, 9, rsa_pub_decode, diff --git a/crypto/evp/pbkdf.c b/crypto/evp/pbkdf.c index daebb2d0..f23a74bd 100644 --- a/crypto/evp/pbkdf.c +++ b/crypto/evp/pbkdf.c @@ -65,7 +65,7 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, unsigned iterations, const EVP_MD *digest, size_t key_len, uint8_t *out_key) { - /* See RFC 8018, section 5.2. */ + // See RFC 8018, section 5.2. int ret = 0; size_t md_len = EVP_MD_size(digest); uint32_t i = 1; @@ -88,7 +88,7 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, i_buf[2] = (uint8_t)((i >> 8) & 0xff); i_buf[3] = (uint8_t)(i & 0xff); - /* Compute U_1. */ + // Compute U_1. uint8_t digest_tmp[EVP_MAX_MD_SIZE]; if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) || !HMAC_Update(&hctx, salt, salt_len) || @@ -99,7 +99,7 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, OPENSSL_memcpy(out_key, digest_tmp, todo); for (unsigned j = 1; j < iterations; j++) { - /* Compute the remaining U_* values and XOR. 
*/ + // Compute the remaining U_* values and XOR. if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) || !HMAC_Update(&hctx, digest_tmp, md_len) || !HMAC_Final(&hctx, digest_tmp, NULL)) { @@ -115,17 +115,17 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, i++; } - /* RFC 8018 describes iterations (c) as being a "positive integer", so a - * value of 0 is an error. - * - * Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return - * value, expecting it to succeed and unconditionally using |out_key|. As a - * precaution for such callsites in external code, the old behavior of - * iterations < 1 being treated as iterations == 1 is preserved, but - * additionally an error result is returned. - * - * TODO(eroman): Figure out how to remove this compatibility hack, or change - * the default to something more sensible like 2048. */ + // RFC 8018 describes iterations (c) as being a "positive integer", so a + // value of 0 is an error. + // + // Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return + // value, expecting it to succeed and unconditionally using |out_key|. As a + // precaution for such callsites in external code, the old behavior of + // iterations < 1 being treated as iterations == 1 is preserved, but + // additionally an error result is returned. + // + // TODO(eroman): Figure out how to remove this compatibility hack, or change + // the default to something more sensible like 2048. if (iterations == 0) { goto err; } diff --git a/crypto/evp/print.c b/crypto/evp/print.c index a7740879..3621d5f2 100644 --- a/crypto/evp/print.c +++ b/crypto/evp/print.c @@ -131,7 +131,7 @@ static void update_buflen(const BIGNUM *b, size_t *pbuflen) { } } -/* RSA keys. */ +// RSA keys. static int do_rsa_print(BIO *out, const RSA *rsa, int off, int include_private) { @@ -212,7 +212,7 @@ static int rsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent, } -/* DSA keys. */ +// DSA keys. 
static int do_dsa_print(BIO *bp, const DSA *x, int off, int ptype) { uint8_t *m = NULL; @@ -288,7 +288,7 @@ static int dsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent, } -/* EC keys. */ +// EC keys. static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) { uint8_t *buffer = NULL; @@ -379,7 +379,7 @@ static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) { if (pub_key_bytes != NULL) { BIO_hexdump(bp, pub_key_bytes, pub_key_bytes_len, off); } - /* TODO(fork): implement */ + // TODO(fork): implement /* if (!ECPKParameters_print(bp, group, off)) goto err; */ diff --git a/crypto/evp/scrypt.c b/crypto/evp/scrypt.c index 3f102145..ed186eed 100644 --- a/crypto/evp/scrypt.c +++ b/crypto/evp/scrypt.c @@ -18,25 +18,25 @@ #include "../internal.h" -/* This file implements scrypt, described in RFC 7914. - * - * Note scrypt refers to both "blocks" and a "block size" parameter, r. These - * are two different notions of blocks. A Salsa20 block is 64 bytes long, - * represented in this implementation by 16 |uint32_t|s. |r| determines the - * number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r| - * Salsa20 blocks. This implementation refers to them as Salsa20 blocks and - * scrypt blocks, respectively. */ - -/* A block_t is a Salsa20 block. */ +// This file implements scrypt, described in RFC 7914. +// +// Note scrypt refers to both "blocks" and a "block size" parameter, r. These +// are two different notions of blocks. A Salsa20 block is 64 bytes long, +// represented in this implementation by 16 |uint32_t|s. |r| determines the +// number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r| +// Salsa20 blocks. This implementation refers to them as Salsa20 blocks and +// scrypt blocks, respectively. + +// A block_t is a Salsa20 block. 
typedef struct { uint32_t words[16]; } block_t; OPENSSL_COMPILE_ASSERT(sizeof(block_t) == 64, block_t_has_padding); #define R(a, b) (((a) << (b)) | ((a) >> (32 - (b)))) -/* salsa208_word_specification implements the Salsa20/8 core function, also - * described in RFC 7914, section 3. It modifies the block at |inout| - * in-place. */ +// salsa208_word_specification implements the Salsa20/8 core function, also +// described in RFC 7914, section 3. It modifies the block at |inout| +// in-place. static void salsa208_word_specification(block_t *inout) { block_t x; OPENSSL_memcpy(&x, inout, sizeof(x)); @@ -81,16 +81,16 @@ static void salsa208_word_specification(block_t *inout) { } } -/* xor_block sets |*out| to be |*a| XOR |*b|. */ +// xor_block sets |*out| to be |*a| XOR |*b|. static void xor_block(block_t *out, const block_t *a, const block_t *b) { for (size_t i = 0; i < 16; i++) { out->words[i] = a->words[i] ^ b->words[i]; } } -/* scryptBlockMix implements the function described in RFC 7914, section 4. B' - * is written to |out|. |out| and |B| may not alias and must be each one scrypt - * block (2 * |r| Salsa20 blocks) long. */ +// scryptBlockMix implements the function described in RFC 7914, section 4. B' +// is written to |out|. |out| and |B| may not alias and must be each one scrypt +// block (2 * |r| Salsa20 blocks) long. static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) { assert(out != B); @@ -100,19 +100,19 @@ static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) { xor_block(&X, &X, &B[i]); salsa208_word_specification(&X); - /* This implements the permutation in step 3. */ + // This implements the permutation in step 3. OPENSSL_memcpy(&out[i / 2 + (i & 1) * r], &X, sizeof(X)); } } -/* scryptROMix implements the function described in RFC 7914, section 5. |B| is - * an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and - * |V| are scratch space allocated by the caller. 
|T| must have space for one - * scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt - * blocks (2 * |r| * |N| Salsa20 blocks). */ +// scryptROMix implements the function described in RFC 7914, section 5. |B| is +// an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and +// |V| are scratch space allocated by the caller. |T| must have space for one +// scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt +// blocks (2 * |r| * |N| Salsa20 blocks). static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, block_t *V) { - /* Steps 1 and 2. */ + // Steps 1 and 2. OPENSSL_memcpy(V, B, 2 * r * sizeof(block_t)); for (uint64_t i = 1; i < N; i++) { scryptBlockMix(&V[2 * r * i /* scrypt block i */], @@ -120,9 +120,9 @@ static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, } scryptBlockMix(B, &V[2 * r * (N - 1) /* scrypt block N-1 */], r); - /* Step 3. */ + // Step 3. for (uint64_t i = 0; i < N; i++) { - /* Note this assumes |N| <= 2^32 and is a power of 2. */ + // Note this assumes |N| <= 2^32 and is a power of 2. uint32_t j = B[2 * r - 1].words[0] & (N - 1); for (size_t k = 0; k < 2 * r; k++) { xor_block(&T[k], &B[k], &V[2 * r * j + k]); @@ -131,16 +131,16 @@ static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, } } -/* SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the - * bounds on p in section 6: - * - * p <= ((2^32-1) * hLen) / MFLen iff - * p <= ((2^32-1) * 32) / (128 * r) iff - * p * r <= (2^30-1) */ +// SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the +// bounds on p in section 6: +// +// p <= ((2^32-1) * hLen) / MFLen iff +// p <= ((2^32-1) * 32) / (128 * r) iff +// p * r <= (2^30-1) #define SCRYPT_PR_MAX ((1 << 30) - 1) -/* SCRYPT_MAX_MEM is the default maximum memory that may be allocated by - * |EVP_PBE_scrypt|. */ +// SCRYPT_MAX_MEM is the default maximum memory that may be allocated by +// |EVP_PBE_scrypt|. 
#define SCRYPT_MAX_MEM (1024 * 1024 * 32) int EVP_PBE_scrypt(const char *password, size_t password_len, @@ -148,18 +148,18 @@ int EVP_PBE_scrypt(const char *password, size_t password_len, uint64_t p, size_t max_mem, uint8_t *out_key, size_t key_len) { if (r == 0 || p == 0 || p > SCRYPT_PR_MAX / r || - /* |N| must be a power of two. */ + // |N| must be a power of two. N < 2 || (N & (N - 1)) || - /* We only support |N| <= 2^32 in |scryptROMix|. */ + // We only support |N| <= 2^32 in |scryptROMix|. N > UINT64_C(1) << 32 || - /* Check that |N| < 2^(128×r / 8). */ + // Check that |N| < 2^(128×r / 8). (16 * r <= 63 && N >= UINT64_C(1) << (16 * r))) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PARAMETERS); return 0; } - /* Determine the amount of memory needed. B, T, and V are |p|, 1, and |N| - * scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s. */ + // Determine the amount of memory needed. B, T, and V are |p|, 1, and |N| + // scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s. if (max_mem == 0) { max_mem = SCRYPT_MAX_MEM; } @@ -171,8 +171,8 @@ int EVP_PBE_scrypt(const char *password, size_t password_len, return 0; } - /* Allocate and divide up the scratch space. |max_mem| fits in a size_t, which - * is no bigger than uint64_t, so none of these operations may overflow. */ + // Allocate and divide up the scratch space. |max_mem| fits in a size_t, which + // is no bigger than uint64_t, so none of these operations may overflow. 
OPENSSL_COMPILE_ASSERT(UINT64_MAX >= ((size_t)-1), size_t_exceeds_u64); size_t B_blocks = p * 2 * r; size_t B_bytes = B_blocks * sizeof(block_t); diff --git a/crypto/ex_data.c b/crypto/ex_data.c index 27b9a516..af7e7e2d 100644 --- a/crypto/ex_data.c +++ b/crypto/ex_data.c @@ -124,8 +124,8 @@ DEFINE_STACK_OF(CRYPTO_EX_DATA_FUNCS) struct crypto_ex_data_func_st { - long argl; /* Arbitary long */ - void *argp; /* Arbitary void pointer */ + long argl; // Arbitrary long + void *argp; // Arbitrary void pointer CRYPTO_EX_free *free_func; }; @@ -179,7 +179,7 @@ int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val) { n = sk_void_num(ad->sk); - /* Add NULL values until the stack is long enough. */ + // Add NULL values until the stack is long enough. for (i = n; i <= index; i++) { if (!sk_void_push(ad->sk, NULL)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); @@ -198,19 +198,19 @@ void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx) { return sk_void_value(ad->sk, idx); } -/* get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any, - * for the given class. If there are some pointers, it sets |*out| to point to - * a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on - * success or zero on error. */ +// get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any, +// for the given class. If there are some pointers, it sets |*out| to point to +// a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on +// success or zero on error. static int get_func_pointers(STACK_OF(CRYPTO_EX_DATA_FUNCS) **out, CRYPTO_EX_DATA_CLASS *ex_data_class) { size_t n; *out = NULL; - /* CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a - * shallow copy of the list under lock and then use the structures without - * the lock held. 
*/ + // CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a + // shallow copy of the list under lock and then use the structures without + // the lock held. CRYPTO_STATIC_MUTEX_lock_read(&ex_data_class->lock); n = sk_CRYPTO_EX_DATA_FUNCS_num(ex_data_class->meth); if (n > 0) { @@ -233,13 +233,13 @@ void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad) { void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad) { if (ad->sk == NULL) { - /* Nothing to do. */ + // Nothing to do. return; } STACK_OF(CRYPTO_EX_DATA_FUNCS) *func_pointers; if (!get_func_pointers(&func_pointers, ex_data_class)) { - /* TODO(davidben): This leaks memory on malloc error. */ + // TODO(davidben): This leaks memory on malloc error. return; } diff --git a/crypto/fipsmodule/aes/aes.c b/crypto/fipsmodule/aes/aes.c index c68a5d55..a988b395 100644 --- a/crypto/fipsmodule/aes/aes.c +++ b/crypto/fipsmodule/aes/aes.c @@ -59,16 +59,16 @@ #if defined(OPENSSL_NO_ASM) || \ (!defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) && !defined(OPENSSL_ARM)) -/* Te0[x] = S [x].[02, 01, 01, 03]; - * Te1[x] = S [x].[03, 02, 01, 01]; - * Te2[x] = S [x].[01, 03, 02, 01]; - * Te3[x] = S [x].[01, 01, 03, 02]; - * - * Td0[x] = Si[x].[0e, 09, 0d, 0b]; - * Td1[x] = Si[x].[0b, 0e, 09, 0d]; - * Td2[x] = Si[x].[0d, 0b, 0e, 09]; - * Td3[x] = Si[x].[09, 0d, 0b, 0e]; - * Td4[x] = Si[x].[01]; */ +// Te0[x] = S [x].[02, 01, 01, 03]; +// Te1[x] = S [x].[03, 02, 01, 01]; +// Te2[x] = S [x].[01, 03, 02, 01]; +// Te3[x] = S [x].[01, 01, 03, 02]; +// +// Td0[x] = Si[x].[0e, 09, 0d, 0b]; +// Td1[x] = Si[x].[0b, 0e, 09, 0d]; +// Td2[x] = Si[x].[0d, 0b, 0e, 09]; +// Td3[x] = Si[x].[09, 0d, 0b, 0e]; +// Td4[x] = Si[x].[01]; static const uint32_t Te0[256] = { 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, @@ -531,7 +531,7 @@ static const uint8_t Td4[256] = { static const uint32_t rcon[] = { 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 
0x1B000000, 0x36000000, - /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ + // for 128-bit blocks, Rijndael never uses more than 10 rcon values }; int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { @@ -634,7 +634,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { int i, j, status; uint32_t temp; - /* first, start with an encryption schedule */ + // first, start with an encryption schedule status = AES_set_encrypt_key(key, bits, aeskey); if (status < 0) { return status; @@ -642,7 +642,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { rk = aeskey->rd_key; - /* invert the order of the round keys: */ + // invert the order of the round keys: for (i = 0, j = 4 * aeskey->rounds; i < j; i += 4, j -= 4) { temp = rk[i]; rk[i] = rk[j]; @@ -657,8 +657,8 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; } - /* apply the inverse MixColumn transform to all round keys but the first and - * the last: */ + // apply the inverse MixColumn transform to all round keys but the first and + // the last: for (i = 1; i < (int)aeskey->rounds; i++) { rk += 4; rk[0] = @@ -682,19 +682,19 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { uint32_t s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; -#endif /* ?FULL_UNROLL */ +#endif // ?FULL_UNROLL assert(in && out && key); rk = key->rd_key; - /* map byte array block to cipher state - * and add initial round key: */ + // map byte array block to cipher state + // and add initial round key: s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL - /* round 1: */ + // round 1: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[4]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -703,7 +703,7 @@ 
void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[6]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[7]; - /* round 2: */ + // round 2: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[8]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -712,7 +712,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[10]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; - /* round 3: */ + // round 3: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -721,7 +721,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[14]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; - /* round 4: */ + // round 4: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -730,7 +730,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[18]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; - /* round 5: */ + // round 5: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -739,7 +739,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[22]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; - /* round 6: */ + // round 6: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ 
Te2[(t3 >> 8) & 0xff] ^ @@ -748,7 +748,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[26]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; - /* round 7: */ + // round 7: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -757,7 +757,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[30]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; - /* round 8: */ + // round 8: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -766,7 +766,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[34]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; - /* round 9: */ + // round 9: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -776,7 +776,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; if (key->rounds > 10) { - /* round 10: */ + // round 10: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -785,7 +785,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[42]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; - /* round 11: */ + // round 11: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; t1 
= Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -795,7 +795,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; if (key->rounds > 12) { - /* round 12: */ + // round 12: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -804,7 +804,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[50]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; - /* round 13: */ + // round 13: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -816,10 +816,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { } } rk += key->rounds << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ +#else // !FULL_UNROLL + // Nr - 1 full rounds: r = key->rounds >> 1; for (;;) { t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ @@ -845,8 +843,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[(t2) & 0xff] ^ rk[3]; } -#endif /* ?FULL_UNROLL */ - /* apply last round and map cipher state to byte array block: */ +#endif // ?FULL_UNROLL + // apply last round and map cipher state to byte array block: s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^ rk[0]; @@ -870,19 +868,19 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { uint32_t s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; -#endif /* ?FULL_UNROLL */ +#endif // ?FULL_UNROLL assert(in && out && key); rk = 
key->rd_key; - /* map byte array block to cipher state - * and add initial round key: */ + // map byte array block to cipher state + // and add initial round key: s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL - /* round 1: */ + // round 1: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[4]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -891,7 +889,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[6]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[7]; - /* round 2: */ + // round 2: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[8]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -900,7 +898,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[10]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11]; - /* round 3: */ + // round 3: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -909,7 +907,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[14]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15]; - /* round 4: */ + // round 4: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -918,7 +916,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[18]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19]; - /* round 5: */ + // round 5: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ 
Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -927,7 +925,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[22]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23]; - /* round 6: */ + // round 6: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -936,7 +934,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[26]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27]; - /* round 7: */ + // round 7: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -945,7 +943,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[30]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31]; - /* round 8: */ + // round 8: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -954,7 +952,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[34]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35]; - /* round 9: */ + // round 9: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -964,7 +962,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39]; if (key->rounds > 10) { - /* round 10: */ + // round 10: s0 = 
Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -973,7 +971,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[42]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43]; - /* round 11: */ + // round 11: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -983,7 +981,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47]; if (key->rounds > 12) { - /* round 12: */ + // round 12: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -992,7 +990,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[50]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51]; - /* round 13: */ + // round 13: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -1004,10 +1002,8 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { } } rk += key->rounds << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ +#else // !FULL_UNROLL + // Nr - 1 full rounds: r = key->rounds >> 1; for (;;) { t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ @@ -1033,9 +1029,9 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[(t0) & 0xff] ^ rk[3]; } -#endif /* ?FULL_UNROLL */ - /* apply last round and - * map cipher state to 
byte array block: */ +#endif // ?FULL_UNROLL + // apply last round and + // map cipher state to byte array block: s0 = ((uint32_t)Td4[(t0 >> 24)] << 24) ^ ((uint32_t)Td4[(t3 >> 16) & 0xff] << 16) ^ ((uint32_t)Td4[(t2 >> 8) & 0xff] << 8) ^ @@ -1060,10 +1056,10 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { #else -/* In this case several functions are provided by asm code. However, one cannot - * control asm symbol visibility with command line flags and such so they are - * always hidden and wrapped by these C functions, which can be so - * controlled. */ +// In this case several functions are provided by asm code. However, one cannot +// control asm symbol visibility with command line flags and such so they are +// always hidden and wrapped by these C functions, which can be so +// controlled. void asm_AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { @@ -1101,4 +1097,4 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { } } -#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) */ +#endif // OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) diff --git a/crypto/fipsmodule/aes/internal.h b/crypto/fipsmodule/aes/internal.h index 01cff846..45db9eec 100644 --- a/crypto/fipsmodule/aes/internal.h +++ b/crypto/fipsmodule/aes/internal.h @@ -30,7 +30,7 @@ extern "C" { static int hwaes_capable(void) { return CRYPTO_is_ARMv8_AES_capable(); } -#endif /* !NO_ASM && (AES || AARCH64) */ +#endif // !NO_ASM && (AES || AARCH64) #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE) #define HWAES @@ -38,7 +38,7 @@ static int hwaes_capable(void) { static int hwaes_capable(void) { return CRYPTO_is_PPC64LE_vcrypto_capable(); } -#endif /* !NO_ASM && PPC64LE */ +#endif // !NO_ASM && PPC64LE #if defined(HWAES) @@ -56,8 +56,8 @@ void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, 
#else -/* If HWAES isn't defined then we provide dummy functions for each of the hwaes - * functions. */ +// If HWAES isn't defined then we provide dummy functions for each of the hwaes +// functions. static int hwaes_capable(void) { return 0; } static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, @@ -91,10 +91,10 @@ static void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, abort(); } -#endif /* !HWAES */ +#endif // !HWAES #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_AES_INTERNAL_H */ +#endif // OPENSSL_HEADER_AES_INTERNAL_H diff --git a/crypto/fipsmodule/aes/key_wrap.c b/crypto/fipsmodule/aes/key_wrap.c index 73de17fa..feee0c72 100644 --- a/crypto/fipsmodule/aes/key_wrap.c +++ b/crypto/fipsmodule/aes/key_wrap.c @@ -56,7 +56,7 @@ #include "../../internal.h" -/* kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. */ +// kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. static const uint8_t kDefaultIV[] = { 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, }; @@ -65,7 +65,7 @@ static const unsigned kBound = 6; int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { - /* See RFC 3394, section 2.2.1. */ + // See RFC 3394, section 2.2.1. if (in_len > INT_MAX - 8 || in_len < 8 || in_len % 8 != 0) { return -1; @@ -101,7 +101,7 @@ int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { - /* See RFC 3394, section 2.2.2. */ + // See RFC 3394, section 2.2.2. 
if (in_len > INT_MAX || in_len < 16 || in_len % 8 != 0) { return -1; diff --git a/crypto/fipsmodule/aes/mode_wrappers.c b/crypto/fipsmodule/aes/mode_wrappers.c index 4929920f..34514db5 100644 --- a/crypto/fipsmodule/aes/mode_wrappers.c +++ b/crypto/fipsmodule/aes/mode_wrappers.c @@ -92,7 +92,7 @@ void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, asm_AES_cbc_encrypt(in, out, len, key, ivec, enc); } -#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */ +#endif // OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length, const AES_KEY *key, uint8_t *ivec, int *num) { diff --git a/crypto/fipsmodule/bcm.c b/crypto/fipsmodule/bcm.c index c6ea796e..b506b43e 100644 --- a/crypto/fipsmodule/bcm.c +++ b/crypto/fipsmodule/bcm.c @@ -13,7 +13,7 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(_GNU_SOURCE) -#define _GNU_SOURCE /* needed for syscall() on Linux. */ +#define _GNU_SOURCE // needed for syscall() on Linux. #endif #include @@ -145,7 +145,7 @@ static RSA *self_test_rsa_key(void) { 0xa7, 0x10, 0x93, 0x43, 0x53, 0x4e, 0xe3, 0x16, 0x73, 0x55, 0xce, 0xf2, 0x94, 0xc0, 0xbe, 0xb3, }; - static const uint8_t kE[] = {0x01, 0x00, 0x01}; /* 65537 */ + static const uint8_t kE[] = {0x01, 0x00, 0x01}; // 65537 static const uint8_t kD[] = { 0x2f, 0x2c, 0x1e, 0xd2, 0x3d, 0x2c, 0xb1, 0x9b, 0x21, 0x02, 0xce, 0xb8, 0x95, 0x5f, 0x4f, 0xd9, 0x21, 0x38, 0x11, 0x36, 0xb0, 0x9a, 0x36, 0xab, @@ -288,8 +288,8 @@ static EC_KEY *self_test_ecdsa_key(void) { } #if !defined(OPENSSL_ASAN) -/* These symbols are filled in by delocate.go. They point to the start and end - * of the module, and the location of the integrity hash, respectively. */ +// These symbols are filled in by delocate.go. They point to the start and end +// of the module, and the location of the integrity hash, respectively. 
extern const uint8_t BORINGSSL_bcm_text_start[]; extern const uint8_t BORINGSSL_bcm_text_end[]; extern const uint8_t BORINGSSL_bcm_text_hash[]; @@ -300,8 +300,8 @@ BORINGSSL_bcm_power_on_self_test(void) { CRYPTO_library_init(); #if !defined(OPENSSL_ASAN) - /* Integrity tests cannot run under ASAN because it involves reading the full - * .text section, which triggers the global-buffer overflow detection. */ + // Integrity tests cannot run under ASAN because it involves reading the full + // .text section, which triggers the global-buffer overflow detection. const uint8_t *const start = BORINGSSL_bcm_text_start; const uint8_t *const end = BORINGSSL_bcm_text_end; @@ -478,7 +478,7 @@ BORINGSSL_bcm_power_on_self_test(void) { uint8_t aes_iv[16]; uint8_t output[256]; - /* AES-CBC Encryption KAT */ + // AES-CBC Encryption KAT memcpy(aes_iv, kAESIV, sizeof(kAESIV)); if (AES_set_encrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) { goto err; @@ -490,7 +490,7 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* AES-CBC Decryption KAT */ + // AES-CBC Decryption KAT memcpy(aes_iv, kAESIV, sizeof(kAESIV)); if (AES_set_decrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) { goto err; @@ -511,7 +511,7 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* AES-GCM Encryption KAT */ + // AES-GCM Encryption KAT if (!EVP_AEAD_CTX_seal(&aead_ctx, output, &out_len, sizeof(output), nonce, EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()), kPlaintext, sizeof(kPlaintext), NULL, 0) || @@ -520,7 +520,7 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* AES-GCM Decryption KAT */ + // AES-GCM Decryption KAT if (!EVP_AEAD_CTX_open(&aead_ctx, output, &out_len, sizeof(output), nonce, EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()), kAESGCMCiphertext, sizeof(kAESGCMCiphertext), NULL, @@ -538,7 +538,7 @@ BORINGSSL_bcm_power_on_self_test(void) { DES_set_key(&kDESKey2, &des2); DES_set_key(&kDESKey3, &des3); - /* 3DES Encryption KAT */ + // 3DES Encryption KAT 
memcpy(&des_iv, &kDESIV, sizeof(des_iv)); DES_ede3_cbc_encrypt(kPlaintext, output, sizeof(kPlaintext), &des1, &des2, &des3, &des_iv, DES_ENCRYPT); @@ -547,7 +547,7 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* 3DES Decryption KAT */ + // 3DES Decryption KAT memcpy(&des_iv, &kDESIV, sizeof(des_iv)); DES_ede3_cbc_encrypt(kDESCiphertext, output, sizeof(kDESCiphertext), &des1, &des2, &des3, &des_iv, DES_DECRYPT); @@ -556,21 +556,21 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* SHA-1 KAT */ + // SHA-1 KAT SHA1(kPlaintext, sizeof(kPlaintext), output); if (!check_test(kPlaintextSHA1, output, sizeof(kPlaintextSHA1), "SHA-1 KAT")) { goto err; } - /* SHA-256 KAT */ + // SHA-256 KAT SHA256(kPlaintext, sizeof(kPlaintext), output); if (!check_test(kPlaintextSHA256, output, sizeof(kPlaintextSHA256), "SHA-256 KAT")) { goto err; } - /* SHA-512 KAT */ + // SHA-512 KAT SHA512(kPlaintext, sizeof(kPlaintext), output); if (!check_test(kPlaintextSHA512, output, sizeof(kPlaintextSHA512), "SHA-512 KAT")) { @@ -583,11 +583,11 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* RSA Sign KAT */ + // RSA Sign KAT unsigned sig_len; - /* Disable blinding for the power-on tests because it's not needed and - * triggers an entropy draw. */ + // Disable blinding for the power-on tests because it's not needed and + // triggers an entropy draw. rsa_key->flags |= RSA_FLAG_NO_BLINDING; if (!RSA_sign(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256), output, @@ -597,7 +597,7 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* RSA Verify KAT */ + // RSA Verify KAT if (!RSA_verify(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256), kRSASignature, sizeof(kRSASignature), rsa_key)) { printf("RSA Verify KAT failed.\n"); @@ -612,9 +612,9 @@ BORINGSSL_bcm_power_on_self_test(void) { goto err; } - /* ECDSA Sign/Verify PWCT */ + // ECDSA Sign/Verify PWCT - /* The 'k' value for ECDSA is fixed to avoid an entropy draw. 
*/ + // The 'k' value for ECDSA is fixed to avoid an entropy draw. ec_key->fixed_k = BN_new(); if (ec_key->fixed_k == NULL || !BN_set_word(ec_key->fixed_k, 42)) { @@ -641,7 +641,7 @@ BORINGSSL_bcm_power_on_self_test(void) { ECDSA_SIG_free(sig); EC_KEY_free(ec_key); - /* DBRG KAT */ + // DBRG KAT CTR_DRBG_STATE drbg; if (!CTR_DRBG_init(&drbg, kDRBGEntropy, kDRBGPersonalization, sizeof(kDRBGPersonalization)) || @@ -676,4 +676,4 @@ void BORINGSSL_FIPS_abort(void) { exit(1); } } -#endif /* BORINGSSL_FIPS */ +#endif // BORINGSSL_FIPS diff --git a/crypto/fipsmodule/bn/add.c b/crypto/fipsmodule/bn/add.c index 5848543b..bbe275ed 100644 --- a/crypto/fipsmodule/bn/add.c +++ b/crypto/fipsmodule/bn/add.c @@ -68,20 +68,19 @@ int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { const BIGNUM *tmp; int a_neg = a->neg, ret; - /* a + b a+b - * a + -b a-b - * -a + b b-a - * -a + -b -(a+b) - */ + // a + b a+b + // a + -b a-b + // -a + b b-a + // -a + -b -(a+b) if (a_neg ^ b->neg) { - /* only one is negative */ + // only one is negative if (a_neg) { tmp = a; a = b; b = tmp; } - /* we are now a - b */ + // we are now a - b if (BN_ucmp(a, b) < 0) { if (!BN_usub(r, b, a)) { return 0; @@ -142,7 +141,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { } } if (carry) { - /* carry != 0 => dif == 0 */ + // carry != 0 => dif == 0 *rp = 1; r->top++; } @@ -150,7 +149,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { if (dif && rp != ap) { while (dif--) { - /* copy remaining words if ap != rp */ + // copy remaining words if ap != rp *(rp++) = *(ap++); } } @@ -165,17 +164,17 @@ int BN_add_word(BIGNUM *a, BN_ULONG w) { w &= BN_MASK2; - /* degenerate case: w is zero */ + // degenerate case: w is zero if (!w) { return 1; } - /* degenerate case: a is zero */ + // degenerate case: a is zero if (BN_is_zero(a)) { return BN_set_word(a, w); } - /* handle 'a' when negative */ + // handle 'a' when negative if (a->neg) { a->neg = 0; i = BN_sub_word(a, w); @@ -206,11 +205,10 @@ 
int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { int add = 0, neg = 0; const BIGNUM *tmp; - /* a - b a-b - * a - -b a+b - * -a - b -(a+b) - * -a - -b b-a - */ + // a - b a-b + // a - -b a+b + // -a - b -(a+b) + // -a - -b b-a if (a->neg) { if (b->neg) { tmp = a; @@ -236,7 +234,7 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { return 1; } - /* We are actually doing a - b :-) */ + // We are actually doing a - b :-) max = (a->top > b->top) ? a->top : b->top; if (!bn_wexpand(r, max)) { @@ -267,7 +265,7 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { min = b->top; dif = max - min; - if (dif < 0) /* hmm... should not be happening */ + if (dif < 0) // hmm... should not be happening { OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3); return 0; @@ -295,10 +293,10 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { *(rp++) = t1 & BN_MASK2; } - if (carry) /* subtracted */ + if (carry) // subtracted { if (!dif) { - /* error: a < b */ + // error: a < b return 0; } @@ -329,12 +327,12 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) { w &= BN_MASK2; - /* degenerate case: w is zero */ + // degenerate case: w is zero if (!w) { return 1; } - /* degenerate case: a is zero */ + // degenerate case: a is zero if (BN_is_zero(a)) { i = BN_set_word(a, w); if (i != 0) { @@ -343,7 +341,7 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) { return i; } - /* handle 'a' when negative */ + // handle 'a' when negative if (a->neg) { a->neg = 0; i = BN_add_word(a, w); diff --git a/crypto/fipsmodule/bn/asm/x86_64-gcc.c b/crypto/fipsmodule/bn/asm/x86_64-gcc.c index 72e7689c..a65b86f5 100644 --- a/crypto/fipsmodule/bn/asm/x86_64-gcc.c +++ b/crypto/fipsmodule/bn/asm/x86_64-gcc.c @@ -52,7 +52,7 @@ #include -/* TODO(davidben): Get this file working on Windows x64. */ +// TODO(davidben): Get this file working on Windows x64. 
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__GNUC__) #include "../internal.h" @@ -63,11 +63,9 @@ #define asm __asm__ -/* - * "m"(a), "+m"(r) is the way to favor DirectPath µ-code; - * "g"(0) let the compiler to decide where does it - * want to keep the value of zero; - */ +// "m"(a), "+m"(r) is the way to favor DirectPath µ-code; +// "g"(0) let the compiler to decide where does it +// want to keep the value of zero; #define mul_add(r, a, word, carry) \ do { \ register BN_ULONG high, low; \ @@ -197,7 +195,7 @@ BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, } asm volatile ( - " subq %0,%0 \n" /* clear carry */ + " subq %0,%0 \n" // clear carry " jmp 1f \n" ".p2align 4 \n" "1:" @@ -224,7 +222,7 @@ BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, } asm volatile ( - " subq %0,%0 \n" /* clear borrow */ + " subq %0,%0 \n" // clear borrow " jmp 1f \n" ".p2align 4 \n" "1:" @@ -241,14 +239,13 @@ BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, return ret & 1; } -/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */ -/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */ -/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */ -/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) - */ +// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) +// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) +// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) +// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) -/* Keep in mind that carrying into high part of multiplication result can not - * overflow, because it cannot be all-ones. */ +// Keep in mind that carrying into high part of multiplication result can not +// overflow, because it cannot be all-ones. 
#define mul_add_c(a, b, c0, c1, c2) \ do { \ BN_ULONG t1, t2; \ @@ -539,4 +536,4 @@ void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) { #undef mul_add_c2 #undef sqr_add_c2 -#endif /* !NO_ASM && X86_64 && __GNUC__ */ +#endif // !NO_ASM && X86_64 && __GNUC__ diff --git a/crypto/fipsmodule/bn/bn.c b/crypto/fipsmodule/bn/bn.c index af093e0a..9ba1913d 100644 --- a/crypto/fipsmodule/bn/bn.c +++ b/crypto/fipsmodule/bn/bn.c @@ -175,8 +175,8 @@ DEFINE_METHOD_FUNCTION(BIGNUM, BN_value_one) { out->flags = BN_FLG_STATIC_DATA; } -/* BN_num_bits_word returns the minimum number of bits needed to represent the - * value in |l|. */ +// BN_num_bits_word returns the minimum number of bits needed to represent the +// value in |l|. unsigned BN_num_bits_word(BN_ULONG l) { static const unsigned char bits[256] = { 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, @@ -290,7 +290,7 @@ int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num) { return 0; } OPENSSL_memmove(bn->d, words, num * sizeof(BN_ULONG)); - /* |bn_wexpand| verified that |num| isn't too large. */ + // |bn_wexpand| verified that |num| isn't too large. bn->top = (int)num; bn_correct_top(bn); bn->neg = 0; diff --git a/crypto/fipsmodule/bn/bn_test.cc b/crypto/fipsmodule/bn/bn_test.cc index 3cb5f75c..fe03e5fa 100644 --- a/crypto/fipsmodule/bn/bn_test.cc +++ b/crypto/fipsmodule/bn/bn_test.cc @@ -67,9 +67,9 @@ * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems * Laboratories. */ -/* Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are - * unavailable in C++ unless some macros are defined. C++11 overruled this - * decision, but older Android NDKs still require it. */ +// Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are +// unavailable in C++ unless some macros are defined. C++11 overruled this +// decision, but older Android NDKs still require it. 
#if !defined(__STDC_CONSTANT_MACROS) #define __STDC_CONSTANT_MACROS #endif diff --git a/crypto/fipsmodule/bn/bytes.c b/crypto/fipsmodule/bn/bytes.c index 0988870d..328d56e7 100644 --- a/crypto/fipsmodule/bn/bytes.c +++ b/crypto/fipsmodule/bn/bytes.c @@ -90,8 +90,8 @@ BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) { return NULL; } - /* |bn_wexpand| must check bounds on |num_words| to write it into - * |ret->dmax|. */ + // |bn_wexpand| must check bounds on |num_words| to write it into + // |ret->dmax|. assert(num_words <= INT_MAX); ret->top = (int)num_words; ret->neg = 0; @@ -105,8 +105,8 @@ BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) { } } - /* need to call this due to clear byte at top if avoiding having the top bit - * set (-ve number) */ + // need to call this due to clear byte at top if avoiding having the top bit + // set (-ve number) bn_correct_top(ret); return ret; } @@ -128,7 +128,7 @@ BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) { return ret; } - /* Reserve enough space in |ret|. */ + // Reserve enough space in |ret|. size_t num_words = ((len - 1) / BN_BYTES) + 1; if (!bn_wexpand(ret, num_words)) { BN_free(bn); @@ -136,11 +136,11 @@ BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) { } ret->top = num_words; - /* Make sure the top bytes will be zeroed. */ + // Make sure the top bytes will be zeroed. ret->d[num_words - 1] = 0; - /* We only support little-endian platforms, so we can simply memcpy the - * internal representation. */ + // We only support little-endian platforms, so we can simply memcpy the + // internal representation. OPENSSL_memcpy(ret->d, in, len); bn_correct_top(ret); @@ -160,24 +160,24 @@ size_t BN_bn2bin(const BIGNUM *in, uint8_t *out) { } int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) { - /* If we don't have enough space, fail out. */ + // If we don't have enough space, fail out. 
size_t num_bytes = BN_num_bytes(in); if (len < num_bytes) { return 0; } - /* We only support little-endian platforms, so we can simply memcpy into the - * internal representation. */ + // We only support little-endian platforms, so we can simply memcpy into the + // internal representation. OPENSSL_memcpy(out, in->d, num_bytes); - /* Pad out the rest of the buffer with zeroes. */ + // Pad out the rest of the buffer with zeroes. OPENSSL_memset(out + num_bytes, 0, len - num_bytes); return 1; } -/* constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its - * behavior is undefined if |v| takes any other value. */ +// constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its +// behavior is undefined if |v| takes any other value. static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) { BN_ULONG mask = v; mask--; @@ -185,35 +185,35 @@ static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) { return (~mask & x) | (mask & y); } -/* constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y| - * must not have their MSBs set. */ +// constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y| +// must not have their MSBs set. static int constant_time_le_size_t(size_t x, size_t y) { return ((x - y - 1) >> (sizeof(size_t) * 8 - 1)) & 1; } -/* read_word_padded returns the |i|'th word of |in|, if it is not out of - * bounds. Otherwise, it returns 0. It does so without branches on the size of - * |in|, however it necessarily does not have the same memory access pattern. If - * the access would be out of bounds, it reads the last word of |in|. |in| must - * not be zero. */ +// read_word_padded returns the |i|'th word of |in|, if it is not out of +// bounds. Otherwise, it returns 0. It does so without branches on the size of +// |in|, however it necessarily does not have the same memory access pattern. If +// the access would be out of bounds, it reads the last word of |in|. 
|in| must +// not be zero. static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) { - /* Read |in->d[i]| if valid. Otherwise, read the last word. */ + // Read |in->d[i]| if valid. Otherwise, read the last word. BN_ULONG l = in->d[constant_time_select_ulong( constant_time_le_size_t(in->dmax, i), in->dmax - 1, i)]; - /* Clamp to zero if above |d->top|. */ + // Clamp to zero if above |d->top|. return constant_time_select_ulong(constant_time_le_size_t(in->top, i), 0, l); } int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) { - /* Special case for |in| = 0. Just branch as the probability is negligible. */ + // Special case for |in| = 0. Just branch as the probability is negligible. if (BN_is_zero(in)) { OPENSSL_memset(out, 0, len); return 1; } - /* Check if the integer is too big. This case can exit early in non-constant - * time. */ + // Check if the integer is too big. This case can exit early in non-constant + // time. if ((size_t)in->top > (len + (BN_BYTES - 1)) / BN_BYTES) { return 0; } @@ -224,13 +224,13 @@ int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) { } } - /* Write the bytes out one by one. Serialization is done without branching on - * the bits of |in| or on |in->top|, but if the routine would otherwise read - * out of bounds, the memory access pattern can't be fixed. However, for an - * RSA key of size a multiple of the word size, the probability of BN_BYTES - * leading zero octets is low. - * - * See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */ + // Write the bytes out one by one. Serialization is done without branching on + // the bits of |in| or on |in->top|, but if the routine would otherwise read + // out of bounds, the memory access pattern can't be fixed. However, for an + // RSA key of size a multiple of the word size, the probability of BN_BYTES + // leading zero octets is low. + // + // See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. 
size_t i = len; while (i--) { BN_ULONG l = read_word_padded(in, i / BN_BYTES); diff --git a/crypto/fipsmodule/bn/cmp.c b/crypto/fipsmodule/bn/cmp.c index 71c04658..78647073 100644 --- a/crypto/fipsmodule/bn/cmp.c +++ b/crypto/fipsmodule/bn/cmp.c @@ -159,14 +159,14 @@ int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) { if (dl < 0) { for (i = dl; i < 0; i++) { if (b[n - i] != 0) { - return -1; /* a < b */ + return -1; // a < b } } } if (dl > 0) { for (i = dl; i > 0; i--) { if (a[n + i] != 0) { - return 1; /* a > b */ + return 1; // a > b } } } diff --git a/crypto/fipsmodule/bn/ctx.c b/crypto/fipsmodule/bn/ctx.c index 38197751..af50de93 100644 --- a/crypto/fipsmodule/bn/ctx.c +++ b/crypto/fipsmodule/bn/ctx.c @@ -62,24 +62,24 @@ #include "../../internal.h" -/* How many bignums are in each "pool item"; */ +// How many bignums are in each "pool item"; #define BN_CTX_POOL_SIZE 16 -/* The stack frame info is resizing, set a first-time expansion size; */ +// The stack frame info is resizing, set a first-time expansion size; #define BN_CTX_START_FRAMES 32 -/* A bundle of bignums that can be linked with other bundles */ +// A bundle of bignums that can be linked with other bundles typedef struct bignum_pool_item { - /* The bignum values */ + // The bignum values BIGNUM vals[BN_CTX_POOL_SIZE]; - /* Linked-list admin */ + // Linked-list admin struct bignum_pool_item *prev, *next; } BN_POOL_ITEM; typedef struct bignum_pool { - /* Linked-list admin */ + // Linked-list admin BN_POOL_ITEM *head, *current, *tail; - /* Stack depth and allocation size */ + // Stack depth and allocation size unsigned used, size; } BN_POOL; @@ -88,15 +88,14 @@ static void BN_POOL_finish(BN_POOL *); static BIGNUM *BN_POOL_get(BN_POOL *); static void BN_POOL_release(BN_POOL *, unsigned int); -/************/ -/* BN_STACK */ -/************/ -/* A wrapper to manage the "stack frames" */ +// BN_STACK + +// A wrapper to manage the "stack frames" typedef struct bignum_ctx_stack { - /* 
Array of indexes into the bignum stack */ + // Array of indexes into the bignum stack unsigned int *indexes; - /* Number of stack frames, and the size of the allocated array */ + // Number of stack frames, and the size of the allocated array unsigned int depth, size; } BN_STACK; @@ -105,21 +104,20 @@ static void BN_STACK_finish(BN_STACK *); static int BN_STACK_push(BN_STACK *, unsigned int); static unsigned int BN_STACK_pop(BN_STACK *); -/**********/ -/* BN_CTX */ -/**********/ -/* The opaque BN_CTX type */ +// BN_CTX + +// The opaque BN_CTX type struct bignum_ctx { - /* The bignum bundles */ + // The bignum bundles BN_POOL pool; - /* The "stack frames", if you will */ + // The "stack frames", if you will BN_STACK stack; - /* The number of bignums currently assigned */ + // The number of bignums currently assigned unsigned int used; - /* Depth of stack overflow */ + // Depth of stack overflow int err_stack; - /* Block "gets" until an "end" (compatibility behaviour) */ + // Block "gets" until an "end" (compatibility behaviour) int too_many; }; @@ -130,7 +128,7 @@ BN_CTX *BN_CTX_new(void) { return NULL; } - /* Initialise the structure */ + // Initialise the structure BN_POOL_init(&ret->pool); BN_STACK_init(&ret->stack); ret->used = 0; @@ -150,11 +148,11 @@ void BN_CTX_free(BN_CTX *ctx) { } void BN_CTX_start(BN_CTX *ctx) { - /* If we're already overflowing ... */ + // If we're already overflowing ... if (ctx->err_stack || ctx->too_many) { ctx->err_stack++; } else if (!BN_STACK_push(&ctx->stack, ctx->used)) { - /* (Try to) get a new frame pointer */ + // (Try to) get a new frame pointer OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES); ctx->err_stack++; } @@ -168,14 +166,14 @@ BIGNUM *BN_CTX_get(BN_CTX *ctx) { ret = BN_POOL_get(&ctx->pool); if (ret == NULL) { - /* Setting too_many prevents repeated "get" attempts from - * cluttering the error stack. */ + // Setting too_many prevents repeated "get" attempts from + // cluttering the error stack. 
ctx->too_many = 1; OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES); return NULL; } - /* OK, make sure the returned bignum is "zero" */ + // OK, make sure the returned bignum is "zero" BN_zero(ret); ctx->used++; return ret; @@ -186,20 +184,19 @@ void BN_CTX_end(BN_CTX *ctx) { ctx->err_stack--; } else { unsigned int fp = BN_STACK_pop(&ctx->stack); - /* Does this stack frame have anything to release? */ + // Does this stack frame have anything to release? if (fp < ctx->used) { BN_POOL_release(&ctx->pool, ctx->used - fp); } ctx->used = fp; - /* Unjam "too_many" in case "get" had failed */ + // Unjam "too_many" in case "get" had failed ctx->too_many = 0; } } -/************/ -/* BN_STACK */ -/************/ + +// BN_STACK static void BN_STACK_init(BN_STACK *st) { st->indexes = NULL; @@ -212,7 +209,7 @@ static void BN_STACK_finish(BN_STACK *st) { static int BN_STACK_push(BN_STACK *st, unsigned int idx) { if (st->depth == st->size) { - /* Need to expand */ + // Need to expand unsigned int newsize = (st->size ? 
(st->size * 3 / 2) : BN_CTX_START_FRAMES); unsigned int *newitems = OPENSSL_malloc(newsize * sizeof(unsigned int)); @@ -235,6 +232,7 @@ static unsigned int BN_STACK_pop(BN_STACK *st) { return st->indexes[--(st->depth)]; } + static void BN_POOL_init(BN_POOL *p) { p->head = p->current = p->tail = NULL; p->used = p->size = 0; @@ -259,14 +257,14 @@ static BIGNUM *BN_POOL_get(BN_POOL *p) { return NULL; } - /* Initialise the structure */ + // Initialise the structure for (size_t i = 0; i < BN_CTX_POOL_SIZE; i++) { BN_init(&item->vals[i]); } item->prev = p->tail; item->next = NULL; - /* Link it in */ + // Link it in if (!p->head) { p->head = p->current = p->tail = item; } else { @@ -277,7 +275,7 @@ static BIGNUM *BN_POOL_get(BN_POOL *p) { p->size += BN_CTX_POOL_SIZE; p->used++; - /* Return the first bignum from the new pool */ + // Return the first bignum from the new pool return item->vals; } diff --git a/crypto/fipsmodule/bn/div.c b/crypto/fipsmodule/bn/div.c index dae5656c..1bcff507 100644 --- a/crypto/fipsmodule/bn/div.c +++ b/crypto/fipsmodule/bn/div.c @@ -65,8 +65,8 @@ #if !defined(BN_ULLONG) -/* bn_div_words divides a double-width |h|,|l| by |d| and returns the result, - * which must fit in a |BN_ULONG|. */ +// bn_div_words divides a double-width |h|,|l| by |d| and returns the result, +// which must fit in a |BN_ULONG|. static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { BN_ULONG dh, dl, q, ret = 0, th, tl, t; int i, count = 2; @@ -135,26 +135,26 @@ static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { ret |= q; return ret; } -#endif /* !defined(BN_ULLONG) */ +#endif // !defined(BN_ULLONG) static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out, BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) { - /* GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when - * the |BN_ULLONG|-based C code is used. 
- * - * GCC bugs: - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668 - * - * Clang bugs: - * * https://llvm.org/bugs/show_bug.cgi?id=6397 - * * https://llvm.org/bugs/show_bug.cgi?id=12418 - * - * These issues aren't specific to x86 and x86_64, so it might be worthwhile - * to add more assembly language implementations. */ + // GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when + // the |BN_ULLONG|-based C code is used. + // + // GCC bugs: + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668 + // + // Clang bugs: + // * https://llvm.org/bugs/show_bug.cgi?id=6397 + // * https://llvm.org/bugs/show_bug.cgi?id=12418 + // + // These issues aren't specific to x86 and x86_64, so it might be worthwhile + // to add more assembly language implementations. #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__GNUC__) __asm__ volatile ( "divl %4" @@ -178,17 +178,17 @@ static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out, #endif } -/* BN_div computes dv := num / divisor, rounding towards - * zero, and sets up rm such that dv*divisor + rm = num holds. - * Thus: - * dv->neg == num->neg ^ divisor->neg (unless the result is zero) - * rm->neg == num->neg (unless the remainder is zero) - * If 'dv' or 'rm' is NULL, the respective value is not returned. 
- * - * This was specifically designed to contain fewer branches that may leak - * sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL - * and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and - * Jean-Pierre Seifert. */ +// BN_div computes dv := num / divisor, rounding towards +// zero, and sets up rm such that dv*divisor + rm = num holds. +// Thus: +// dv->neg == num->neg ^ divisor->neg (unless the result is zero) +// rm->neg == num->neg (unless the remainder is zero) +// If 'dv' or 'rm' is NULL, the respective value is not returned. +// +// This was specifically designed to contain fewer branches that may leak +// sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL +// and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and +// Jean-Pierre Seifert. int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, BN_CTX *ctx) { int norm_shift, i, loop; @@ -197,8 +197,8 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, BN_ULONG d0, d1; int num_n, div_n; - /* Invalid zero-padding would have particularly bad consequences - * so don't just rely on bn_check_top() here */ + // Invalid zero-padding would have particularly bad consequences + // so don't just rely on bn_check_top() here if ((num->top > 0 && num->d[num->top - 1] == 0) || (divisor->top > 0 && divisor->d[divisor->top - 1] == 0)) { OPENSSL_PUT_ERROR(BN, BN_R_NOT_INITIALIZED); @@ -223,7 +223,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, goto err; } - /* First we normalise the numbers */ + // First we normalise the numbers norm_shift = BN_BITS2 - ((BN_num_bits(divisor)) % BN_BITS2); if (!(BN_lshift(sdiv, divisor, norm_shift))) { goto err; @@ -235,9 +235,9 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, } snum->neg = 0; - /* Since we don't want to have special-case logic for the case where snum is - * larger than 
sdiv, we pad snum with enough zeroes without changing its - * value. */ + // Since we don't want to have special-case logic for the case where snum is + // larger than sdiv, we pad snum with enough zeroes without changing its + // value. if (snum->top <= sdiv->top + 1) { if (!bn_wexpand(snum, sdiv->top + 2)) { goto err; @@ -257,24 +257,24 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, div_n = sdiv->top; num_n = snum->top; loop = num_n - div_n; - /* Lets setup a 'window' into snum - * This is the part that corresponds to the current - * 'area' being divided */ + // Lets setup a 'window' into snum + // This is the part that corresponds to the current + // 'area' being divided wnum.neg = 0; wnum.d = &(snum->d[loop]); wnum.top = div_n; - /* only needed when BN_ucmp messes up the values between top and max */ - wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */ + // only needed when BN_ucmp messes up the values between top and max + wnum.dmax = snum->dmax - loop; // so we don't step out of bounds - /* Get the top 2 words of sdiv */ - /* div_n=sdiv->top; */ + // Get the top 2 words of sdiv + // div_n=sdiv->top; d0 = sdiv->d[div_n - 1]; d1 = (div_n == 1) ? 
0 : sdiv->d[div_n - 2]; - /* pointer to the 'top' of snum */ + // pointer to the 'top' of snum wnump = &(snum->d[num_n - 1]); - /* Setup to 'res' */ + // Setup to 'res' res->neg = (num->neg ^ divisor->neg); if (!bn_wexpand(res, (loop + 1))) { goto err; @@ -282,13 +282,13 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, res->top = loop - 1; resp = &(res->d[loop - 1]); - /* space for temp */ + // space for temp if (!bn_wexpand(tmp, (div_n + 1))) { goto err; } - /* if res->top == 0 then clear the neg value otherwise decrease - * the resp pointer */ + // if res->top == 0 then clear the neg value otherwise decrease + // the resp pointer if (res->top == 0) { res->neg = 0; } else { @@ -297,8 +297,8 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, for (i = 0; i < loop - 1; i++, wnump--, resp--) { BN_ULONG q, l0; - /* the first part of the loop uses the top two words of snum and sdiv to - * calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv */ + // the first part of the loop uses the top two words of snum and sdiv to + // calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv BN_ULONG n0, n1, rem = 0; n0 = wnump[0]; @@ -306,7 +306,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, if (n0 == d0) { q = BN_MASK2; } else { - /* n0 < d0 */ + // n0 < d0 bn_div_rem_words(&q, &rem, n0, n1, d0); #ifdef BN_ULLONG @@ -318,11 +318,11 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, q--; rem += d0; if (rem < d0) { - break; /* don't let rem overflow */ + break; // don't let rem overflow } t2 -= d1; } -#else /* !BN_ULLONG */ +#else // !BN_ULLONG BN_ULONG t2l, t2h; BN_UMULT_LOHI(t2l, t2h, d1, q); for (;;) { @@ -332,43 +332,41 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, q--; rem += d0; if (rem < d0) { - break; /* don't let rem overflow */ + break; // don't let rem overflow } if (t2l < d1) { t2h--; } t2l -= d1; } -#endif 
/* !BN_ULLONG */ +#endif // !BN_ULLONG } l0 = bn_mul_words(tmp->d, sdiv->d, div_n, q); tmp->d[div_n] = l0; wnum.d--; - /* ingore top values of the bignums just sub the two - * BN_ULONG arrays with bn_sub_words */ + // ingore top values of the bignums just sub the two + // BN_ULONG arrays with bn_sub_words if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n + 1)) { - /* Note: As we have considered only the leading - * two BN_ULONGs in the calculation of q, sdiv * q - * might be greater than wnum (but then (q-1) * sdiv - * is less or equal than wnum) - */ + // Note: As we have considered only the leading + // two BN_ULONGs in the calculation of q, sdiv * q + // might be greater than wnum (but then (q-1) * sdiv + // is less or equal than wnum) q--; if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n)) { - /* we can't have an overflow here (assuming - * that q != 0, but if q == 0 then tmp is - * zero anyway) */ + // we can't have an overflow here (assuming + // that q != 0, but if q == 0 then tmp is + // zero anyway) (*wnump)++; } } - /* store part of the result */ + // store part of the result *resp = q; } bn_correct_top(snum); if (rm != NULL) { - /* Keep a copy of the neg flag in num because if rm==num - * BN_rshift() will overwrite it. - */ + // Keep a copy of the neg flag in num because if rm==num + // BN_rshift() will overwrite it. int neg = num->neg; if (!BN_rshift(rm, snum, norm_shift)) { goto err; @@ -394,7 +392,7 @@ int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) { return 1; } - /* now -|d| < r < 0, so we have to set r := r + |d|. */ + // now -|d| < r < 0, so we have to set r := r + |d|. return (d->neg ? 
BN_sub : BN_add)(r, r, d); } @@ -425,8 +423,8 @@ int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, return BN_nnmod(r, r, m, ctx); } -/* BN_mod_sub variant that may be used if both a and b are non-negative - * and less than m */ +// BN_mod_sub variant that may be used if both a and b are non-negative +// and less than m int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m) { if (!BN_sub(r, a, b)) { @@ -475,7 +473,7 @@ int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx) { return 0; } - /* r->neg == 0, thus we don't need BN_nnmod */ + // r->neg == 0, thus we don't need BN_nnmod return BN_mod(r, r, m, ctx); } @@ -512,9 +510,9 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) { while (n > 0) { int max_shift; - /* 0 < r < m */ + // 0 < r < m max_shift = BN_num_bits(m) - BN_num_bits(r); - /* max_shift >= 0 */ + // max_shift >= 0 if (max_shift < 0) { OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED); @@ -537,7 +535,7 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) { --n; } - /* BN_num_bits(r) <= BN_num_bits(m) */ + // BN_num_bits(r) <= BN_num_bits(m) if (BN_cmp(r, m) >= 0) { if (!BN_sub(r, r, m)) { return 0; @@ -574,7 +572,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) { w &= BN_MASK2; if (!w) { - /* actually this an error (division by zero) */ + // actually this an error (division by zero) return (BN_ULONG) - 1; } @@ -582,7 +580,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) { return 0; } - /* normalize input for |bn_div_rem_words|. */ + // normalize input for |bn_div_rem_words|. j = BN_BITS2 - BN_num_bits_word(w); w <<= j; if (!BN_lshift(a, a, j)) { @@ -623,8 +621,8 @@ BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) { } #ifndef BN_ULLONG - /* If |w| is too long and we don't have |BN_ULLONG| then we need to fall back - * to using |BN_div_word|. 
*/ + // If |w| is too long and we don't have |BN_ULLONG| then we need to fall back + // to using |BN_div_word|. if (w > ((BN_ULONG)1 << BN_BITS4)) { BIGNUM *tmp = BN_dup(a); if (tmp == NULL) { @@ -656,27 +654,27 @@ int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) { size_t num_words = 1 + ((e - 1) / BN_BITS2); - /* If |a| definitely has less than |e| bits, just BN_copy. */ + // If |a| definitely has less than |e| bits, just BN_copy. if ((size_t) a->top < num_words) { return BN_copy(r, a) != NULL; } - /* Otherwise, first make sure we have enough space in |r|. - * Note that this will fail if num_words > INT_MAX. */ + // Otherwise, first make sure we have enough space in |r|. + // Note that this will fail if num_words > INT_MAX. if (!bn_wexpand(r, num_words)) { return 0; } - /* Copy the content of |a| into |r|. */ + // Copy the content of |a| into |r|. OPENSSL_memcpy(r->d, a->d, num_words * sizeof(BN_ULONG)); - /* If |e| isn't word-aligned, we have to mask off some of our bits. */ + // If |e| isn't word-aligned, we have to mask off some of our bits. size_t top_word_exponent = e % (sizeof(BN_ULONG) * 8); if (top_word_exponent != 0) { r->d[num_words - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1; } - /* Fill in the remaining fields of |r|. */ + // Fill in the remaining fields of |r|. r->neg = a->neg; r->top = (int) num_words; bn_correct_top(r); @@ -688,41 +686,41 @@ int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) { return 0; } - /* If the returned value was non-negative, we're done. */ + // If the returned value was non-negative, we're done. if (BN_is_zero(r) || !r->neg) { return 1; } size_t num_words = 1 + (e - 1) / BN_BITS2; - /* Expand |r| to the size of our modulus. */ + // Expand |r| to the size of our modulus. if (!bn_wexpand(r, num_words)) { return 0; } - /* Clear the upper words of |r|. */ + // Clear the upper words of |r|. OPENSSL_memset(&r->d[r->top], 0, (num_words - r->top) * BN_BYTES); - /* Set parameters of |r|. */ + // Set parameters of |r|. 
r->neg = 0; r->top = (int) num_words; - /* Now, invert every word. The idea here is that we want to compute 2^e-|x|, - * which is actually equivalent to the twos-complement representation of |x| - * in |e| bits, which is -x = ~x + 1. */ + // Now, invert every word. The idea here is that we want to compute 2^e-|x|, + // which is actually equivalent to the twos-complement representation of |x| + // in |e| bits, which is -x = ~x + 1. for (int i = 0; i < r->top; i++) { r->d[i] = ~r->d[i]; } - /* If our exponent doesn't span the top word, we have to mask the rest. */ + // If our exponent doesn't span the top word, we have to mask the rest. size_t top_word_exponent = e % BN_BITS2; if (top_word_exponent != 0) { r->d[r->top - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1; } - /* Keep the correct_top invariant for BN_add. */ + // Keep the correct_top invariant for BN_add. bn_correct_top(r); - /* Finally, add one, for the reason described above. */ + // Finally, add one, for the reason described above. return BN_add(r, r, BN_value_one()); } diff --git a/crypto/fipsmodule/bn/exponentiation.c b/crypto/fipsmodule/bn/exponentiation.c index 187b845c..ae78ff99 100644 --- a/crypto/fipsmodule/bn/exponentiation.c +++ b/crypto/fipsmodule/bn/exponentiation.c @@ -188,12 +188,12 @@ err: return ret; } -/* maximum precomputation table size for *variable* sliding windows */ +// maximum precomputation table size for *variable* sliding windows #define TABLE_SIZE 32 typedef struct bn_recp_ctx_st { - BIGNUM N; /* the divisor */ - BIGNUM Nr; /* the reciprocal */ + BIGNUM N; // the divisor + BIGNUM Nr; // the reciprocal int num_bits; int shift; int flags; @@ -227,10 +227,10 @@ static int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *d, BN_CTX *ctx) { return 1; } -/* len is the expected size of the result We actually calculate with an extra - * word of precision, so we can do faster division if the remainder is not - * required. 
- * r := 2^len / m */ +// len is the expected size of the result We actually calculate with an extra +// word of precision, so we can do faster division if the remainder is not +// required. +// r := 2^len / m static int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx) { int ret = -1; BIGNUM *t; @@ -289,34 +289,34 @@ static int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, return 1; } - /* We want the remainder - * Given input of ABCDEF / ab - * we need multiply ABCDEF by 3 digests of the reciprocal of ab */ + // We want the remainder + // Given input of ABCDEF / ab + // we need multiply ABCDEF by 3 digests of the reciprocal of ab - /* i := max(BN_num_bits(m), 2*BN_num_bits(N)) */ + // i := max(BN_num_bits(m), 2*BN_num_bits(N)) i = BN_num_bits(m); j = recp->num_bits << 1; if (j > i) { i = j; } - /* Nr := round(2^i / N) */ + // Nr := round(2^i / N) if (i != recp->shift) { recp->shift = BN_reciprocal(&(recp->Nr), &(recp->N), i, - ctx); /* BN_reciprocal returns i, or -1 for an error */ + ctx); // BN_reciprocal returns i, or -1 for an error } if (recp->shift == -1) { goto err; } - /* d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i - - * BN_num_bits(N)))| - * = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i - - * BN_num_bits(N)))| - * <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)| - * = |m/N| */ + // d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i - + // BN_num_bits(N)))| + // = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i - + // BN_num_bits(N)))| + // <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)| + // = |m/N| if (!BN_rshift(a, m, recp->num_bits)) { goto err; } @@ -383,7 +383,7 @@ static int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, } ca = a; } else { - ca = x; /* Just do the mod */ + ca = x; // Just do the mod } ret = BN_div_recp(NULL, r, ca, recp, ctx); @@ -393,29 +393,29 @@ err: return ret; } -/* BN_window_bits_for_exponent_size -- 
macro for sliding window mod_exp - * functions - * - * For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of - * multiplications is a constant plus on average - * - * 2^(w-1) + (b-w)/(w+1); - * - * here 2^(w-1) is for precomputing the table (we actually need entries only - * for windows that have the lowest bit set), and (b-w)/(w+1) is an - * approximation for the expected number of w-bit windows, not counting the - * first one. - * - * Thus we should use - * - * w >= 6 if b > 671 - * w = 5 if 671 > b > 239 - * w = 4 if 239 > b > 79 - * w = 3 if 79 > b > 23 - * w <= 2 if 23 > b - * - * (with draws in between). Very small exponents are often selected - * with low Hamming weight, so we use w = 1 for b <= 23. */ +// BN_window_bits_for_exponent_size -- macro for sliding window mod_exp +// functions +// +// For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of +// multiplications is a constant plus on average +// +// 2^(w-1) + (b-w)/(w+1); +// +// here 2^(w-1) is for precomputing the table (we actually need entries only +// for windows that have the lowest bit set), and (b-w)/(w+1) is an +// approximation for the expected number of w-bit windows, not counting the +// first one. +// +// Thus we should use +// +// w >= 6 if b > 671 +// w = 5 if 671 > b > 239 +// w = 4 if 239 > b > 79 +// w = 3 if 79 > b > 23 +// w <= 2 if 23 > b +// +// (with draws in between). Very small exponents are often selected +// with low Hamming weight, so we use w = 1 for b <= 23. #define BN_window_bits_for_exponent_size(b) \ ((b) > 671 ? 6 : \ (b) > 239 ? 5 : \ @@ -427,14 +427,14 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, int i, j, bits, ret = 0, wstart, window; int start = 1; BIGNUM *aa; - /* Table of variables obtained from 'ctx' */ + // Table of variables obtained from 'ctx' BIGNUM *val[TABLE_SIZE]; BN_RECP_CTX recp; bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. 
if (BN_is_one(m)) { BN_zero(r); return 1; @@ -451,7 +451,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_RECP_CTX_init(&recp); if (m->neg) { - /* ignore sign of 'm' */ + // ignore sign of 'm' if (!BN_copy(aa, m)) { goto err; } @@ -466,7 +466,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } if (!BN_nnmod(val[0], a, m, ctx)) { - goto err; /* 1 */ + goto err; // 1 } if (BN_is_zero(val[0])) { BN_zero(r); @@ -477,7 +477,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, window = BN_window_bits_for_exponent_size(bits); if (window > 1) { if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) { - goto err; /* 2 */ + goto err; // 2 } j = 1 << (window - 1); for (i = 1; i < j; i++) { @@ -488,18 +488,18 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - start = 1; /* This is used to avoid multiplication etc - * when there is only the value '1' in the - * buffer. */ - wstart = bits - 1; /* The top bit of the window */ + start = 1; // This is used to avoid multiplication etc + // when there is only the value '1' in the + // buffer. + wstart = bits - 1; // The top bit of the window if (!BN_one(r)) { goto err; } for (;;) { - int wvalue; /* The 'value' of the window */ - int wend; /* The bottom bit of the window */ + int wvalue; // The 'value' of the window + int wend; // The bottom bit of the window if (BN_is_bit_set(p, wstart) == 0) { if (!start) { @@ -514,10 +514,10 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, continue; } - /* We now have wstart on a 'set' bit, we now need to work out - * how bit a window to do. To do this we need to scan - * forward until the last set bit before the end of the - * window */ + // We now have wstart on a 'set' bit, we now need to work out + // how bit a window to do. 
To do this we need to scan + // forward until the last set bit before the end of the + // window wvalue = 1; wend = 0; for (i = 1; i < window; i++) { @@ -531,9 +531,9 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - /* wend is the size of the current window */ + // wend is the size of the current window j = wend + 1; - /* add the 'bytes above' */ + // add the 'bytes above' if (!start) { for (i = 0; i < j; i++) { if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) { @@ -542,12 +542,12 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - /* wvalue will be an odd number < 2^window */ + // wvalue will be an odd number < 2^window if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) { goto err; } - /* move the 'window' down further */ + // move the 'window' down further wstart -= wend + 1; start = 0; if (wstart < 0) { @@ -577,7 +577,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, int start = 1; BIGNUM *d, *r; const BIGNUM *aa; - /* Table of variables obtained from 'ctx' */ + // Table of variables obtained from 'ctx' BIGNUM *val[TABLE_SIZE]; BN_MONT_CTX *new_mont = NULL; @@ -587,7 +587,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. if (BN_is_one(m)) { BN_zero(rr); return 1; @@ -603,7 +603,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. 
if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -627,13 +627,13 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } if (!BN_to_montgomery(val[0], aa, mont, ctx)) { - goto err; /* 1 */ + goto err; // 1 } window = BN_window_bits_for_exponent_size(bits); if (window > 1) { if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx)) { - goto err; /* 2 */ + goto err; // 2 } j = 1 << (window - 1); for (i = 1; i < j; i++) { @@ -644,32 +644,32 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } } - start = 1; /* This is used to avoid multiplication etc - * when there is only the value '1' in the - * buffer. */ - wstart = bits - 1; /* The top bit of the window */ + start = 1; // This is used to avoid multiplication etc + // when there is only the value '1' in the + // buffer. + wstart = bits - 1; // The top bit of the window - j = m->top; /* borrow j */ + j = m->top; // borrow j if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { if (!bn_wexpand(r, j)) { goto err; } - /* 2^(top*BN_BITS2) - m */ + // 2^(top*BN_BITS2) - m r->d[0] = (0 - m->d[0]) & BN_MASK2; for (i = 1; i < j; i++) { r->d[i] = (~m->d[i]) & BN_MASK2; } r->top = j; - /* Upper words will be zero if the corresponding words of 'm' - * were 0xfff[...], so decrement r->top accordingly. */ + // Upper words will be zero if the corresponding words of 'm' + // were 0xfff[...], so decrement r->top accordingly. 
bn_correct_top(r); } else if (!BN_to_montgomery(r, BN_value_one(), mont, ctx)) { goto err; } for (;;) { - int wvalue; /* The 'value' of the window */ - int wend; /* The bottom bit of the window */ + int wvalue; // The 'value' of the window + int wend; // The bottom bit of the window if (BN_is_bit_set(p, wstart) == 0) { if (!start && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) { @@ -682,9 +682,9 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, continue; } - /* We now have wstart on a 'set' bit, we now need to work out how bit a - * window to do. To do this we need to scan forward until the last set bit - * before the end of the window */ + // We now have wstart on a 'set' bit, we now need to work out how bit a + // window to do. To do this we need to scan forward until the last set bit + // before the end of the window wvalue = 1; wend = 0; for (i = 1; i < window; i++) { @@ -698,9 +698,9 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } } - /* wend is the size of the current window */ + // wend is the size of the current window j = wend + 1; - /* add the 'bytes above' */ + // add the 'bytes above' if (!start) { for (i = 0; i < j; i++) { if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) { @@ -709,12 +709,12 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } } - /* wvalue will be an odd number < 2^window */ + // wvalue will be an odd number < 2^window if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx)) { goto err; } - /* move the 'window' down further */ + // move the 'window' down further wstart -= wend + 1; start = 0; if (wstart < 0) { @@ -733,10 +733,10 @@ err: return ret; } -/* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific - * layout so that accessing any of these table values shows the same access - * pattern as far as cache lines are concerned. The following functions are - * used to transfer a BIGNUM from/to that table. 
*/ +// BN_mod_exp_mont_consttime() stores the precomputed powers in a specific +// layout so that accessing any of these table values shows the same access +// pattern as far as cache lines are concerned. The following functions are +// used to transfer a BIGNUM from/to that table. static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx, int window) { int i, j; @@ -744,7 +744,7 @@ static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx, BN_ULONG *table = (BN_ULONG *) buf; if (top > b->top) { - top = b->top; /* this works because 'buf' is explicitly zeroed */ + top = b->top; // this works because 'buf' is explicitly zeroed } for (i = 0, j = idx; i < top; i++, j += width) { @@ -778,8 +778,8 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, int xstride = 1 << (window - 2); BN_ULONG y0, y1, y2, y3; - i = idx >> (window - 2); /* equivalent of idx / xstride */ - idx &= xstride - 1; /* equivalent of idx % xstride */ + i = idx >> (window - 2); // equivalent of idx / xstride + idx &= xstride - 1; // equivalent of idx % xstride y0 = (BN_ULONG)0 - (constant_time_eq_int(i, 0) & 1); y1 = (BN_ULONG)0 - (constant_time_eq_int(i, 1) & 1); @@ -804,23 +804,23 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, return 1; } -/* BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache - * line width of the target processor is at least the following value. */ +// BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache +// line width of the target processor is at least the following value. #define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH (64) #define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK \ (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1) -/* Window sizes optimized for fixed window size modular exponentiation - * algorithm (BN_mod_exp_mont_consttime). 
- * - * To achieve the security goals of BN_mode_exp_mont_consttime, the maximum - * size of the window must not exceed - * log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH). - * - * Window size thresholds are defined for cache line sizes of 32 and 64, cache - * line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of - * 7 should only be used on processors that have a 128 byte or greater cache - * line size. */ +// Window sizes optimized for fixed window size modular exponentiation +// algorithm (BN_mod_exp_mont_consttime). +// +// To achieve the security goals of BN_mode_exp_mont_consttime, the maximum +// size of the window must not exceed +// log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH). +// +// Window size thresholds are defined for cache line sizes of 32 and 64, cache +// line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of +// 7 should only be used on processors that have a 128 byte or greater cache +// line size. #if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64 #define BN_window_bits_for_ctime_exponent_size(b) \ @@ -835,19 +835,18 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, #endif -/* Given a pointer value, compute the next address that is a cache line - * multiple. */ +// Given a pointer value, compute the next address that is a cache line +// multiple. #define MOD_EXP_CTIME_ALIGN(x_) \ ((unsigned char *)(x_) + \ (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - \ (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK)))) -/* This variant of BN_mod_exp_mont() uses fixed windows and the special - * precomputation memory layout to limit data-dependency to a minimum - * to protect secret exponents (cf. 
the hyper-threading timing attacks - * pointed out by Colin Percival, - * http://www.daemonology.net/hyperthreading-considered-harmful/) - */ +// This variant of BN_mod_exp_mont() uses fixed windows and the special +// precomputation memory layout to limit data-dependency to a minimum +// to protect secret exponents (cf. the hyper-threading timing attacks +// pointed out by Colin Percival, +// http://www.daemonology.net/hyperthreading-considered-harmful/) int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont) { @@ -871,7 +870,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. if (BN_is_one(m)) { BN_zero(rr); return 1; @@ -879,7 +878,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, return BN_one(rr); } - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -898,9 +897,9 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #ifdef RSAZ_ENABLED - /* If the size of the operands allow it, perform the optimized - * RSAZ exponentiation. For further information see - * crypto/bn/rsaz_exp.c and accompanying assembly modules. */ + // If the size of the operands allow it, perform the optimized + // RSAZ exponentiation. For further information see + // crypto/bn/rsaz_exp.c and accompanying assembly modules. if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) && rsaz_avx2_eligible()) { if (!bn_wexpand(rr, 16)) { @@ -915,19 +914,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #endif - /* Get the window size to use with size of p. 
*/ + // Get the window size to use with size of p. window = BN_window_bits_for_ctime_exponent_size(bits); #if defined(OPENSSL_BN_ASM_MONT5) if (window >= 5) { - window = 5; /* ~5% improvement for RSA2048 sign, and even for RSA4096 */ - /* reserve space for mont->N.d[] copy */ + window = 5; // ~5% improvement for RSA2048 sign, and even for RSA4096 + // reserve space for mont->N.d[] copy powerbufLen += top * sizeof(mont->N.d[0]); } #endif - /* Allocate a buffer large enough to hold all of the pre-computed - * powers of am, am itself and tmp. - */ + // Allocate a buffer large enough to hold all of the pre-computed + // powers of am, am itself and tmp. numPowers = 1 << window; powerbufLen += sizeof(m->d[0]) * @@ -953,7 +951,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #endif - /* lay down tmp and am right after powers table */ + // lay down tmp and am right after powers table tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); am.d = tmp.d + top; tmp.top = am.top = 0; @@ -961,10 +959,10 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, tmp.neg = am.neg = 0; tmp.flags = am.flags = BN_FLG_STATIC_DATA; -/* prepare a^0 in Montgomery domain */ -/* by Shay Gueron's suggestion */ +// prepare a^0 in Montgomery domain +// by Shay Gueron's suggestion if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { - /* 2^(top*BN_BITS2) - m */ + // 2^(top*BN_BITS2) - m tmp.d[0] = (0 - m->d[0]) & BN_MASK2; for (i = 1; i < top; i++) { tmp.d[i] = (~m->d[i]) & BN_MASK2; @@ -974,7 +972,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } - /* prepare a^1 in Montgomery domain */ + // prepare a^1 in Montgomery domain assert(!a->neg); assert(BN_ucmp(a, m) < 0); if (!BN_to_montgomery(&am, a, mont, ctx)) { @@ -982,18 +980,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #if defined(OPENSSL_BN_ASM_MONT5) - /* This optimization uses ideas from 
http://eprint.iacr.org/2011/239, - * specifically optimization of cache-timing attack countermeasures - * and pre-computation optimization. */ + // This optimization uses ideas from http://eprint.iacr.org/2011/239, + // specifically optimization of cache-timing attack countermeasures + // and pre-computation optimization. - /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as - * 512-bit RSA is hardly relevant, we omit it to spare size... */ + // Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as + // 512-bit RSA is hardly relevant, we omit it to spare size... if (window == 5 && top > 1) { const BN_ULONG *n0 = mont->n0; BN_ULONG *np; - /* BN_to_montgomery can contaminate words above .top - * [in BN_DEBUG[_DEBUG] build]... */ + // BN_to_montgomery can contaminate words above .top + // [in BN_DEBUG[_DEBUG] build]... for (i = am.top; i < top; i++) { am.d[i] = 0; } @@ -1001,7 +999,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, tmp.d[i] = 0; } - /* copy mont->N.d[] to improve cache locality */ + // copy mont->N.d[] to improve cache locality for (np = am.d + top, i = 0; i < top; i++) { np[i] = mont->N.d[i]; } @@ -1011,7 +1009,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2); - /* same as above, but uses squaring for 1/2 of operations */ + // same as above, but uses squaring for 1/2 of operations for (i = 4; i < 32; i *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, i); @@ -1042,13 +1040,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } bn_gather5(tmp.d, top, powerbuf, wvalue); - /* At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit - * that has not been read yet.) */ + // At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit + // that has not been read yet.) 
assert(bits >= -1 && (bits == -1 || bits % 5 == 4)); - /* Scan the exponent one window at a time starting from the most - * significant bits. - */ + // Scan the exponent one window at a time starting from the most + // significant bits. if (top & 7) { while (bits >= 0) { for (wvalue = 0, i = 0; i < 5; i++, bits--) { @@ -1066,16 +1063,16 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const uint8_t *p_bytes = (const uint8_t *)p->d; int max_bits = p->top * BN_BITS2; assert(bits < max_bits); - /* |p = 0| has been handled as a special case, so |max_bits| is at least - * one word. */ + // |p = 0| has been handled as a special case, so |max_bits| is at least + // one word. assert(max_bits >= 64); - /* If the first bit to be read lands in the last byte, unroll the first - * iteration to avoid reading past the bounds of |p->d|. (After the first - * iteration, we are guaranteed to be past the last byte.) Note |bits| - * here is the top bit, inclusive. */ + // If the first bit to be read lands in the last byte, unroll the first + // iteration to avoid reading past the bounds of |p->d|. (After the first + // iteration, we are guaranteed to be past the last byte.) Note |bits| + // here is the top bit, inclusive. if (bits - 4 >= max_bits - 8) { - /* Read five bits from |bits-4| through |bits|, inclusive. */ + // Read five bits from |bits-4| through |bits|, inclusive. wvalue = p_bytes[p->top * BN_BYTES - 1]; wvalue >>= (bits - 4) & 7; wvalue &= 0x1f; @@ -1083,7 +1080,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } while (bits >= 0) { - /* Read five bits from |bits-4| through |bits|, inclusive. */ + // Read five bits from |bits-4| through |bits|, inclusive. 
int first_bit = bits - 4; uint16_t val; OPENSSL_memcpy(&val, p_bytes + (first_bit >> 3), sizeof(val)); @@ -1101,7 +1098,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, if (!BN_copy(rr, &tmp)) { ret = 0; } - goto err; /* non-zero ret means it's not error */ + goto err; // non-zero ret means it's not error } } else #endif @@ -1111,18 +1108,17 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } - /* If the window size is greater than 1, then calculate - * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) - * (even powers could instead be computed as (a^(i/2))^2 - * to use the slight performance advantage of sqr over mul). - */ + // If the window size is greater than 1, then calculate + // val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) + // (even powers could instead be computed as (a^(i/2))^2 + // to use the slight performance advantage of sqr over mul). if (window > 1) { if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx) || !copy_to_prebuf(&tmp, top, powerbuf, 2, window)) { goto err; } for (i = 3; i < numPowers; i++) { - /* Calculate a^i = a^(i-1) * a */ + // Calculate a^i = a^(i-1) * a if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx) || !copy_to_prebuf(&tmp, top, powerbuf, i, window)) { goto err; @@ -1138,13 +1134,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } - /* Scan the exponent one window at a time starting from the most - * significant bits. - */ + // Scan the exponent one window at a time starting from the most + // significant bits. 
while (bits >= 0) { - wvalue = 0; /* The 'value' of the window */ + wvalue = 0; // The 'value' of the window - /* Scan the window, squaring the result as we go */ + // Scan the window, squaring the result as we go for (i = 0; i < window; i++, bits--) { if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx)) { goto err; @@ -1152,19 +1147,19 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); } - /* Fetch the appropriate pre-computed value from the pre-buf */ + // Fetch the appropriate pre-computed value from the pre-buf if (!copy_from_prebuf(&am, top, powerbuf, wvalue, window)) { goto err; } - /* Multiply the result into the intermediate result */ + // Multiply the result into the intermediate result if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx)) { goto err; } } } - /* Convert the final result from montgomery to standard format */ + // Convert the final result from montgomery to standard format if (!BN_from_montgomery(rr, &tmp, mont, ctx)) { goto err; } @@ -1212,7 +1207,7 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1, int ret = 0; BN_MONT_CTX *new_mont = NULL; - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -1221,9 +1216,9 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1, mont = new_mont; } - /* BN_mod_mul_montgomery removes one Montgomery factor, so passing one - * Montgomery-encoded and one non-Montgomery-encoded value gives a - * non-Montgomery-encoded result. */ + // BN_mod_mul_montgomery removes one Montgomery factor, so passing one + // Montgomery-encoded and one non-Montgomery-encoded value gives a + // non-Montgomery-encoded result. 
if (!BN_mod_exp_mont(rr, a1, p1, m, ctx, mont) || !BN_mod_exp_mont(&tmp, a2, p2, m, ctx, mont) || !BN_to_montgomery(rr, rr, mont, ctx) || diff --git a/crypto/fipsmodule/bn/gcd.c b/crypto/fipsmodule/bn/gcd.c index 7c20b8e2..850d4467 100644 --- a/crypto/fipsmodule/bn/gcd.c +++ b/crypto/fipsmodule/bn/gcd.c @@ -118,9 +118,9 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { BIGNUM *t; int shifts = 0; - /* 0 <= b <= a */ + // 0 <= b <= a while (!BN_is_zero(b)) { - /* 0 < b <= a */ + // 0 < b <= a if (BN_is_odd(a)) { if (BN_is_odd(b)) { @@ -136,7 +136,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { b = t; } } else { - /* a odd - b even */ + // a odd - b even if (!BN_rshift1(b, b)) { goto err; } @@ -147,7 +147,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { } } } else { - /* a is even */ + // a is even if (BN_is_odd(b)) { if (!BN_rshift1(a, a)) { goto err; @@ -158,7 +158,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { b = t; } } else { - /* a even - b even */ + // a even - b even if (!BN_rshift1(a, a)) { goto err; } @@ -168,7 +168,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { shifts++; } } - /* 0 <= b <= a */ + // 0 <= b <= a } if (shifts) { @@ -224,7 +224,7 @@ err: return ret; } -/* solves ax == 1 (mod n) */ +// solves ax == 1 (mod n) static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); @@ -264,30 +264,29 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } A->neg = 0; sign = -1; - /* From B = a mod |n|, A = |n| it follows that - * - * 0 <= B < A, - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). - */ - - /* Binary inversion algorithm; requires odd modulus. This is faster than the - * general algorithm if the modulus is sufficiently small (about 400 .. 500 - * bits on 32-bit systems, but much more on 64-bit systems) */ + // From B = a mod |n|, A = |n| it follows that + // + // 0 <= B < A, + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). 
+ + // Binary inversion algorithm; requires odd modulus. This is faster than the + // general algorithm if the modulus is sufficiently small (about 400 .. 500 + // bits on 32-bit systems, but much more on 64-bit systems) int shift; while (!BN_is_zero(B)) { - /* 0 < B < |n|, - * 0 < A <= |n|, - * (1) -sign*X*a == B (mod |n|), - * (2) sign*Y*a == A (mod |n|) */ - - /* Now divide B by the maximum possible power of two in the integers, - * and divide X by the same value mod |n|. - * When we're done, (1) still holds. */ + // 0 < B < |n|, + // 0 < A <= |n|, + // (1) -sign*X*a == B (mod |n|), + // (2) sign*Y*a == A (mod |n|) + + // Now divide B by the maximum possible power of two in the integers, + // and divide X by the same value mod |n|. + // When we're done, (1) still holds. shift = 0; while (!BN_is_bit_set(B, shift)) { - /* note that 0 < B */ + // note that 0 < B shift++; if (BN_is_odd(X)) { @@ -295,7 +294,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } } - /* now X is even, so we can easily divide it by two */ + // now X is even, so we can easily divide it by two if (!BN_rshift1(X, X)) { goto err; } @@ -306,10 +305,10 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } } - /* Same for A and Y. Afterwards, (2) still holds. */ + // Same for A and Y. Afterwards, (2) still holds. shift = 0; while (!BN_is_bit_set(A, shift)) { - /* note that 0 < A */ + // note that 0 < A shift++; if (BN_is_odd(Y)) { @@ -317,7 +316,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } } - /* now Y is even */ + // now Y is even if (!BN_rshift1(Y, Y)) { goto err; } @@ -328,32 +327,32 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } } - /* We still have (1) and (2). - * Both A and B are odd. 
- * The following computations ensure that - * - * 0 <= B < |n|, - * 0 < A < |n|, - * (1) -sign*X*a == B (mod |n|), - * (2) sign*Y*a == A (mod |n|), - * - * and that either A or B is even in the next iteration. */ + // We still have (1) and (2). + // Both A and B are odd. + // The following computations ensure that + // + // 0 <= B < |n|, + // 0 < A < |n|, + // (1) -sign*X*a == B (mod |n|), + // (2) sign*Y*a == A (mod |n|), + // + // and that either A or B is even in the next iteration. if (BN_ucmp(B, A) >= 0) { - /* -sign*(X + Y)*a == B - A (mod |n|) */ + // -sign*(X + Y)*a == B - A (mod |n|) if (!BN_uadd(X, X, Y)) { goto err; } - /* NB: we could use BN_mod_add_quick(X, X, Y, n), but that - * actually makes the algorithm slower */ + // NB: we could use BN_mod_add_quick(X, X, Y, n), but that + // actually makes the algorithm slower if (!BN_usub(B, B, A)) { goto err; } } else { - /* sign*(X + Y)*a == A - B (mod |n|) */ + // sign*(X + Y)*a == A - B (mod |n|) if (!BN_uadd(Y, Y, X)) { goto err; } - /* as above, BN_mod_add_quick(Y, Y, X, n) would slow things down */ + // as above, BN_mod_add_quick(Y, Y, X, n) would slow things down if (!BN_usub(A, A, B)) { goto err; } @@ -366,20 +365,20 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } - /* The while loop (Euclid's algorithm) ends when - * A == gcd(a,n); - * we have - * sign*Y*a == A (mod |n|), - * where Y is non-negative. */ + // The while loop (Euclid's algorithm) ends when + // A == gcd(a,n); + // we have + // sign*Y*a == A (mod |n|), + // where Y is non-negative. if (sign < 0) { if (!BN_sub(Y, n, Y)) { goto err; } } - /* Now Y*a == A (mod |n|). */ + // Now Y*a == A (mod |n|). - /* Y*a == 1 (mod |n|) */ + // Y*a == 1 (mod |n|) if (!Y->neg && BN_ucmp(Y, n) < 0) { if (!BN_copy(R, Y)) { goto err; @@ -470,11 +469,11 @@ err: return ret; } -/* bn_mod_inverse_general is the general inversion algorithm that works for - * both even and odd |n|. 
It was specifically designed to contain fewer - * branches that may leak sensitive information; see "New Branch Prediction - * Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by - * Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. */ +// bn_mod_inverse_general is the general inversion algorithm that works for +// both even and odd |n|. It was specifically designed to contain fewer +// branches that may leak sensitive information; see "New Branch Prediction +// Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by +// Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx) { @@ -505,58 +504,53 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, A->neg = 0; sign = -1; - /* From B = a mod |n|, A = |n| it follows that - * - * 0 <= B < A, - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). - */ + // From B = a mod |n|, A = |n| it follows that + // + // 0 <= B < A, + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). while (!BN_is_zero(B)) { BIGNUM *tmp; - /* - * 0 < B < A, - * (*) -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|) - */ + // 0 < B < A, + // (*) -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|) - /* (D, M) := (A/B, A%B) ... */ + // (D, M) := (A/B, A%B) ... if (!BN_div(D, M, A, B, ctx)) { goto err; } - /* Now - * A = D*B + M; - * thus we have - * (**) sign*Y*a == D*B + M (mod |n|). - */ + // Now + // A = D*B + M; + // thus we have + // (**) sign*Y*a == D*B + M (mod |n|). - tmp = A; /* keep the BIGNUM object, the value does not matter */ + tmp = A; // keep the BIGNUM object, the value does not matter - /* (A, B) := (B, A mod B) ... */ + // (A, B) := (B, A mod B) ... A = B; B = M; - /* ... so we have 0 <= B < A again */ - - /* Since the former M is now B and the former B is now A, - * (**) translates into - * sign*Y*a == D*A + B (mod |n|), - * i.e. 
- * sign*Y*a - D*A == B (mod |n|). - * Similarly, (*) translates into - * -sign*X*a == A (mod |n|). - * - * Thus, - * sign*Y*a + D*sign*X*a == B (mod |n|), - * i.e. - * sign*(Y + D*X)*a == B (mod |n|). - * - * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). - * Note that X and Y stay non-negative all the time. - */ + // ... so we have 0 <= B < A again + + // Since the former M is now B and the former B is now A, + // (**) translates into + // sign*Y*a == D*A + B (mod |n|), + // i.e. + // sign*Y*a - D*A == B (mod |n|). + // Similarly, (*) translates into + // -sign*X*a == A (mod |n|). + // + // Thus, + // sign*Y*a + D*sign*X*a == B (mod |n|), + // i.e. + // sign*(Y + D*X)*a == B (mod |n|). + // + // So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). + // Note that X and Y stay non-negative all the time. if (!BN_mul(tmp, D, X, ctx)) { goto err; @@ -565,7 +559,7 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, goto err; } - M = Y; /* keep the BIGNUM object, the value does not matter */ + M = Y; // keep the BIGNUM object, the value does not matter Y = X; X = tmp; sign = -sign; @@ -577,22 +571,20 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, goto err; } - /* - * The while loop (Euclid's algorithm) ends when - * A == gcd(a,n); - * we have - * sign*Y*a == A (mod |n|), - * where Y is non-negative. - */ + // The while loop (Euclid's algorithm) ends when + // A == gcd(a,n); + // we have + // sign*Y*a == A (mod |n|), + // where Y is non-negative. if (sign < 0) { if (!BN_sub(Y, n, Y)) { goto err; } } - /* Now Y*a == A (mod |n|). */ + // Now Y*a == A (mod |n|). 
- /* Y*a == 1 (mod |n|) */ + // Y*a == 1 (mod |n|) if (!Y->neg && BN_ucmp(Y, n) < 0) { if (!BN_copy(R, Y)) { goto err; diff --git a/crypto/fipsmodule/bn/generic.c b/crypto/fipsmodule/bn/generic.c index 3d98689f..b70080f0 100644 --- a/crypto/fipsmodule/bn/generic.c +++ b/crypto/fipsmodule/bn/generic.c @@ -61,8 +61,8 @@ #include "internal.h" -/* This file has two other implementations: x86 assembly language in - * asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. */ +// This file has two other implementations: x86 assembly language in +// asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. #if defined(OPENSSL_NO_ASM) || \ !(defined(OPENSSL_X86) || (defined(OPENSSL_X86_64) && defined(__GNUC__))) @@ -122,7 +122,7 @@ BN_UMULT_LOHI(r0, r1, tmp, tmp); \ } while (0) -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) { @@ -242,7 +242,7 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return (BN_ULONG)ll; } -#else /* !BN_ULLONG */ +#else // !BN_ULLONG BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n) { @@ -299,7 +299,7 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return (BN_ULONG)c; } -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n) { @@ -356,15 +356,15 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return c; } -/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */ -/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */ -/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */ -/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */ +// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) +// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) +// 
sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) +// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) #ifdef BN_ULLONG -/* Keep in mind that additions to multiplication result can not overflow, - * because its high half cannot be all-ones. */ +// Keep in mind that additions to multiplication result can not overflow, +// because its high half cannot be all-ones. #define mul_add_c(a, b, c0, c1, c2) \ do { \ BN_ULONG hi; \ @@ -415,8 +415,8 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, #else -/* Keep in mind that additions to hi can not overflow, because the high word of - * a multiplication result cannot be all-ones. */ +// Keep in mind that additions to hi can not overflow, because the high word of +// a multiplication result cannot be all-ones. #define mul_add_c(a, b, c0, c1, c2) \ do { \ BN_ULONG ta = (a), tb = (b); \ @@ -456,7 +456,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, #define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2) -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) { BN_ULONG c1, c2, c3; diff --git a/crypto/fipsmodule/bn/internal.h b/crypto/fipsmodule/bn/internal.h index 092e759a..ecd7d6cb 100644 --- a/crypto/fipsmodule/bn/internal.h +++ b/crypto/fipsmodule/bn/internal.h @@ -141,7 +141,7 @@ extern "C" { #if defined(OPENSSL_64_BIT) #if !defined(_MSC_VER) -/* MSVC doesn't support two-word integers on 64-bit. */ +// MSVC doesn't support two-word integers on 64-bit. #define BN_ULLONG uint128_t #endif @@ -168,11 +168,11 @@ extern "C" { #define BN_MASK2l (0xffffUL) #define BN_MASK2h1 (0xffff8000UL) #define BN_MASK2h (0xffff0000UL) -/* On some 32-bit platforms, Montgomery multiplication is done using 64-bit - * arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0| - * needs to be two words long. 
Only certain 32-bit platforms actually make use - * of n0[1] and shorter R value would suffice for the others. However, - * currently only the assembly files know which is which. */ +// On some 32-bit platforms, Montgomery multiplication is done using 64-bit +// arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0| +// needs to be two words long. Only certain 32-bit platforms actually make use +// of n0[1] and shorter R value would suffice for the others. However, +// currently only the assembly files know which is which. #define BN_MONT_CTX_N0_LIMBS 2 #define BN_TBIT (0x80000000UL) #define BN_DEC_CONV (1000000000UL) @@ -195,21 +195,21 @@ extern "C" { #define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2) #endif -/* bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or - * until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. */ +// bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or +// until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. void bn_correct_top(BIGNUM *bn); -/* bn_wexpand ensures that |bn| has at least |words| works of space without - * altering its value. It returns one on success or zero on allocation - * failure. */ +// bn_wexpand ensures that |bn| has at least |words| works of space without +// altering its value. It returns one on success or zero on allocation +// failure. int bn_wexpand(BIGNUM *bn, size_t words); -/* bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather - * than a number of words. */ +// bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather +// than a number of words. int bn_expand(BIGNUM *bn, size_t bits); -/* bn_set_words sets |bn| to the value encoded in the |num| words in |words|, - * least significant word first. */ +// bn_set_words sets |bn| to the value encoded in the |num| words in |words|, +// least significant word first. 
int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num); BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); @@ -223,14 +223,14 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b); void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a); void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a); -/* bn_cmp_words returns a value less than, equal to or greater than zero if - * the, length |n|, array |a| is less than, equal to or greater than |b|. */ +// bn_cmp_words returns a value less than, equal to or greater than zero if +// the, length |n|, array |a| is less than, equal to or greater than |b|. int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n); -/* bn_cmp_words returns a value less than, equal to or greater than zero if the - * array |a| is less than, equal to or greater than |b|. The arrays can be of - * different lengths: |cl| gives the minimum of the two lengths and |dl| gives - * the length of |a| minus the length of |b|. */ +// bn_cmp_words returns a value less than, equal to or greater than zero if the +// array |a| is less than, equal to or greater than |b|. The arrays can be of +// different lengths: |cl| gives the minimum of the two lengths and |dl| gives +// the length of |a| minus the length of |b|. int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl); int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, @@ -247,25 +247,25 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n); #error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform." #endif -/* bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|, - * computed with Fermat's Little Theorem. It returns one on success and zero on - * error. If |mont_p| is NULL, one will be computed temporarily. */ +// bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|, +// computed with Fermat's Little Theorem. 
It returns one on success and zero on +// error. If |mont_p| is NULL, one will be computed temporarily. int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx, const BN_MONT_CTX *mont_p); -/* bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses - * |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of - * protecting the exponent. */ +// bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses +// |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of +// protecting the exponent. int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx, const BN_MONT_CTX *mont_p); -/* bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or - * -2 on error. */ +// bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or +// -2 on error. int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BN_INTERNAL_H */ +#endif // OPENSSL_HEADER_BN_INTERNAL_H diff --git a/crypto/fipsmodule/bn/jacobi.c b/crypto/fipsmodule/bn/jacobi.c index 93e8fd97..9c909bb2 100644 --- a/crypto/fipsmodule/bn/jacobi.c +++ b/crypto/fipsmodule/bn/jacobi.c @@ -57,24 +57,24 @@ #include "internal.h" -/* least significant word */ +// least significant word #define BN_lsw(n) (((n)->top == 0) ? (BN_ULONG) 0 : (n)->d[0]) int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { - /* In 'tab', only odd-indexed entries are relevant: - * For any odd BIGNUM n, - * tab[BN_lsw(n) & 7] - * is $(-1)^{(n^2-1)/8}$ (using TeX notation). - * Note that the sign of n does not matter. */ + // In 'tab', only odd-indexed entries are relevant: + // For any odd BIGNUM n, + // tab[BN_lsw(n) & 7] + // is $(-1)^{(n^2-1)/8}$ (using TeX notation). + // Note that the sign of n does not matter. 
static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1}; - /* The Jacobi symbol is only defined for odd modulus. */ + // The Jacobi symbol is only defined for odd modulus. if (!BN_is_odd(b)) { OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS); return -2; } - /* Require b be positive. */ + // Require b be positive. if (BN_is_negative(b)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return -2; @@ -93,22 +93,22 @@ int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { goto end; } - /* Adapted from logic to compute the Kronecker symbol, originally implemented - * according to Henri Cohen, "A Course in Computational Algebraic Number - * Theory" (algorithm 1.4.10). */ + // Adapted from logic to compute the Kronecker symbol, originally implemented + // according to Henri Cohen, "A Course in Computational Algebraic Number + // Theory" (algorithm 1.4.10). ret = 1; while (1) { - /* Cohen's step 3: */ + // Cohen's step 3: - /* B is positive and odd */ + // B is positive and odd if (BN_is_zero(A)) { ret = BN_is_one(B) ? ret : 0; goto end; } - /* now A is non-zero */ + // now A is non-zero int i = 0; while (!BN_is_bit_set(A, i)) { i++; @@ -118,18 +118,18 @@ int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { goto end; } if (i & 1) { - /* i is odd */ - /* multiply 'ret' by $(-1)^{(B^2-1)/8}$ */ + // i is odd + // multiply 'ret' by $(-1)^{(B^2-1)/8}$ ret = ret * tab[BN_lsw(B) & 7]; } - /* Cohen's step 4: */ - /* multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ */ + // Cohen's step 4: + // multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ if ((A->neg ? 
~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2) { ret = -ret; } - /* (A, B) := (B mod |A|, |A|) */ + // (A, B) := (B mod |A|, |A|) if (!BN_nnmod(B, B, A, ctx)) { ret = -2; goto end; diff --git a/crypto/fipsmodule/bn/montgomery.c b/crypto/fipsmodule/bn/montgomery.c index d70509f5..8024e276 100644 --- a/crypto/fipsmodule/bn/montgomery.c +++ b/crypto/fipsmodule/bn/montgomery.c @@ -187,18 +187,18 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { return 0; } - /* Save the modulus. */ + // Save the modulus. if (!BN_copy(&mont->N, mod)) { OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); return 0; } - /* Find n0 such that n0 * N == -1 (mod r). - * - * Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the - * others, we could use a shorter R value and use faster |BN_ULONG|-based - * math instead of |uint64_t|-based math, which would be double-precision. - * However, currently only the assembler files know which is which. */ + // Find n0 such that n0 * N == -1 (mod r). + // + // Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the + // others, we could use a shorter R value and use faster |BN_ULONG|-based + // math instead of |uint64_t|-based math, which would be double-precision. + // However, currently only the assembler files know which is which. uint64_t n0 = bn_mont_n0(mod); mont->n0[0] = (BN_ULONG)n0; #if BN_MONT_CTX_N0_LIMBS == 2 @@ -207,14 +207,14 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { mont->n0[1] = 0; #endif - /* Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS such that R - * > mod. Even though the assembly on some 32-bit platforms works with 64-bit - * values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS * - * BN_BITS2|, is correct because R**2 will still be a multiple of the latter - * as |BN_MONT_CTX_N0_LIMBS| is either one or two. - * - * XXX: This is not constant time with respect to |mont->N|, but it should - * be. */ + // Save RR = R**2 (mod N). 
R is the smallest power of 2**BN_BITS such that R + // > mod. Even though the assembly on some 32-bit platforms works with 64-bit + // values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS * + // BN_BITS2|, is correct because R**2 will still be a multiple of the latter + // as |BN_MONT_CTX_N0_LIMBS| is either one or two. + // + // XXX: This is not constant time with respect to |mont->N|, but it should + // be. unsigned lgBigR = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2; if (!bn_mod_exp_base_2_vartime(&mont->RR, lgBigR * 2, &mont->N)) { return 0; @@ -272,7 +272,7 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, return 1; } - max = (2 * nl); /* carry is stored separately */ + max = (2 * nl); // carry is stored separately if (!bn_wexpand(r, max)) { return 0; } @@ -281,7 +281,7 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, np = n->d; rp = r->d; - /* clear the top words of T */ + // clear the top words of T if (max > r->top) { OPENSSL_memset(&rp[r->top], 0, (max - r->top) * sizeof(BN_ULONG)); } @@ -311,8 +311,8 @@ static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, uintptr_t m; v = bn_sub_words(rp, ap, np, nl) - carry; - /* if subtraction result is real, then trick unconditional memcpy below to - * perform in-place "refresh" instead of actual copy. */ + // if subtraction result is real, then trick unconditional memcpy below to + // perform in-place "refresh" instead of actual copy. m = (0u - (uintptr_t)v); nrp = (BN_ULONG *)(((uintptr_t)rp & ~m) | ((uintptr_t)ap & m)); @@ -371,7 +371,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, #else int num = mont->N.top; - /* |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. */ + // |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. 
if (num < (128 / BN_BITS2) || a->top != num || b->top != num) { @@ -382,7 +382,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, return 0; } if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) { - /* The check above ensures this won't happen. */ + // The check above ensures this won't happen. assert(0); OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); return 0; @@ -417,7 +417,7 @@ static int bn_mod_mul_montgomery_fallback(BIGNUM *r, const BIGNUM *a, } } - /* reduce from aRR to aR */ + // reduce from aRR to aR if (!BN_from_montgomery_word(r, tmp, mont)) { goto err; } diff --git a/crypto/fipsmodule/bn/montgomery_inv.c b/crypto/fipsmodule/bn/montgomery_inv.c index aa2574b0..c3c788ab 100644 --- a/crypto/fipsmodule/bn/montgomery_inv.c +++ b/crypto/fipsmodule/bn/montgomery_inv.c @@ -28,47 +28,47 @@ OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) == BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG), BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T); -/* LG_LITTLE_R is log_2(r). */ +// LG_LITTLE_R is log_2(r). #define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2) uint64_t bn_mont_n0(const BIGNUM *n) { - /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */ + // These conditions are checked by the caller, |BN_MONT_CTX_set|. assert(!BN_is_zero(n)); assert(!BN_is_negative(n)); assert(BN_is_odd(n)); - /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This - * ensures that we can do integer division by |r| by simply ignoring - * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo - * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is - * what makes Montgomery multiplication efficient. 
- * - * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography - * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a - * multi-limb Montgomery multiplication of |a * b (mod n)|, given the - * unreduced product |t == a * b|, we repeatedly calculate: - * - * t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph). - * t2 := t1*n0*n - * t3 := t + t2 - * t := t3 / r copy all limbs of |t3| except the lowest to |t|. - * - * In the last step, it would only make sense to ignore the lowest limb of - * |t3| if it were zero. The middle steps ensure that this is the case: - * - * t3 == 0 (mod r) - * t + t2 == 0 (mod r) - * t + t1*n0*n == 0 (mod r) - * t1*n0*n == -t (mod r) - * t*n0*n == -t (mod r) - * n0*n == -1 (mod r) - * n0 == -1/n (mod r) - * - * Thus, in each iteration of the loop, we multiply by the constant factor - * |n0|, the negative inverse of n (mod r). */ - - /* n_mod_r = n % r. As explained above, this is done by taking the lowest - * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */ + // r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This + // ensures that we can do integer division by |r| by simply ignoring + // |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo + // |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is + // what makes Montgomery multiplication efficient. + // + // As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography + // with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a + // multi-limb Montgomery multiplication of |a * b (mod n)|, given the + // unreduced product |t == a * b|, we repeatedly calculate: + // + // t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph). + // t2 := t1*n0*n + // t3 := t + t2 + // t := t3 / r copy all limbs of |t3| except the lowest to |t|. + // + // In the last step, it would only make sense to ignore the lowest limb of + // |t3| if it were zero. 
The middle steps ensure that this is the case: + // + // t3 == 0 (mod r) + // t + t2 == 0 (mod r) + // t + t1*n0*n == 0 (mod r) + // t1*n0*n == -t (mod r) + // t*n0*n == -t (mod r) + // n0*n == -1 (mod r) + // n0 == -1/n (mod r) + // + // Thus, in each iteration of the loop, we multiply by the constant factor + // |n0|, the negative inverse of n (mod r). + + // n_mod_r = n % r. As explained above, this is done by taking the lowest + // |BN_MONT_CTX_N0_LIMBS| limbs of |n|. uint64_t n_mod_r = n->d[0]; #if BN_MONT_CTX_N0_LIMBS == 2 if (n->top > 1) { @@ -79,32 +79,32 @@ uint64_t bn_mont_n0(const BIGNUM *n) { return bn_neg_inv_mod_r_u64(n_mod_r); } -/* bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v| - * such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| - * must be odd. - * - * This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery - * Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf). - * It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and - * Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000" - * (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21). - * - * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion" - * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is - * constant-time with respect to |n|. We assume uint64_t additions, - * subtractions, shifts, and bitwise operations are all constant time, which - * may be a large leap of faith on 32-bit targets. We avoid division and - * multiplication, which tend to be the most problematic in terms of timing - * leaks. - * - * Most GCD implementations return values such that |u*r + v*n == 1|, so the - * caller would have to negate the resultant |v| for the purpose of Montgomery - * multiplication. This implementation does the negation implicitly by doing - * the computations as a difference instead of a sum. 
*/ +// bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v| +// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| +// must be odd. +// +// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery +// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf). +// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and +// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000" +// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21). +// +// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion" +// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is +// constant-time with respect to |n|. We assume uint64_t additions, +// subtractions, shifts, and bitwise operations are all constant time, which +// may be a large leap of faith on 32-bit targets. We avoid division and +// multiplication, which tend to be the most problematic in terms of timing +// leaks. +// +// Most GCD implementations return values such that |u*r + v*n == 1|, so the +// caller would have to negate the resultant |v| for the purpose of Montgomery +// multiplication. This implementation does the negation implicitly by doing +// the computations as a difference instead of a sum. static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) { assert(n % 2 == 1); - /* alpha == 2**(lg r - 1) == r / 2. */ + // alpha == 2**(lg r - 1) == r / 2. static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1); const uint64_t beta = n; @@ -112,46 +112,46 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) { uint64_t u = 1; uint64_t v = 0; - /* The invariant maintained from here on is: - * 2**(lg r - i) == u*2*alpha - v*beta. */ + // The invariant maintained from here on is: + // 2**(lg r - i) == u*2*alpha - v*beta. 
for (size_t i = 0; i < LG_LITTLE_R; ++i) { #if BN_BITS2 == 64 && defined(BN_ULLONG) assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); #endif - /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set - * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */ - - uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */ - - /* The addition can overflow, so use Dietz's method for it. - * - * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all - * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values - * (embedded in 64 bits to so that overflow can be ignored): - * - * (declare-fun x () (_ BitVec 64)) - * (declare-fun y () (_ BitVec 64)) - * (assert (let ( - * (one (_ bv1 64)) - * (thirtyTwo (_ bv32 64))) - * (and - * (bvult x (bvshl one thirtyTwo)) - * (bvult y (bvshl one thirtyTwo)) - * (not (= - * (bvadd (bvlshr (bvxor x y) one) (bvand x y)) - * (bvlshr (bvadd x y) one))) - * ))) - * (check-sat) */ - uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */ + // Delete a common factor of 2 in u and v if |u| is even. Otherwise, set + // |u = (u + beta) / 2| and |v = (v / 2) + alpha|. + + uint64_t u_is_odd = UINT64_C(0) - (u & 1); // Either 0xff..ff or 0. + + // The addition can overflow, so use Dietz's method for it. + // + // Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all + // (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values + // (embedded in 64 bits to so that overflow can be ignored): + // + // (declare-fun x () (_ BitVec 64)) + // (declare-fun y () (_ BitVec 64)) + // (assert (let ( + // (one (_ bv1 64)) + // (thirtyTwo (_ bv32 64))) + // (and + // (bvult x (bvshl one thirtyTwo)) + // (bvult y (bvshl one thirtyTwo)) + // (not (= + // (bvadd (bvlshr (bvxor x y) one) (bvand x y)) + // (bvlshr (bvadd x y) one))) + // ))) + // (check-sat) + uint64_t beta_if_u_is_odd = beta & u_is_odd; // Either |beta| or 0. 
u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd); - uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */ + uint64_t alpha_if_u_is_odd = alpha & u_is_odd; // Either |alpha| or 0. v = (v >> 1) + alpha_if_u_is_odd; } - /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */ + // The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. #if BN_BITS2 == 64 && defined(BN_ULLONG) assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); #endif @@ -159,9 +159,9 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) { return v; } -/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger - * than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and - * odd. */ +// bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger +// than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and +// odd. int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) { assert(!BN_is_zero(n)); assert(!BN_is_negative(n)); @@ -175,13 +175,13 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) { return 1; } - /* Set |r| to the smallest power of two larger than |n|. */ + // Set |r| to the smallest power of two larger than |n|. assert(p > n_bits); if (!BN_set_bit(r, n_bits)) { return 0; } - /* Unconditionally reduce |r|. */ + // Unconditionally reduce |r|. assert(BN_cmp(r, n) > 0); if (!BN_usub(r, r, n)) { return 0; @@ -189,10 +189,10 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) { assert(BN_cmp(r, n) < 0); for (unsigned i = n_bits; i < p; ++i) { - /* This is like |BN_mod_lshift1_quick| except using |BN_usub|. - * - * TODO: Replace this with the use of a constant-time variant of - * |BN_mod_lshift1_quick|. */ + // This is like |BN_mod_lshift1_quick| except using |BN_usub|. + // + // TODO: Replace this with the use of a constant-time variant of + // |BN_mod_lshift1_quick|. 
if (!BN_lshift1(r, r)) { return 0; } diff --git a/crypto/fipsmodule/bn/mul.c b/crypto/fipsmodule/bn/mul.c index 36a40601..7cc0e3cd 100644 --- a/crypto/fipsmodule/bn/mul.c +++ b/crypto/fipsmodule/bn/mul.c @@ -113,15 +113,15 @@ static void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b, } #if !defined(OPENSSL_X86) || defined(OPENSSL_NO_ASM) -/* Here follows specialised variants of bn_add_words() and bn_sub_words(). They - * have the property performing operations on arrays of different sizes. The - * sizes of those arrays is expressed through cl, which is the common length ( - * basicall, min(len(a),len(b)) ), and dl, which is the delta between the two - * lengths, calculated as len(a)-len(b). All lengths are the number of - * BN_ULONGs... For the operations that require a result array as parameter, - * it must have the length cl+abs(dl). These functions should probably end up - * in bn_asm.c as soon as there are assembler counterparts for the systems that - * use assembler files. */ +// Here follows specialised variants of bn_add_words() and bn_sub_words(). They +// have the property performing operations on arrays of different sizes. The +// sizes of those arrays is expressed through cl, which is the common length ( +// basicall, min(len(a),len(b)) ), and dl, which is the delta between the two +// lengths, calculated as len(a)-len(b). All lengths are the number of +// BN_ULONGs... For the operations that require a result array as parameter, +// it must have the length cl+abs(dl). These functions should probably end up +// in bn_asm.c as soon as there are assembler counterparts for the systems that +// use assembler files. static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) { @@ -274,25 +274,24 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, return c; } #else -/* On other platforms the function is defined in asm. */ +// On other platforms the function is defined in asm. 
BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int cl, int dl); #endif -/* Karatsuba recursive multiplication algorithm - * (cf. Knuth, The Art of Computer Programming, Vol. 2) */ - -/* r is 2*n2 words in size, - * a and b are both n2 words in size. - * n2 must be a power of 2. - * We multiply and return the result. - * t must be 2*n2 words in size - * We calculate - * a[0]*b[0] - * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) - * a[1]*b[1] - */ -/* dnX may not be positive, but n2/2+dnX has to be */ +// Karatsuba recursive multiplication algorithm +// (cf. Knuth, The Art of Computer Programming, Vol. 2) + +// r is 2*n2 words in size, +// a and b are both n2 words in size. +// n2 must be a power of 2. +// We multiply and return the result. +// t must be 2*n2 words in size +// We calculate +// a[0]*b[0] +// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) +// a[1]*b[1] +// dnX may not be positive, but n2/2+dnX has to be static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, int dna, int dnb, BN_ULONG *t) { int n = n2 / 2, c1, c2; @@ -300,15 +299,14 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, unsigned int neg, zero; BN_ULONG ln, lo, *p; - /* Only call bn_mul_comba 8 if n2 == 8 and the - * two arrays are complete [steve] - */ + // Only call bn_mul_comba 8 if n2 == 8 and the + // two arrays are complete [steve] if (n2 == 8 && dna == 0 && dnb == 0) { bn_mul_comba8(r, a, b); return; } - /* Else do normal multiply */ + // Else do normal multiply if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL) { bn_mul_normal(r, a, n2 + dna, b, n2 + dnb); if ((dna + dnb) < 0) { @@ -318,21 +316,21 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, return; } - /* r=(a[0]-a[1])*(b[1]-b[0]) */ + // r=(a[0]-a[1])*(b[1]-b[0]) c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna); c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n); zero = neg = 0; switch (c1 * 3 + c2) { case -4: - bn_sub_part_words(t, 
&(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - break; case -3: zero = 1; break; case -2: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // + neg = 1; break; case -1: @@ -341,8 +339,8 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, zero = 1; break; case 2: - bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // + + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - neg = 1; break; case 3: @@ -355,7 +353,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, } if (n == 4 && dna == 0 && dnb == 0) { - /* XXX: bn_mul_comba4 could take extra args to do this well */ + // XXX: bn_mul_comba4 could take extra args to do this well if (!zero) { bn_mul_comba4(&(t[n2]), t, &(t[n])); } else { @@ -365,7 +363,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, bn_mul_comba4(r, a, b); bn_mul_comba4(&(r[n2]), &(a[n]), &(b[n])); } else if (n == 8 && dna == 0 && dnb == 0) { - /* XXX: bn_mul_comba8 could take extra args to do this well */ + // XXX: bn_mul_comba8 could take extra args to do this well if (!zero) { bn_mul_comba8(&(t[n2]), t, &(t[n])); } else { @@ -385,24 +383,24 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign + // r[10] holds (a[0]*b[0]) + // r[32] holds 
(b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); if (neg) { - /* if t[32] is negative */ + // if t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); } else { - /* Might have a carry */ + // Might have a carry c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2)); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); @@ -410,8 +408,8 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, ln = (lo + c1) & BN_MASK2; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; @@ -423,9 +421,9 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, } } -/* n+tn is the word length - * t needs to be n*4 is size, as does r */ -/* tnX may not be negative but less than n */ +// n+tn is the word length +// t needs to be n*4 is size, as does r +// tnX may not be negative but less than n static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, int tna, int tnb, BN_ULONG *t) { int i, j, n2 = n * 2; @@ -437,33 +435,33 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, return; } - /* r=(a[0]-a[1])*(b[1]-b[0]) */ + // r=(a[0]-a[1])*(b[1]-b[0]) c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna); c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n); neg = 0; switch (c1 * 3 + c2) { case -4: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + 
bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - break; case -3: - /* break; */ + // break; case -2: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // + neg = 1; break; case -1: case 0: case 1: - /* break; */ + // break; case 2: - bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // + + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - neg = 1; break; case 3: - /* break; */ + // break; case 4: bn_sub_part_words(t, a, &(a[n]), tna, n - tna); bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); @@ -480,8 +478,8 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, bn_mul_recursive(&(t[n2]), t, &(t[n]), n, 0, 0, p); bn_mul_recursive(r, a, b, n, 0, 0, p); i = n / 2; - /* If there is only a bottom half to the number, - * just do it */ + // If there is only a bottom half to the number, + // just do it if (tna > tnb) { j = tna - i; } else { @@ -492,12 +490,12 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); OPENSSL_memset(&(r[n2 + i * 2]), 0, sizeof(BN_ULONG) * (n2 - i * 2)); } else if (j > 0) { - /* eg, n == 16, i == 8 and tn == 11 */ + // eg, n == 16, i == 8 and tn == 11 bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); OPENSSL_memset(&(r[n2 + tna + tnb]), 0, sizeof(BN_ULONG) * (n2 - tna - tnb)); } else { - /* (j < 0) eg, n == 16, i == 8 and tn == 5 */ + // (j < 0) eg, n == 16, i == 8 and tn == 5 OPENSSL_memset(&(r[n2]), 0, sizeof(BN_ULONG) * n2); if (tna < BN_MUL_RECURSIVE_SIZE_NORMAL && tnb < BN_MUL_RECURSIVE_SIZE_NORMAL) { @@ -505,9 +503,9 @@ static void bn_mul_part_recursive(BN_ULONG *r, 
BN_ULONG *a, BN_ULONG *b, int n, } else { for (;;) { i /= 2; - /* these simplified conditions work - * exclusively because difference - * between tna and tnb is 1 or 0 */ + // these simplified conditions work + // exclusively because difference + // between tna and tnb is 1 or 0 if (i < tna || i < tnb) { bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); @@ -522,25 +520,24 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, } } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) - */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); if (neg) { - /* if t[32] is negative */ + // if t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); } else { - /* Might have a carry */ + // Might have a carry c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2)); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); @@ -548,8 +545,8 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, ln = (lo + c1) & BN_MASK2; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; @@ -627,7 +624,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { } bn_mul_part_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d); } else { - /* al <= j || bl <= j */ + // al <= j || bl <= j if (!bn_wexpand(t, k * 2)) { goto err; } @@ 
-659,7 +656,7 @@ err: return ret; } -/* tmp must have 2*n words */ +// tmp must have 2*n words static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp) { int i, j, max; const BN_ULONG *ap; @@ -687,23 +684,22 @@ static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp) bn_add_words(r, r, r, max); - /* There will not be a carry */ + // There will not be a carry bn_sqr_words(tmp, a, n); bn_add_words(r, r, tmp, max); } -/* r is 2*n words in size, - * a and b are both n words in size. (There's not actually a 'b' here ...) - * n must be a power of 2. - * We multiply and return the result. - * t must be 2*n words in size - * We calculate - * a[0]*b[0] - * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) - * a[1]*b[1] - */ +// r is 2*n words in size, +// a and b are both n words in size. (There's not actually a 'b' here ...) +// n must be a power of 2. +// We multiply and return the result. +// t must be 2*n words in size +// We calculate +// a[0]*b[0] +// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) +// a[1]*b[1] static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t) { int n = n2 / 2; int zero, c1; @@ -720,7 +716,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t bn_sqr_normal(r, a, n2, t); return; } - /* r=(a[0]-a[1])*(a[1]-a[0]) */ + // r=(a[0]-a[1])*(a[1]-a[0]) c1 = bn_cmp_words(a, &(a[n]), n); zero = 0; if (c1 > 0) { @@ -731,7 +727,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t zero = 1; } - /* The result will always be negative unless it is zero */ + // The result will always be negative unless it is zero p = &(t[n2 * 2]); if (!zero) { @@ -742,19 +738,19 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t bn_sqr_recursive(r, a, n, p); bn_sqr_recursive(&(r[n2]), &(a[n]), n, p); - /* t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) */ 
+ // t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); - /* t[32] is negative */ + // t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); - /* t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1]) - * r[10] holds (a[0]*a[0]) - * r[32] holds (a[1]*a[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1]) + // r[10] holds (a[0]*a[0]) + // r[32] holds (a[1]*a[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); @@ -762,8 +758,8 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t ln = (lo + c1) & BN_MASK2; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; @@ -818,7 +814,7 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { goto err; } - max = 2 * al; /* Non-zero (from above) */ + max = 2 * al; // Non-zero (from above) if (!bn_wexpand(rr, max)) { goto err; } @@ -852,8 +848,8 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { } rr->neg = 0; - /* If the most-significant half of the top word of 'a' is zero, then - * the square of 'a' will max-1 words. */ + // If the most-significant half of the top word of 'a' is zero, then + // the square of 'a' will max-1 words. if (a->d[al - 1] == (a->d[al - 1] & BN_MASK2l)) { rr->top = max - 1; } else { diff --git a/crypto/fipsmodule/bn/prime.c b/crypto/fipsmodule/bn/prime.c index 3e2e6f54..691d0cba 100644 --- a/crypto/fipsmodule/bn/prime.c +++ b/crypto/fipsmodule/bn/prime.c @@ -113,13 +113,13 @@ #include "internal.h" -/* The quick sieve algorithm approach to weeding out primes is Philip - * Zimmermann's, as implemented in PGP. 
I have had a read of his comments and - * implemented my own version. */ +// The quick sieve algorithm approach to weeding out primes is Philip +// Zimmermann's, as implemented in PGP. I have had a read of his comments and +// implemented my own version. #define NUMPRIMES 2048 -/* primes contains all the primes that fit into a uint16_t. */ +// primes contains all the primes that fit into a uint16_t. static const uint16_t primes[NUMPRIMES] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, @@ -310,12 +310,12 @@ static const uint16_t primes[NUMPRIMES] = { 17851, 17863, }; -/* BN_prime_checks_for_size returns the number of Miller-Rabin iterations - * necessary for a 'bits'-bit prime, in order to maintain an error rate greater - * than the security level for an RSA prime of that many bits (calculated using - * the FIPS SP 800-57 security level and 186-4 Section F.1; original paper: - * Damgaard, Landrock, Pomerance: Average case error estimates for the strong - * probable prime test. -- Math. Comp. 61 (1993) 177-194) */ +// BN_prime_checks_for_size returns the number of Miller-Rabin iterations +// necessary for a 'bits'-bit prime, in order to maintain an error rate greater +// than the security level for an RSA prime of that many bits (calculated using +// the FIPS SP 800-57 security level and 186-4 Section F.1; original paper: +// Damgaard, Landrock, Pomerance: Average case error estimates for the strong +// probable prime test. -- Math. Comp. 61 (1993) 177-194) static int BN_prime_checks_for_size(int bits) { if (bits >= 3747) { return 3; @@ -371,11 +371,11 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, int checks = BN_prime_checks_for_size(bits); if (bits < 2) { - /* There are no prime numbers this small. */ + // There are no prime numbers this small. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } else if (bits == 2 && safe) { - /* The smallest safe prime (7) is three bits. 
*/ + // The smallest safe prime (7) is three bits. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } @@ -391,7 +391,7 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, } loop: - /* make a random number and set the top and bottom bits */ + // make a random number and set the top and bottom bits if (add == NULL) { if (!probable_prime(ret, bits)) { goto err; @@ -409,7 +409,7 @@ loop: } if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, c1++)) { - /* aborted */ + // aborted goto err; } @@ -421,8 +421,8 @@ loop: goto loop; } } else { - /* for "safe prime" generation, check that (p-1)/2 is prime. Since a prime - * is odd, We just need to divide by 2 */ + // for "safe prime" generation, check that (p-1)/2 is prime. Since a prime + // is odd, We just need to divide by 2 if (!BN_rshift1(t, ret)) { goto err; } @@ -445,11 +445,11 @@ loop: if (!BN_GENCB_call(cb, i, c1 - 1)) { goto err; } - /* We have a safe prime test pass */ + // We have a safe prime test pass } } - /* we have a prime :-) */ + // we have a prime :-) found = 1; err: @@ -487,13 +487,13 @@ int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx, return 0; } - /* first look for small factors */ + // first look for small factors if (!BN_is_odd(a)) { - /* a is even => a is prime if and only if a == 2 */ + // a is even => a is prime if and only if a == 2 return BN_is_word(a, 2); } - /* Enhanced Miller-Rabin does not work for three. */ + // Enhanced Miller-Rabin does not work for three. if (BN_is_word(a, 3)) { return 1; } @@ -539,7 +539,7 @@ err: int BN_enhanced_miller_rabin_primality_test( enum bn_primality_result_t *out_result, const BIGNUM *w, int iterations, BN_CTX *ctx, BN_GENCB *cb) { - /* Enhanced Miller-Rabin is only valid on odd integers greater than 3. */ + // Enhanced Miller-Rabin is only valid on odd integers greater than 3. 
if (!BN_is_odd(w) || BN_cmp_word(w, 3) <= 0) { OPENSSL_PUT_ERROR(BN, BN_R_INVALID_INPUT); return 0; @@ -561,7 +561,7 @@ int BN_enhanced_miller_rabin_primality_test( goto err; } - /* Write w1 as m*2^a (Steps 1 and 2). */ + // Write w1 as m*2^a (Steps 1 and 2). int a = 0; while (!BN_is_bit_set(w1, a)) { a++; @@ -585,22 +585,22 @@ int BN_enhanced_miller_rabin_primality_test( goto err; } - /* Montgomery setup for computations mod A */ + // Montgomery setup for computations mod A mont = BN_MONT_CTX_new(); if (mont == NULL || !BN_MONT_CTX_set(mont, w, ctx)) { goto err; } - /* The following loop performs in inner iteration of the Enhanced Miller-Rabin - * Primality test (Step 4). */ + // The following loop performs in inner iteration of the Enhanced Miller-Rabin + // Primality test (Step 4). for (int i = 1; i <= iterations; i++) { - /* Step 4.1-4.2 */ + // Step 4.1-4.2 if (!BN_rand_range_ex(b, 2, w1)) { goto err; } - /* Step 4.3-4.4 */ + // Step 4.3-4.4 if (!BN_gcd(g, b, w, ctx)) { goto err; } @@ -610,17 +610,17 @@ int BN_enhanced_miller_rabin_primality_test( goto err; } - /* Step 4.5 */ + // Step 4.5 if (!BN_mod_exp_mont(z, b, m, w, ctx, mont)) { goto err; } - /* Step 4.6 */ + // Step 4.6 if (BN_is_one(z) || BN_cmp(z, w1) == 0) { goto loop; } - /* Step 4.7 */ + // Step 4.7 for (int j = 1; j < a; j++) { if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { goto err; @@ -633,18 +633,18 @@ int BN_enhanced_miller_rabin_primality_test( } } - /* Step 4.8-4.9 */ + // Step 4.8-4.9 if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { goto err; } - /* Step 4.10-4.11 */ + // Step 4.10-4.11 if (!BN_is_one(z) && !BN_copy(x, z)) { goto err; } composite: - /* Step 4.12-4.14 */ + // Step 4.12-4.14 if (!BN_copy(x1, x) || !BN_sub_word(x1, 1) || !BN_gcd(g, x1, w, ctx)) { @@ -660,7 +660,7 @@ int BN_enhanced_miller_rabin_primality_test( goto err; loop: - /* Step 4.15 */ + // Step 4.15 if (!BN_GENCB_call(cb, 1, i)) { goto err; } @@ -688,7 +688,7 @@ again: return 0; } - /* we now have a 
random number 'rnd' to test. */ + // we now have a random number 'rnd' to test. for (i = 1; i < NUMPRIMES; i++) { BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); if (mod == (BN_ULONG)-1) { @@ -696,12 +696,12 @@ again: } mods[i] = (uint16_t)mod; } - /* If bits is so small that it fits into a single word then we - * additionally don't want to exceed that many bits. */ + // If bits is so small that it fits into a single word then we + // additionally don't want to exceed that many bits. if (is_single_word) { BN_ULONG size_limit; if (bits == BN_BITS2) { - /* Avoid undefined behavior. */ + // Avoid undefined behavior. size_limit = ~((BN_ULONG)0) - BN_get_word(rnd); } else { size_limit = (((BN_ULONG)1) << bits) - BN_get_word(rnd) - 1; @@ -716,15 +716,15 @@ loop: if (is_single_word) { BN_ULONG rnd_word = BN_get_word(rnd); - /* In the case that the candidate prime is a single word then - * we check that: - * 1) It's greater than primes[i] because we shouldn't reject - * 3 as being a prime number because it's a multiple of - * three. - * 2) That it's not a multiple of a known prime. We don't - * check that rnd-1 is also coprime to all the known - * primes because there aren't many small primes where - * that's true. */ + // In the case that the candidate prime is a single word then + // we check that: + // 1) It's greater than primes[i] because we shouldn't reject + // 3 as being a prime number because it's a multiple of + // three. + // 2) That it's not a multiple of a known prime. We don't + // check that rnd-1 is also coprime to all the known + // primes because there aren't many small primes where + // that's true. 
for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) { if ((mods[i] + delta) % primes[i] == 0) { delta += 2; @@ -736,8 +736,8 @@ loop: } } else { for (i = 1; i < NUMPRIMES; i++) { - /* check that rnd is not a prime and also - * that gcd(rnd-1,primes) == 1 (except for 2) */ + // check that rnd is not a prime and also + // that gcd(rnd-1,primes) == 1 (except for 2) if (((mods[i] + delta) % primes[i]) <= 1) { delta += 2; if (delta > maxdelta) { @@ -772,7 +772,7 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, goto err; } - /* we need ((rnd-rem) % add) == 0 */ + // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, rnd, add, ctx)) { goto err; @@ -789,11 +789,11 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, goto err; } } - /* we now have a random number 'rand' to test. */ + // we now have a random number 'rand' to test. loop: for (i = 1; i < NUMPRIMES; i++) { - /* check that rnd is a prime */ + // check that rnd is a prime BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); if (mod == (BN_ULONG)-1) { goto err; @@ -835,7 +835,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, goto err; } - /* we need ((rnd-rem) % add) == 0 */ + // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, q, qadd, ctx)) { goto err; } @@ -857,7 +857,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, } } - /* we now have a random number 'rand' to test. */ + // we now have a random number 'rand' to test. 
if (!BN_lshift1(p, q)) { goto err; } @@ -867,9 +867,9 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, loop: for (i = 1; i < NUMPRIMES; i++) { - /* check that p and q are prime */ - /* check that for p and q - * gcd(p-1,primes) == 1 (except for 2) */ + // check that p and q are prime + // check that for p and q + // gcd(p-1,primes) == 1 (except for 2) BN_ULONG pmod = BN_mod_word(p, (BN_ULONG)primes[i]); BN_ULONG qmod = BN_mod_word(q, (BN_ULONG)primes[i]); if (pmod == (BN_ULONG)-1 || qmod == (BN_ULONG)-1) { diff --git a/crypto/fipsmodule/bn/random.c b/crypto/fipsmodule/bn/random.c index 8aa40cf8..64e7605b 100644 --- a/crypto/fipsmodule/bn/random.c +++ b/crypto/fipsmodule/bn/random.c @@ -158,7 +158,7 @@ static int bn_rand_with_additional_data(BIGNUM *rnd, int bits, int top, goto err; } - /* Make a random number and set the top and bottom bits. */ + // Make a random number and set the top and bottom bits. RAND_bytes_with_additional_data(buf, bytes, additional_data); if (top != BN_RAND_TOP_ANY) { @@ -176,7 +176,7 @@ static int bn_rand_with_additional_data(BIGNUM *rnd, int bits, int top, buf[0] &= ~mask; - /* Set the bottom bit if requested, */ + // Set the bottom bit if requested, if (bottom == BN_RAND_BOTTOM_ODD) { buf[bytes - 1] |= 1; } @@ -212,28 +212,28 @@ static int bn_rand_range_with_additional_data( return 0; } - /* This function is used to implement steps 4 through 7 of FIPS 186-4 - * appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive| - * is n and |min_inclusive| is one. */ + // This function is used to implement steps 4 through 7 of FIPS 186-4 + // appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive| + // is n and |min_inclusive| is one. 
unsigned count = 100; - unsigned n = BN_num_bits(max_exclusive); /* n > 0 */ + unsigned n = BN_num_bits(max_exclusive); // n > 0 do { if (!--count) { OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); return 0; } - if (/* steps 4 and 5 */ + if (// steps 4 and 5 !bn_rand_with_additional_data(r, n, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY, additional_data) || - /* step 7 */ + // step 7 !BN_add_word(r, min_inclusive)) { return 0; } - /* Step 6. This loops if |r| >= |max_exclusive|. This is identical to - * checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the - * formulation stated in FIPS 186-4. */ + // Step 6. This loops if |r| >= |max_exclusive|. This is identical to + // checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the + // formulation stated in FIPS 186-4. } while (BN_cmp(r, max_exclusive) >= 0); return 1; @@ -256,22 +256,22 @@ int BN_pseudo_rand_range(BIGNUM *r, const BIGNUM *range) { int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv, const uint8_t *message, size_t message_len, BN_CTX *ctx) { - /* We copy |priv| into a local buffer to avoid furthur exposing its - * length. */ + // We copy |priv| into a local buffer to avoid furthur exposing its + // length. uint8_t private_bytes[96]; size_t todo = sizeof(priv->d[0]) * priv->top; if (todo > sizeof(private_bytes)) { - /* No reasonable DSA or ECDSA key should have a private key - * this large and we don't handle this case in order to avoid - * leaking the length of the private key. */ + // No reasonable DSA or ECDSA key should have a private key + // this large and we don't handle this case in order to avoid + // leaking the length of the private key. OPENSSL_PUT_ERROR(BN, BN_R_PRIVATE_KEY_TOO_LARGE); return 0; } OPENSSL_memcpy(private_bytes, priv->d, todo); OPENSSL_memset(private_bytes + todo, 0, sizeof(private_bytes) - todo); - /* Pass a SHA512 hash of the private key and message as additional data into - * the RBG. 
This is a hardening measure against entropy failure. */ + // Pass a SHA512 hash of the private key and message as additional data into + // the RBG. This is a hardening measure against entropy failure. OPENSSL_COMPILE_ASSERT(SHA512_DIGEST_LENGTH >= 32, additional_data_is_too_large_for_sha512); SHA512_CTX sha; @@ -281,6 +281,6 @@ int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv, SHA512_Update(&sha, message, message_len); SHA512_Final(digest, &sha); - /* Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2. */ + // Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2. return bn_rand_range_with_additional_data(out, 1, range, digest); } diff --git a/crypto/fipsmodule/bn/shift.c b/crypto/fipsmodule/bn/shift.c index 1e41342d..d3fcf395 100644 --- a/crypto/fipsmodule/bn/shift.c +++ b/crypto/fipsmodule/bn/shift.c @@ -157,7 +157,7 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n) { } } else { if (n == 0) { - return 1; /* or the copying loop will go berserk */ + return 1; // or the copying loop will go berserk } } diff --git a/crypto/fipsmodule/bn/sqrt.c b/crypto/fipsmodule/bn/sqrt.c index 0342bc06..68ccb919 100644 --- a/crypto/fipsmodule/bn/sqrt.c +++ b/crypto/fipsmodule/bn/sqrt.c @@ -60,9 +60,9 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { - /* Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm - * (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory", - * algorithm 1.5.1). |p| is assumed to be a prime. */ + // Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm + // (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory", + // algorithm 1.5.1). |p| is assumed to be a prime. 
BIGNUM *ret = in; int err = 1; @@ -125,26 +125,25 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } - /* A = a mod p */ + // A = a mod p if (!BN_nnmod(A, a, p, ctx)) { goto end; } - /* now write |p| - 1 as 2^e*q where q is odd */ + // now write |p| - 1 as 2^e*q where q is odd e = 1; while (!BN_is_bit_set(p, e)) { e++; } - /* we'll set q later (if needed) */ + // we'll set q later (if needed) if (e == 1) { - /* The easy case: (|p|-1)/2 is odd, so 2 has an inverse - * modulo (|p|-1)/2, and square roots can be computed - * directly by modular exponentiation. - * We have - * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), - * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. - */ + // The easy case: (|p|-1)/2 is odd, so 2 has an inverse + // modulo (|p|-1)/2, and square roots can be computed + // directly by modular exponentiation. + // We have + // 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), + // so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. if (!BN_rshift(q, p, 2)) { goto end; } @@ -158,39 +157,38 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } if (e == 2) { - /* |p| == 5 (mod 8) - * - * In this case 2 is always a non-square since - * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. - * So if a really is a square, then 2*a is a non-square. - * Thus for - * b := (2*a)^((|p|-5)/8), - * i := (2*a)*b^2 - * we have - * i^2 = (2*a)^((1 + (|p|-5)/4)*2) - * = (2*a)^((p-1)/2) - * = -1; - * so if we set - * x := a*b*(i-1), - * then - * x^2 = a^2 * b^2 * (i^2 - 2*i + 1) - * = a^2 * b^2 * (-2*i) - * = a*(-i)*(2*a*b^2) - * = a*(-i)*i - * = a. - * - * (This is due to A.O.L. Atkin, - * , - * November 1992.) - */ - - /* t := 2*a */ + // |p| == 5 (mod 8) + // + // In this case 2 is always a non-square since + // Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. + // So if a really is a square, then 2*a is a non-square. 
+ // Thus for + // b := (2*a)^((|p|-5)/8), + // i := (2*a)*b^2 + // we have + // i^2 = (2*a)^((1 + (|p|-5)/4)*2) + // = (2*a)^((p-1)/2) + // = -1; + // so if we set + // x := a*b*(i-1), + // then + // x^2 = a^2 * b^2 * (i^2 - 2*i + 1) + // = a^2 * b^2 * (-2*i) + // = a*(-i)*(2*a*b^2) + // = a*(-i)*i + // = a. + // + // (This is due to A.O.L. Atkin, + // , + // November 1992.) + + // t := 2*a if (!BN_mod_lshift1_quick(t, A, p)) { goto end; } - /* b := (2*a)^((|p|-5)/8) */ + // b := (2*a)^((|p|-5)/8) if (!BN_rshift(q, p, 3)) { goto end; } @@ -199,18 +197,18 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } - /* y := b^2 */ + // y := b^2 if (!BN_mod_sqr(y, b, p, ctx)) { goto end; } - /* t := (2*a)*b^2 - 1*/ + // t := (2*a)*b^2 - 1 if (!BN_mod_mul(t, t, y, p, ctx) || !BN_sub_word(t, 1)) { goto end; } - /* x = a*b*t */ + // x = a*b*t if (!BN_mod_mul(x, A, b, p, ctx) || !BN_mod_mul(x, x, t, p, ctx)) { goto end; @@ -223,17 +221,16 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto vrfy; } - /* e > 2, so we really have to use the Tonelli/Shanks algorithm. - * First, find some y that is not a square. */ + // e > 2, so we really have to use the Tonelli/Shanks algorithm. + // First, find some y that is not a square. if (!BN_copy(q, p)) { - goto end; /* use 'q' as temp */ + goto end; // use 'q' as temp } q->neg = 0; i = 2; do { - /* For efficiency, try small numbers first; - * if this fails, try random numbers. - */ + // For efficiency, try small numbers first; + // if this fails, try random numbers. 
if (i < 22) { if (!BN_set_word(y, i)) { goto end; @@ -247,7 +244,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } } - /* now 0 <= y < |p| */ + // now 0 <= y < |p| if (BN_is_zero(y)) { if (!BN_set_word(y, i)) { goto end; @@ -255,34 +252,33 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } } - r = bn_jacobi(y, q, ctx); /* here 'q' is |p| */ + r = bn_jacobi(y, q, ctx); // here 'q' is |p| if (r < -1) { goto end; } if (r == 0) { - /* m divides p */ + // m divides p OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); goto end; } } while (r == 1 && ++i < 82); if (r != -1) { - /* Many rounds and still no non-square -- this is more likely - * a bug than just bad luck. - * Even if p is not prime, we should have found some y - * such that r == -1. - */ + // Many rounds and still no non-square -- this is more likely + // a bug than just bad luck. + // Even if p is not prime, we should have found some y + // such that r == -1. OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); goto end; } - /* Here's our actual 'q': */ + // Here's our actual 'q': if (!BN_rshift(q, q, e)) { goto end; } - /* Now that we have some non-square, we can find an element - * of order 2^e by computing its q'th power. */ + // Now that we have some non-square, we can find an element + // of order 2^e by computing its q'th power. if (!BN_mod_exp_mont(y, y, q, p, ctx, NULL)) { goto end; } @@ -291,37 +287,36 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } - /* Now we know that (if p is indeed prime) there is an integer - * k, 0 <= k < 2^e, such that - * - * a^q * y^k == 1 (mod p). - * - * As a^q is a square and y is not, k must be even. - * q+1 is even, too, so there is an element - * - * X := a^((q+1)/2) * y^(k/2), - * - * and it satisfies - * - * X^2 = a^q * a * y^k - * = a, - * - * so it is the square root that we are looking for. 
- */ - - /* t := (q-1)/2 (note that q is odd) */ + // Now we know that (if p is indeed prime) there is an integer + // k, 0 <= k < 2^e, such that + // + // a^q * y^k == 1 (mod p). + // + // As a^q is a square and y is not, k must be even. + // q+1 is even, too, so there is an element + // + // X := a^((q+1)/2) * y^(k/2), + // + // and it satisfies + // + // X^2 = a^q * a * y^k + // = a, + // + // so it is the square root that we are looking for. + + // t := (q-1)/2 (note that q is odd) if (!BN_rshift1(t, q)) { goto end; } - /* x := a^((q-1)/2) */ - if (BN_is_zero(t)) /* special case: p = 2^e + 1 */ + // x := a^((q-1)/2) + if (BN_is_zero(t)) // special case: p = 2^e + 1 { if (!BN_nnmod(t, A, p, ctx)) { goto end; } if (BN_is_zero(t)) { - /* special case: a == 0 (mod p) */ + // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; @@ -333,33 +328,32 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } if (BN_is_zero(x)) { - /* special case: a == 0 (mod p) */ + // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; } } - /* b := a*x^2 (= a^q) */ + // b := a*x^2 (= a^q) if (!BN_mod_sqr(b, x, p, ctx) || !BN_mod_mul(b, b, A, p, ctx)) { goto end; } - /* x := a*x (= a^((q+1)/2)) */ + // x := a*x (= a^((q+1)/2)) if (!BN_mod_mul(x, x, A, p, ctx)) { goto end; } while (1) { - /* Now b is a^q * y^k for some even k (0 <= k < 2^E - * where E refers to the original value of e, which we - * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). - * - * We have a*b = x^2, - * y^2^(e-1) = -1, - * b^2^(e-1) = 1. - */ + // Now b is a^q * y^k for some even k (0 <= k < 2^E + // where E refers to the original value of e, which we + // don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). + // + // We have a*b = x^2, + // y^2^(e-1) = -1, + // b^2^(e-1) = 1. 
if (BN_is_one(b)) { if (!BN_copy(ret, x)) { @@ -370,7 +364,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } - /* find smallest i such that b^(2^i) = 1 */ + // find smallest i such that b^(2^i) = 1 i = 1; if (!BN_mod_sqr(t, b, p, ctx)) { goto end; @@ -387,7 +381,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } - /* t := y^2^(e - i - 1) */ + // t := y^2^(e - i - 1) if (!BN_copy(t, y)) { goto end; } @@ -406,8 +400,8 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { vrfy: if (!err) { - /* verify the result -- the input might have been not a square - * (test added in 0.9.8) */ + // verify the result -- the input might have been not a square + // (test added in 0.9.8) if (!BN_mod_sqr(x, ret, p, ctx)) { err = 1; @@ -457,30 +451,30 @@ int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx) { goto err; } - /* We estimate that the square root of an n-bit number is 2^{n/2}. */ + // We estimate that the square root of an n-bit number is 2^{n/2}. if (!BN_lshift(estimate, BN_value_one(), BN_num_bits(in)/2)) { goto err; } - /* This is Newton's method for finding a root of the equation |estimate|^2 - - * |in| = 0. */ + // This is Newton's method for finding a root of the equation |estimate|^2 - + // |in| = 0. for (;;) { - /* |estimate| = 1/2 * (|estimate| + |in|/|estimate|) */ + // |estimate| = 1/2 * (|estimate| + |in|/|estimate|) if (!BN_div(tmp, NULL, in, estimate, ctx) || !BN_add(tmp, tmp, estimate) || !BN_rshift1(estimate, tmp) || - /* |tmp| = |estimate|^2 */ + // |tmp| = |estimate|^2 !BN_sqr(tmp, estimate, ctx) || - /* |delta| = |in| - |tmp| */ + // |delta| = |in| - |tmp| !BN_sub(delta, in, tmp)) { OPENSSL_PUT_ERROR(BN, ERR_R_BN_LIB); goto err; } delta->neg = 0; - /* The difference between |in| and |estimate| squared is required to always - * decrease. 
This ensures that the loop always terminates, but I don't have - * a proof that it always finds the square root for a given square. */ + // The difference between |in| and |estimate| squared is required to always + // decrease. This ensures that the loop always terminates, but I don't have + // a proof that it always finds the square root for a given square. if (last_delta_valid && BN_cmp(delta, last_delta) >= 0) { break; } diff --git a/crypto/fipsmodule/cipher/aead.c b/crypto/fipsmodule/cipher/aead.c index ed302096..8d2ad048 100644 --- a/crypto/fipsmodule/cipher/aead.c +++ b/crypto/fipsmodule/cipher/aead.c @@ -101,8 +101,8 @@ void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) { ctx->aead = NULL; } -/* check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If - * |in| and |out| alias, we require that |in| == |out|. */ +// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If +// |in| and |out| alias, we require that |in| == |out|. static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out, size_t out_len) { if (!buffers_alias(in, in_len, out, out_len)) { @@ -140,8 +140,8 @@ int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, } error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't send raw data. */ + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't send raw data. OPENSSL_memset(out, 0, max_out_len); *out_len = 0; return 0; @@ -172,8 +172,8 @@ int EVP_AEAD_CTX_seal_scatter( } error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't send raw data. */ + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't send raw data. 
OPENSSL_memset(out, 0, in_len); OPENSSL_memset(out_tag, 0, max_out_tag_len); *out_tag_len = 0; @@ -218,9 +218,9 @@ int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, } error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't try and process bad - * data. */ + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't try and process bad + // data. OPENSSL_memset(out, 0, max_out_len); *out_len = 0; return 0; @@ -247,9 +247,9 @@ int EVP_AEAD_CTX_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, } error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't try and process bad - * data. */ + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't try and process bad + // data. OPENSSL_memset(out, 0, in_len); return 0; } diff --git a/crypto/fipsmodule/cipher/cipher.c b/crypto/fipsmodule/cipher/cipher.c index d1167150..8f0d788a 100644 --- a/crypto/fipsmodule/cipher/cipher.c +++ b/crypto/fipsmodule/cipher/cipher.c @@ -141,12 +141,12 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, } if (cipher) { - /* Ensure a context left from last time is cleared (the previous check - * attempted to avoid this if the same ENGINE and EVP_CIPHER could be - * used). */ + // Ensure a context left from last time is cleared (the previous check + // attempted to avoid this if the same ENGINE and EVP_CIPHER could be + // used). 
if (ctx->cipher) { EVP_CIPHER_CTX_cleanup(ctx); - /* Restore encrypt and flags */ + // Restore encrypt and flags ctx->encrypt = enc; } @@ -177,7 +177,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, return 0; } - /* we assume block size is a power of 2 in *cryptUpdate */ + // we assume block size is a power of 2 in *cryptUpdate assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 || ctx->cipher->block_size == 16); @@ -189,7 +189,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, case EVP_CIPH_CFB_MODE: ctx->num = 0; - /* fall-through */ + // fall-through case EVP_CIPH_CBC_MODE: assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv)); @@ -202,7 +202,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, case EVP_CIPH_CTR_MODE: case EVP_CIPH_OFB_MODE: ctx->num = 0; - /* Don't reuse IV for CTR mode */ + // Don't reuse IV for CTR mode if (iv) { OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); } @@ -388,8 +388,8 @@ int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, return 0; } - /* if we have 'decrypted' a multiple of block size, make sure - * we have a copy of this last block */ + // if we have 'decrypted' a multiple of block size, make sure + // we have a copy of this last block if (b > 1 && !ctx->buf_len) { *out_len -= b; ctx->final_used = 1; @@ -437,8 +437,8 @@ int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) { } assert(b <= sizeof(ctx->final)); - /* The following assumes that the ciphertext has been authenticated. - * Otherwise it provides a padding oracle. */ + // The following assumes that the ciphertext has been authenticated. + // Otherwise it provides a padding oracle. 
n = ctx->final[b - 1]; if (n == 0 || n > (int)b) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); diff --git a/crypto/fipsmodule/cipher/e_aes.c b/crypto/fipsmodule/cipher/e_aes.c index 2c6fc417..bd9847ce 100644 --- a/crypto/fipsmodule/cipher/e_aes.c +++ b/crypto/fipsmodule/cipher/e_aes.c @@ -68,7 +68,7 @@ #endif -OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) /* Unreachable code. */ +OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code. typedef struct { union { @@ -86,14 +86,14 @@ typedef struct { union { double align; AES_KEY ks; - } ks; /* AES key schedule to use */ - int key_set; /* Set if key initialised */ - int iv_set; /* Set if an iv is set */ + } ks; // AES key schedule to use + int key_set; // Set if key initialised + int iv_set; // Set if an iv is set GCM128_CONTEXT gcm; - uint8_t *iv; /* Temporary IV store */ - int ivlen; /* IV length */ + uint8_t *iv; // Temporary IV store + int ivlen; // IV length int taglen; - int iv_gen; /* It is OK to generate IVs */ + int iv_gen; // It is OK to generate IVs ctr128_f ctr; } EVP_AES_GCM_CTX; @@ -125,8 +125,8 @@ static char bsaes_capable(void) { #if defined(BSAES) -/* On platforms where BSAES gets defined (just above), then these functions are - * provided by asm. */ +// On platforms where BSAES gets defined (just above), then these functions are +// provided by asm. void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, const AES_KEY *key, uint8_t ivec[16], int enc); void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, @@ -136,8 +136,8 @@ static char bsaes_capable(void) { return 0; } -/* On other platforms, bsaes_capable() will always return false and so the - * following will never be called. */ +// On other platforms, bsaes_capable() will always return false and so the +// following will never be called. 
static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, const AES_KEY *key, uint8_t ivec[16], int enc) { abort(); @@ -151,8 +151,8 @@ static void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, #endif #if defined(VPAES) -/* On platforms where VPAES gets defined (just above), then these functions are - * provided by asm. */ +// On platforms where VPAES gets defined (just above), then these functions are +// provided by asm. int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); @@ -166,8 +166,8 @@ static char vpaes_capable(void) { return 0; } -/* On other platforms, vpaes_capable() will always return false and so the - * following will never be called. */ +// On other platforms, vpaes_capable() will always return false and so the +// following will never be called. static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key) { abort(); @@ -203,8 +203,8 @@ void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, #else -/* On other platforms, aesni_capable() will always return false and so the - * following will never be called. */ +// On other platforms, aesni_capable() will always return false and so the +// following will never be called. static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { abort(); } @@ -404,7 +404,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, if (key) { gctx->ctr = aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len); - /* If we have an iv can set it directly, otherwise use saved IV. */ + // If we have an iv can set it directly, otherwise use saved IV. 
if (iv == NULL && gctx->iv_set) { iv = gctx->iv; } @@ -414,7 +414,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, } gctx->key_set = 1; } else { - /* If key set use IV, otherwise copy */ + // If key set use IV, otherwise copy if (gctx->key_set) { CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); } else { @@ -434,7 +434,7 @@ static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) { } } -/* increment counter (64-bit int) by 1 */ +// increment counter (64-bit int) by 1 static void ctr64_inc(uint8_t *counter) { int n = 8; uint8_t c; @@ -467,7 +467,7 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { return 0; } - /* Allocate memory for IV if needed */ + // Allocate memory for IV if needed if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) { if (gctx->iv != c->iv) { OPENSSL_free(gctx->iv); @@ -496,14 +496,14 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { return 1; case EVP_CTRL_GCM_SET_IV_FIXED: - /* Special case: -1 length restores whole IV */ + // Special case: -1 length restores whole IV if (arg == -1) { OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen); gctx->iv_gen = 1; return 1; } - /* Fixed field must be at least 4 bytes and invocation field - * at least 8. */ + // Fixed field must be at least 4 bytes and invocation field + // at least 8. if (arg < 4 || (gctx->ivlen - arg) < 8) { return 0; } @@ -525,9 +525,9 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { arg = gctx->ivlen; } OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); - /* Invocation field will be at least 8 bytes in size and - * so no need to check wrap around or increment more than - * last 8 bytes. */ + // Invocation field will be at least 8 bytes in size and + // so no need to check wrap around or increment more than + // last 8 bytes. 
ctr64_inc(gctx->iv + gctx->ivlen - 8); gctx->iv_set = 1; return 1; @@ -565,7 +565,7 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { EVP_AES_GCM_CTX *gctx = ctx->cipher_data; - /* If not set up, return error */ + // If not set up, return error if (!gctx->key_set) { return -1; } @@ -613,7 +613,7 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, } CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); gctx->taglen = 16; - /* Don't reuse the IV */ + // Don't reuse the IV gctx->iv_set = 0; return 0; } @@ -813,7 +813,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_gcm_generic) { #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) -/* AES-NI section. */ +// AES-NI section. static char aesni_capable(void) { return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0; @@ -880,8 +880,8 @@ static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks); CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt, 1); gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks; - /* If we have an iv can set it directly, otherwise use - * saved IV. */ + // If we have an iv can set it directly, otherwise use + // saved IV. 
if (iv == NULL && gctx->iv_set) { iv = gctx->iv; } @@ -891,7 +891,7 @@ static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, } gctx->key_set = 1; } else { - /* If key set use IV, otherwise copy */ + // If key set use IV, otherwise copy if (gctx->key_set) { CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); } else { @@ -1104,7 +1104,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_gcm) { } \ } -#else /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */ +#else // ^^^ OPENSSL_X86_64 || OPENSSL_X86 static char aesni_capable(void) { return 0; @@ -1158,7 +1158,7 @@ static int aead_aes_gcm_init_impl(struct aead_aes_gcm_ctx *gcm_ctx, if (key_bits != 128 && key_bits != 256) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ + return 0; // EVP_AEAD_CTX_init should catch this. } if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { diff --git a/crypto/fipsmodule/cipher/internal.h b/crypto/fipsmodule/cipher/internal.h index 02335e0d..7b5f23f0 100644 --- a/crypto/fipsmodule/cipher/internal.h +++ b/crypto/fipsmodule/cipher/internal.h @@ -70,10 +70,10 @@ extern "C" { #endif -/* EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. */ +// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. #define EVP_CIPH_MODE_MASK 0x3f -/* EVP_AEAD represents a specific AEAD algorithm. */ +// EVP_AEAD represents a specific AEAD algorithm. struct evp_aead_st { uint8_t key_len; uint8_t nonce_len; @@ -81,8 +81,8 @@ struct evp_aead_st { uint8_t max_tag_len; int seal_scatter_supports_extra_in; - /* init initialises an |EVP_AEAD_CTX|. If this call returns zero then - * |cleanup| will not be called for that context. */ + // init initialises an |EVP_AEAD_CTX|. If this call returns zero then + // |cleanup| will not be called for that context. 
int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len, size_t tag_len); int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len, @@ -112,18 +112,18 @@ struct evp_aead_st { size_t extra_in_len); }; -/* aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|, - * where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is - * set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is - * initialised to do GHASH with the given key. It returns a function for - * optimised CTR-mode, or NULL if CTR-mode should be built using - * |*out_block|. */ +// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|, +// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is +// set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is +// initialised to do GHASH with the given key. It returns a function for +// optimised CTR-mode, or NULL if CTR-mode should be built using +// |*out_block|. ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx, block128_f *out_block, const uint8_t *key, size_t key_bytes); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CIPHER_INTERNAL_H */ +#endif // OPENSSL_HEADER_CIPHER_INTERNAL_H diff --git a/crypto/fipsmodule/delocate.h b/crypto/fipsmodule/delocate.h index 0153a4eb..065a21ca 100644 --- a/crypto/fipsmodule/delocate.h +++ b/crypto/fipsmodule/delocate.h @@ -24,12 +24,12 @@ #define DEFINE_BSS_GET(type, name) \ static type name __attribute__((used)); \ type *name##_bss_get(void); -/* For FIPS builds we require that CRYPTO_ONCE_INIT be zero. */ +// For FIPS builds we require that CRYPTO_ONCE_INIT be zero. #define DEFINE_STATIC_ONCE(name) DEFINE_BSS_GET(CRYPTO_once_t, name) -/* For FIPS builds we require that CRYPTO_STATIC_MUTEX_INIT be zero. */ +// For FIPS builds we require that CRYPTO_STATIC_MUTEX_INIT be zero. 
#define DEFINE_STATIC_MUTEX(name) \ DEFINE_BSS_GET(struct CRYPTO_STATIC_MUTEX, name) -/* For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero. */ +// For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero. #define DEFINE_STATIC_EX_DATA_CLASS(name) \ DEFINE_BSS_GET(CRYPTO_EX_DATA_CLASS, name) #else @@ -60,29 +60,29 @@ } \ static void name##_do_init(type *out) -/* DEFINE_METHOD_FUNCTION defines a function named |name| which returns a - * method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it - * is split into a CRYPTO_once_t-guarded initializer in the module and - * unhashed, non-module accessor functions to space reserved in the BSS. The - * method table is initialized by a caller-supplied function which takes a - * parameter named |out| of type |type|*. The caller should follow the macro - * invocation with the body of this function: - * - * DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) { - * out->type = NID_md4; - * out->md_size = MD4_DIGEST_LENGTH; - * out->flags = 0; - * out->init = md4_init; - * out->update = md4_update; - * out->final = md4_final; - * out->block_size = 64; - * out->ctx_size = sizeof(MD4_CTX); - * } - * - * This mechanism does not use a static initializer because their execution - * order is undefined. See FIPS.md for more details. */ +// DEFINE_METHOD_FUNCTION defines a function named |name| which returns a +// method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it +// is split into a CRYPTO_once_t-guarded initializer in the module and +// unhashed, non-module accessor functions to space reserved in the BSS. The +// method table is initialized by a caller-supplied function which takes a +// parameter named |out| of type |type|*. 
The caller should follow the macro +// invocation with the body of this function: +// +// DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) { +// out->type = NID_md4; +// out->md_size = MD4_DIGEST_LENGTH; +// out->flags = 0; +// out->init = md4_init; +// out->update = md4_update; +// out->final = md4_final; +// out->block_size = 64; +// out->ctx_size = sizeof(MD4_CTX); +// } +// +// This mechanism does not use a static initializer because their execution +// order is undefined. See FIPS.md for more details. #define DEFINE_METHOD_FUNCTION(type, name) DEFINE_DATA(type, name, const) #define DEFINE_LOCAL_DATA(type, name) DEFINE_DATA(type, name, static const) -#endif /* OPENSSL_HEADER_FIPSMODULE_DELOCATE_H */ +#endif // OPENSSL_HEADER_FIPSMODULE_DELOCATE_H diff --git a/crypto/fipsmodule/des/des.c b/crypto/fipsmodule/des/des.c index a6c177c5..2b0fdcd7 100644 --- a/crypto/fipsmodule/des/des.c +++ b/crypto/fipsmodule/des/des.c @@ -62,7 +62,7 @@ static const uint32_t des_skb[8][64] = { - {/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ + { // for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000L, 0x00000010L, 0x20000000L, 0x20000010L, 0x00010000L, 0x00010010L, 0x20010000L, 0x20010010L, 0x00000800L, 0x00000810L, 0x20000800L, 0x20000810L, 0x00010800L, 0x00010810L, 0x20010800L, @@ -76,7 +76,7 @@ static const uint32_t des_skb[8][64] = { 0x20080020L, 0x20080030L, 0x00090020L, 0x00090030L, 0x20090020L, 0x20090030L, 0x00080820L, 0x00080830L, 0x20080820L, 0x20080830L, 0x00090820L, 0x00090830L, 0x20090820L, 0x20090830L, }, - {/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */ + { // for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 0x00000000L, 0x02000000L, 0x00002000L, 0x02002000L, 0x00200000L, 0x02200000L, 0x00202000L, 0x02202000L, 0x00000004L, 0x02000004L, 0x00002004L, 0x02002004L, 0x00200004L, 0x02200004L, 0x00202004L, @@ -90,7 +90,7 @@ static const uint32_t des_skb[8][64] = { 0x10002400L, 0x12002400L, 0x10200400L, 0x12200400L, 0x10202400L, 0x12202400L, 
0x10000404L, 0x12000404L, 0x10002404L, 0x12002404L, 0x10200404L, 0x12200404L, 0x10202404L, 0x12202404L, }, - {/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */ + { // for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 0x00000000L, 0x00000001L, 0x00040000L, 0x00040001L, 0x01000000L, 0x01000001L, 0x01040000L, 0x01040001L, 0x00000002L, 0x00000003L, 0x00040002L, 0x00040003L, 0x01000002L, 0x01000003L, 0x01040002L, @@ -104,7 +104,7 @@ static const uint32_t des_skb[8][64] = { 0x08040200L, 0x08040201L, 0x09000200L, 0x09000201L, 0x09040200L, 0x09040201L, 0x08000202L, 0x08000203L, 0x08040202L, 0x08040203L, 0x09000202L, 0x09000203L, 0x09040202L, 0x09040203L, }, - {/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */ + { // for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 0x00000000L, 0x00100000L, 0x00000100L, 0x00100100L, 0x00000008L, 0x00100008L, 0x00000108L, 0x00100108L, 0x00001000L, 0x00101000L, 0x00001100L, 0x00101100L, 0x00001008L, 0x00101008L, 0x00001108L, @@ -118,7 +118,7 @@ static const uint32_t des_skb[8][64] = { 0x04020100L, 0x04120100L, 0x04020008L, 0x04120008L, 0x04020108L, 0x04120108L, 0x04021000L, 0x04121000L, 0x04021100L, 0x04121100L, 0x04021008L, 0x04121008L, 0x04021108L, 0x04121108L, }, - {/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ + { // for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000L, 0x10000000L, 0x00010000L, 0x10010000L, 0x00000004L, 0x10000004L, 0x00010004L, 0x10010004L, 0x20000000L, 0x30000000L, 0x20010000L, 0x30010000L, 0x20000004L, 0x30000004L, 0x20010004L, @@ -132,7 +132,7 @@ static const uint32_t des_skb[8][64] = { 0x00111000L, 0x10111000L, 0x00101004L, 0x10101004L, 0x00111004L, 0x10111004L, 0x20101000L, 0x30101000L, 0x20111000L, 0x30111000L, 0x20101004L, 0x30101004L, 0x20111004L, 0x30111004L, }, - {/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */ + { // for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 0x00000000L, 0x08000000L, 0x00000008L, 0x08000008L, 0x00000400L, 0x08000400L, 
0x00000408L, 0x08000408L, 0x00020000L, 0x08020000L, 0x00020008L, 0x08020008L, 0x00020400L, 0x08020400L, 0x00020408L, @@ -146,7 +146,7 @@ static const uint32_t des_skb[8][64] = { 0x02000009L, 0x0A000009L, 0x02000401L, 0x0A000401L, 0x02000409L, 0x0A000409L, 0x02020001L, 0x0A020001L, 0x02020009L, 0x0A020009L, 0x02020401L, 0x0A020401L, 0x02020409L, 0x0A020409L, }, - {/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */ + { // for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 0x00000000L, 0x00000100L, 0x00080000L, 0x00080100L, 0x01000000L, 0x01000100L, 0x01080000L, 0x01080100L, 0x00000010L, 0x00000110L, 0x00080010L, 0x00080110L, 0x01000010L, 0x01000110L, 0x01080010L, @@ -160,7 +160,7 @@ static const uint32_t des_skb[8][64] = { 0x00280200L, 0x00280300L, 0x01200200L, 0x01200300L, 0x01280200L, 0x01280300L, 0x00200210L, 0x00200310L, 0x00280210L, 0x00280310L, 0x01200210L, 0x01200310L, 0x01280210L, 0x01280310L, }, - {/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */ + { // for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 0x00000000L, 0x04000000L, 0x00040000L, 0x04040000L, 0x00000002L, 0x04000002L, 0x00040002L, 0x04040002L, 0x00002000L, 0x04002000L, 0x00042000L, 0x04042000L, 0x00002002L, 0x04002002L, 0x00042002L, @@ -176,7 +176,7 @@ static const uint32_t des_skb[8][64] = { 0x00002822L, 0x04002822L, 0x00042822L, 0x04042822L, }}; static const uint32_t DES_SPtrans[8][64] = { - {/* nibble 0 */ + { // nibble 0 0x02080800L, 0x00080000L, 0x02000002L, 0x02080802L, 0x02000000L, 0x00080802L, 0x00080002L, 0x02000002L, 0x00080802L, 0x02080800L, 0x02080000L, 0x00000802L, 0x02000802L, 0x02000000L, 0x00000000L, @@ -190,7 +190,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x02080000L, 0x02000802L, 0x02000000L, 0x00000802L, 0x00080002L, 0x00000000L, 0x00080000L, 0x02000000L, 0x02000802L, 0x02080800L, 0x00000002L, 0x02080002L, 0x00000800L, 0x00080802L, }, - {/* nibble 1 */ + { // nibble 1 0x40108010L, 0x00000000L, 0x00108000L, 0x40100000L, 0x40000010L, 
0x00008010L, 0x40008000L, 0x00108000L, 0x00008000L, 0x40100010L, 0x00000010L, 0x40008000L, 0x00100010L, 0x40108000L, 0x40100000L, @@ -204,7 +204,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00000000L, 0x40000010L, 0x00000010L, 0x40108010L, 0x00108000L, 0x40100000L, 0x40100010L, 0x00100000L, 0x00008010L, 0x40008000L, 0x40008010L, 0x00000010L, 0x40100000L, 0x00108000L, }, - {/* nibble 2 */ + { // nibble 2 0x04000001L, 0x04040100L, 0x00000100L, 0x04000101L, 0x00040001L, 0x04000000L, 0x04000101L, 0x00040100L, 0x04000100L, 0x00040000L, 0x04040000L, 0x00000001L, 0x04040101L, 0x00000101L, 0x00000001L, @@ -218,7 +218,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x04000000L, 0x04040101L, 0x00040000L, 0x04000100L, 0x04000101L, 0x00040100L, 0x04000100L, 0x00000000L, 0x04040001L, 0x00000101L, 0x04000001L, 0x00040101L, 0x00000100L, 0x04040000L, }, - {/* nibble 3 */ + { // nibble 3 0x00401008L, 0x10001000L, 0x00000008L, 0x10401008L, 0x00000000L, 0x10400000L, 0x10001008L, 0x00400008L, 0x10401000L, 0x10000008L, 0x10000000L, 0x00001008L, 0x10000008L, 0x00401008L, 0x00400000L, @@ -232,7 +232,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00401008L, 0x00400000L, 0x10401008L, 0x00000008L, 0x10001000L, 0x00401008L, 0x00400008L, 0x00401000L, 0x10400000L, 0x10001008L, 0x00001008L, 0x10000000L, 0x10000008L, 0x10401000L, }, - {/* nibble 4 */ + { // nibble 4 0x08000000L, 0x00010000L, 0x00000400L, 0x08010420L, 0x08010020L, 0x08000400L, 0x00010420L, 0x08010000L, 0x00010000L, 0x00000020L, 0x08000020L, 0x00010400L, 0x08000420L, 0x08010020L, 0x08010400L, @@ -246,7 +246,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00000000L, 0x08010420L, 0x08010020L, 0x08010400L, 0x00000420L, 0x00010000L, 0x00010400L, 0x08010020L, 0x08000400L, 0x00000420L, 0x00000020L, 0x00010420L, 0x08010000L, 0x08000020L, }, - {/* nibble 5 */ + { // nibble 5 0x80000040L, 0x00200040L, 0x00000000L, 0x80202000L, 0x00200040L, 0x00002000L, 0x80002040L, 0x00200000L, 0x00002040L, 0x80202040L, 0x00202000L, 
0x80000000L, 0x80002000L, 0x80000040L, 0x80200000L, @@ -260,7 +260,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00200000L, 0x80002040L, 0x80000040L, 0x80200000L, 0x00202040L, 0x00000000L, 0x00002000L, 0x80000040L, 0x80002040L, 0x80202000L, 0x80200000L, 0x00002040L, 0x00000040L, 0x80200040L, }, - {/* nibble 6 */ + { // nibble 6 0x00004000L, 0x00000200L, 0x01000200L, 0x01000004L, 0x01004204L, 0x00004004L, 0x00004200L, 0x00000000L, 0x01000000L, 0x01000204L, 0x00000204L, 0x01004000L, 0x00000004L, 0x01004200L, 0x01004000L, @@ -274,7 +274,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x01000200L, 0x00004200L, 0x00000204L, 0x00004000L, 0x01004204L, 0x01000000L, 0x01004200L, 0x00000004L, 0x00004004L, 0x01004204L, 0x01000004L, 0x01004200L, 0x01004000L, 0x00004004L, }, - {/* nibble 7 */ + { // nibble 7 0x20800080L, 0x20820000L, 0x00020080L, 0x00000000L, 0x20020000L, 0x00800080L, 0x20800000L, 0x20820080L, 0x00000080L, 0x20000000L, 0x00820000L, 0x00020080L, 0x00820080L, 0x20020080L, 0x20000080L, @@ -305,9 +305,9 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { c2l(in, c); c2l(in, d); - /* do PC1 in 47 simple operations :-) - * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) - * for the inspiration. :-) */ + // do PC1 in 47 simple operations :-) + // Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) + // for the inspiration. 
:-) PERM_OP(d, c, t, 4, 0x0f0f0f0fL); HPERM_OP(c, t, -2, 0xcccc0000L); HPERM_OP(d, t, -2, 0xcccc0000L); @@ -328,8 +328,8 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { } c &= 0x0fffffffL; d &= 0x0fffffffL; - /* could be a few less shifts but I am to lazy at this - * point in time to investigate */ + // could be a few less shifts but I am to lazy at this + // point in time to investigate s = des_skb[0][(c) & 0x3f] | des_skb[1][((c >> 6L) & 0x03) | ((c >> 7L) & 0x3c)] | des_skb[2][((c >> 13L) & 0x0f) | ((c >> 14L) & 0x30)] | @@ -340,7 +340,7 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { des_skb[6][(d >> 15L) & 0x3f] | des_skb[7][((d >> 21L) & 0x0f) | ((d >> 22L) & 0x30)]; - /* table contained 0213 4657 */ + // table contained 0213 4657 t2 = ((t << 16L) | (s & 0x0000ffffL)) & 0xffffffffL; schedule->subkeys[i][0] = ROTATE(t2, 30) & 0xffffffffL; @@ -385,18 +385,18 @@ static void DES_encrypt1(uint32_t *data, const DES_key_schedule *ks, int enc) { l = data[1]; IP(r, l); - /* Things have been modified so that the initial rotate is done outside - * the loop. This required the DES_SPtrans values in sp.h to be - * rotated 1 bit to the right. One perl script later and things have a - * 5% speed up on a sparc2. Thanks to Richard Outerbridge - * <71755.204@CompuServe.COM> for pointing this out. */ - /* clear the top bits on machines with 8byte longs */ - /* shift left by 2 */ + // Things have been modified so that the initial rotate is done outside + // the loop. This required the DES_SPtrans values in sp.h to be + // rotated 1 bit to the right. One perl script later and things have a + // 5% speed up on a sparc2. Thanks to Richard Outerbridge + // <71755.204@CompuServe.COM> for pointing this out. 
+ // clear the top bits on machines with 8byte longs + // shift left by 2 r = ROTATE(r, 29) & 0xffffffffL; l = ROTATE(l, 29) & 0xffffffffL; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop */ + // I don't know if it is worth the effort of loop unrolling the + // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); @@ -433,7 +433,7 @@ static void DES_encrypt1(uint32_t *data, const DES_key_schedule *ks, int enc) { D_ENCRYPT(ks, r, l, 0); } - /* rotate and clear the top bits on machines with 8byte longs */ + // rotate and clear the top bits on machines with 8byte longs l = ROTATE(l, 3) & 0xffffffffL; r = ROTATE(r, 3) & 0xffffffffL; @@ -448,17 +448,17 @@ static void DES_encrypt2(uint32_t *data, const DES_key_schedule *ks, int enc) { r = data[0]; l = data[1]; - /* Things have been modified so that the initial rotate is done outside the - * loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to - * the right. One perl script later and things have a 5% speed up on a - * sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for - * pointing this out. */ - /* clear the top bits on machines with 8byte longs */ + // Things have been modified so that the initial rotate is done outside the + // loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to + // the right. One perl script later and things have a 5% speed up on a + // sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for + // pointing this out. 
+ // clear the top bits on machines with 8byte longs r = ROTATE(r, 29) & 0xffffffffL; l = ROTATE(l, 29) & 0xffffffffL; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop */ + // I don't know if it is worth the effort of loop unrolling the + // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); @@ -494,7 +494,7 @@ static void DES_encrypt2(uint32_t *data, const DES_key_schedule *ks, int enc) { D_ENCRYPT(ks, l, r, 1); D_ENCRYPT(ks, r, l, 0); } - /* rotate and clear the top bits on machines with 8byte longs */ + // rotate and clear the top bits on machines with 8byte longs data[0] = ROTATE(l, 3) & 0xffffffffL; data[1] = ROTATE(r, 3) & 0xffffffffL; } @@ -764,7 +764,7 @@ void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, } -/* Deprecated functions. */ +// Deprecated functions. void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule) { DES_set_key(key, schedule); diff --git a/crypto/fipsmodule/des/internal.h b/crypto/fipsmodule/des/internal.h index 21eb9335..7bfc45b2 100644 --- a/crypto/fipsmodule/des/internal.h +++ b/crypto/fipsmodule/des/internal.h @@ -80,7 +80,7 @@ extern "C" { *((c)++) = (unsigned char)(((l) >> 24L) & 0xff); \ } while (0) -/* NOTE - c is not incremented as per c2l */ +// NOTE - c is not incremented as per c2l #define c2ln(c, l1, l2, n) \ do { \ (c) += (n); \ @@ -105,7 +105,7 @@ extern "C" { } \ } while (0) -/* NOTE - c is not incremented as per l2c */ +// NOTE - c is not incremented as per l2c #define l2cn(l1, l2, c, n) \ do { \ (c) += (n); \ @@ -218,7 +218,7 @@ how to use xors :-) I got it to its final state. 
#if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DES_INTERNAL_H */ +#endif // OPENSSL_HEADER_DES_INTERNAL_H diff --git a/crypto/fipsmodule/digest/digest.c b/crypto/fipsmodule/digest/digest.c index 00e6d4b5..f8a0dd26 100644 --- a/crypto/fipsmodule/digest/digest.c +++ b/crypto/fipsmodule/digest/digest.c @@ -123,9 +123,9 @@ int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) { } if (out->digest == in->digest) { - /* |md_data| will be the correct size in this case so it's removed from - * |out| at this point so that |EVP_MD_CTX_cleanup| doesn't free it and - * then it's reused. */ + // |md_data| will be the correct size in this case so it's removed from + // |out| at this point so that |EVP_MD_CTX_cleanup| doesn't free it and + // then it's reused. tmp_buf = out->md_data; out->md_data = NULL; } diff --git a/crypto/fipsmodule/digest/internal.h b/crypto/fipsmodule/digest/internal.h index e3d812ad..2d06ed07 100644 --- a/crypto/fipsmodule/digest/internal.h +++ b/crypto/fipsmodule/digest/internal.h @@ -65,48 +65,48 @@ extern "C" { struct env_md_st { - /* type contains a NID identifing the digest function. (For example, - * NID_md5.) */ + // type contains a NID identifing the digest function. (For example, + // NID_md5.) int type; - /* md_size contains the size, in bytes, of the resulting digest. */ + // md_size contains the size, in bytes, of the resulting digest. unsigned md_size; - /* flags contains the OR of |EVP_MD_FLAG_*| values. */ + // flags contains the OR of |EVP_MD_FLAG_*| values. uint32_t flags; - /* init initialises the state in |ctx->md_data|. */ + // init initialises the state in |ctx->md_data|. void (*init)(EVP_MD_CTX *ctx); - /* update hashes |len| bytes of |data| into the state in |ctx->md_data|. */ + // update hashes |len| bytes of |data| into the state in |ctx->md_data|. 
void (*update)(EVP_MD_CTX *ctx, const void *data, size_t count); - /* final completes the hash and writes |md_size| bytes of digest to |out|. */ + // final completes the hash and writes |md_size| bytes of digest to |out|. void (*final)(EVP_MD_CTX *ctx, uint8_t *out); - /* block_size contains the hash's native block size. */ + // block_size contains the hash's native block size. unsigned block_size; - /* ctx_size contains the size, in bytes, of the state of the hash function. */ + // ctx_size contains the size, in bytes, of the state of the hash function. unsigned ctx_size; }; -/* evp_md_pctx_ops contains function pointers to allow the |pctx| member of - * |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP - * functions. */ +// evp_md_pctx_ops contains function pointers to allow the |pctx| member of +// |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP +// functions. struct evp_md_pctx_ops { - /* free is called when an |EVP_MD_CTX| is being freed and the |pctx| also - * needs to be freed. */ + // free is called when an |EVP_MD_CTX| is being freed and the |pctx| also + // needs to be freed. void (*free) (EVP_PKEY_CTX *pctx); - /* dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs - * to be copied. */ + // dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs + // to be copied. EVP_PKEY_CTX* (*dup) (EVP_PKEY_CTX *pctx); }; #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DIGEST_INTERNAL */ +#endif // OPENSSL_HEADER_DIGEST_INTERNAL diff --git a/crypto/fipsmodule/digest/md32_common.h b/crypto/fipsmodule/digest/md32_common.h index 73716298..a0c3665d 100644 --- a/crypto/fipsmodule/digest/md32_common.h +++ b/crypto/fipsmodule/digest/md32_common.h @@ -57,56 +57,55 @@ extern "C" { #endif -/* This is a generic 32-bit "collector" for message digest algorithms. 
It - * collects input character stream into chunks of 32-bit values and invokes the - * block function that performs the actual hash calculations. To make use of - * this mechanism, the following macros must be defined before including - * md32_common.h. - * - * One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be - * defined to specify the byte order of the input stream. - * - * |HASH_CBLOCK| must be defined as the integer block size, in bytes. - * - * |HASH_CTX| must be defined as the name of the context structure, which must - * have at least the following members: - * - * typedef struct _state_st { - * uint32_t h[ / sizeof(uint32_t)]; - * uint32_t Nl, Nh; - * uint8_t data[HASH_CBLOCK]; - * unsigned num; - * ... - * } _CTX; - * - * is the output length of the hash in bytes, before - * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and - * SHA-512). - * - * |HASH_UPDATE| must be defined as the name of the "Update" function to - * generate. - * - * |HASH_TRANSFORM| must be defined as the the name of the "Transform" - * function to generate. - * - * |HASH_FINAL| must be defined as the name of "Final" function to generate. - * - * |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function. - * That function must be implemented manually. It must be capable of operating - * on *unaligned* input data in its original (data) byte order. It must have - * this signature: - * - * void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data, - * size_t num); - * - * It must update the hash state |state| with |num| blocks of data from |data|, - * where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of - * |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|, - * and so will have | / sizeof(uint32_t)| elements. - * - * |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts - * the hash state |c->h| into the output byte order, storing the result in |s|. 
- */ +// This is a generic 32-bit "collector" for message digest algorithms. It +// collects input character stream into chunks of 32-bit values and invokes the +// block function that performs the actual hash calculations. To make use of +// this mechanism, the following macros must be defined before including +// md32_common.h. +// +// One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be +// defined to specify the byte order of the input stream. +// +// |HASH_CBLOCK| must be defined as the integer block size, in bytes. +// +// |HASH_CTX| must be defined as the name of the context structure, which must +// have at least the following members: +// +// typedef struct _state_st { +// uint32_t h[ / sizeof(uint32_t)]; +// uint32_t Nl, Nh; +// uint8_t data[HASH_CBLOCK]; +// unsigned num; +// ... +// } _CTX; +// +// is the output length of the hash in bytes, before +// any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and +// SHA-512). +// +// |HASH_UPDATE| must be defined as the name of the "Update" function to +// generate. +// +// |HASH_TRANSFORM| must be defined as the the name of the "Transform" +// function to generate. +// +// |HASH_FINAL| must be defined as the name of "Final" function to generate. +// +// |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function. +// That function must be implemented manually. It must be capable of operating +// on *unaligned* input data in its original (data) byte order. It must have +// this signature: +// +// void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data, +// size_t num); +// +// It must update the hash state |state| with |num| blocks of data from |data|, +// where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of +// |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|, +// and so will have | / sizeof(uint32_t)| elements. 
+// +// |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts +// the hash state |c->h| into the output byte order, storing the result in |s|. #if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN) #error "DATA_ORDER must be defined!" @@ -173,7 +172,7 @@ extern "C" { *((c)++) = (uint8_t)(((l) >> 24) & 0xff); \ } while (0) -#endif /* DATA_ORDER */ +#endif // DATA_ORDER int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { const uint8_t *data = data_; @@ -184,7 +183,7 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { uint32_t l = c->Nl + (((uint32_t)len) << 3); if (l < c->Nl) { - /* Handle carries. */ + // Handle carries. c->Nh++; } c->Nh += (uint32_t)(len >> 29); @@ -199,7 +198,7 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { data += n; len -= n; c->num = 0; - /* Keep |c->data| zeroed when unused. */ + // Keep |c->data| zeroed when unused. OPENSSL_memset(c->data, 0, HASH_CBLOCK); } else { OPENSSL_memcpy(c->data + n, data, len); @@ -230,14 +229,14 @@ void HASH_TRANSFORM(HASH_CTX *c, const uint8_t *data) { int HASH_FINAL(uint8_t *md, HASH_CTX *c) { - /* |c->data| always has room for at least one byte. A full block would have - * been consumed. */ + // |c->data| always has room for at least one byte. A full block would have + // been consumed. size_t n = c->num; assert(n < HASH_CBLOCK); c->data[n] = 0x80; n++; - /* Fill the block with zeros if there isn't room for a 64-bit length. */ + // Fill the block with zeros if there isn't room for a 64-bit length. if (n > (HASH_CBLOCK - 8)) { OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - n); n = 0; @@ -245,7 +244,7 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) { } OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - 8 - n); - /* Append a 64-bit length to the block and process it. */ + // Append a 64-bit length to the block and process it. 
uint8_t *p = c->data + HASH_CBLOCK - 8; #if defined(DATA_ORDER_IS_BIG_ENDIAN) HOST_l2c(c->Nh, p); @@ -265,5 +264,5 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif diff --git a/crypto/fipsmodule/ec/ec.c b/crypto/fipsmodule/ec/ec.c index 55f388db..d82e58f9 100644 --- a/crypto/fipsmodule/ec/ec.c +++ b/crypto/fipsmodule/ec/ec.c @@ -81,86 +81,86 @@ static const uint8_t kP224Params[6 * 28] = { - /* p */ + // p 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - /* a */ + // a 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, - /* b */ + // b 0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, 0x32, 0x56, 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA, 0x27, 0x0B, 0x39, 0x43, 0x23, 0x55, 0xFF, 0xB4, - /* x */ + // x 0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, 0x90, 0xB9, 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xD6, 0x11, 0x5C, 0x1D, 0x21, - /* y */ + // y 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34, - /* order */ + // order 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x16, 0xA2, 0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45, 0x5C, 0x5C, 0x2A, 0x3D, }; static const uint8_t kP256Params[6 * 32] = { - /* p */ + // p 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ + // a 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ + // b 0x5A, 0xC6, 0x35, 0xD8, 0xAA, 0x3A, 0x93, 0xE7, 0xB3, 0xEB, 0xBD, 0x55, 0x76, 0x98, 0x86, 0xBC, 0x65, 0x1D, 0x06, 0xB0, 0xCC, 0x53, 0xB0, 0xF6, 0x3B, 0xCE, 0x3C, 0x3E, 0x27, 0xD2, 0x60, 0x4B, - /* x */ + // x 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8, 0xBC, 0xE6, 0xE5, 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D, 0x81, 0x2D, 0xEB, 0x33, 0xA0, 0xF4, 0xA1, 0x39, 0x45, 0xD8, 0x98, 0xC2, 0x96, - /* y */ + // y 0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0x0f, 0x9e, 0x16, 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce, 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5, - /* order */ + // order 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51, }; static const uint8_t kP384Params[6 * 48] = { - /* p */ + // p 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ + // a 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ + // b 0xB3, 0x31, 0x2F, 0xA7, 0xE2, 0x3E, 0xE7, 0xE4, 0x98, 0x8E, 0x05, 0x6B, 0xE3, 0xF8, 0x2D, 0x19, 0x18, 0x1D, 0x9C, 0x6E, 0xFE, 0x81, 0x41, 0x12, 0x03, 0x14, 0x08, 0x8F, 0x50, 0x13, 0x87, 0x5A, 0xC6, 0x56, 0x39, 0x8D, 0x8A, 0x2E, 0xD1, 0x9D, 0x2A, 0x85, 0xC8, 0xED, 0xD3, 0xEC, 0x2A, 0xEF, - /* x */ + // x 0xAA, 0x87, 0xCA, 0x22, 0xBE, 0x8B, 0x05, 0x37, 0x8E, 0xB1, 0xC7, 0x1E, 0xF3, 0x20, 0xAD, 0x74, 0x6E, 0x1D, 0x3B, 0x62, 0x8B, 0xA7, 
0x9B, 0x98, 0x59, 0xF7, 0x41, 0xE0, 0x82, 0x54, 0x2A, 0x38, 0x55, 0x02, 0xF2, 0x5D, 0xBF, 0x55, 0x29, 0x6C, 0x3A, 0x54, 0x5E, 0x38, 0x72, 0x76, 0x0A, 0xB7, - /* y */ + // y 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf, 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c, 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce, 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f, - /* order */ + // order 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF, 0x58, 0x1A, 0x0D, 0xB2, @@ -168,42 +168,42 @@ static const uint8_t kP384Params[6 * 48] = { }; static const uint8_t kP521Params[6 * 66] = { - /* p */ + // p 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ + // a 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ + // b 0x00, 0x51, 0x95, 0x3E, 0xB9, 0x61, 0x8E, 0x1C, 0x9A, 0x1F, 0x92, 0x9A, 0x21, 0xA0, 0xB6, 0x85, 0x40, 0xEE, 0xA2, 0xDA, 0x72, 0x5B, 0x99, 0xB3, 0x15, 0xF3, 0xB8, 0xB4, 0x89, 0x91, 0x8E, 0xF1, 0x09, 0xE1, 0x56, 0x19, 0x39, 0x51, 0xEC, 0x7E, 0x93, 0x7B, 0x16, 0x52, 0xC0, 0xBD, 0x3B, 0xB1, 0xBF, 0x07, 0x35, 0x73, 0xDF, 0x88, 0x3D, 0x2C, 
0x34, 0xF1, 0xEF, 0x45, 0x1F, 0xD4, 0x6B, 0x50, 0x3F, 0x00, - /* x */ + // x 0x00, 0xC6, 0x85, 0x8E, 0x06, 0xB7, 0x04, 0x04, 0xE9, 0xCD, 0x9E, 0x3E, 0xCB, 0x66, 0x23, 0x95, 0xB4, 0x42, 0x9C, 0x64, 0x81, 0x39, 0x05, 0x3F, 0xB5, 0x21, 0xF8, 0x28, 0xAF, 0x60, 0x6B, 0x4D, 0x3D, 0xBA, 0xA1, 0x4B, 0x5E, 0x77, 0xEF, 0xE7, 0x59, 0x28, 0xFE, 0x1D, 0xC1, 0x27, 0xA2, 0xFF, 0xA8, 0xDE, 0x33, 0x48, 0xB3, 0xC1, 0x85, 0x6A, 0x42, 0x9B, 0xF9, 0x7E, 0x7E, 0x31, 0xC2, 0xE5, 0xBD, 0x66, - /* y */ + // y 0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50, - /* order */ + // order 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x51, 0x86, @@ -212,15 +212,15 @@ static const uint8_t kP521Params[6 * 66] = { 0xB7, 0x1E, 0x91, 0x38, 0x64, 0x09, }; -/* MSan appears to have a bug that causes code to be miscompiled in opt mode. - * While that is being looked at, don't run the uint128_t code under MSan. */ +// MSan appears to have a bug that causes code to be miscompiled in opt mode. +// While that is being looked at, don't run the uint128_t code under MSan. 
#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \ !defined(MEMORY_SANITIZER) #define BORINGSSL_USE_INT128_CODE #endif DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { - /* 1.3.132.0.35 */ + // 1.3.132.0.35 static const uint8_t kOIDP521[] = {0x2b, 0x81, 0x04, 0x00, 0x23}; out->curves[0].nid = NID_secp521r1; out->curves[0].oid = kOIDP521; @@ -230,7 +230,7 @@ DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { out->curves[0].params = kP521Params; out->curves[0].method = EC_GFp_mont_method(); - /* 1.3.132.0.34 */ + // 1.3.132.0.34 static const uint8_t kOIDP384[] = {0x2b, 0x81, 0x04, 0x00, 0x22}; out->curves[1].nid = NID_secp384r1; out->curves[1].oid = kOIDP384; @@ -240,7 +240,7 @@ DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { out->curves[1].params = kP384Params; out->curves[1].method = EC_GFp_mont_method(); - /* 1.2.840.10045.3.1.7 */ + // 1.2.840.10045.3.1.7 static const uint8_t kOIDP256[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}; out->curves[2].nid = NID_X9_62_prime256v1; @@ -261,7 +261,7 @@ DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { EC_GFp_mont_method(); #endif - /* 1.3.132.0.33 */ + // 1.3.132.0.33 static const uint8_t kOIDP224[] = {0x2b, 0x81, 0x04, 0x00, 0x21}; out->curves[3].nid = NID_secp224r1; out->curves[3].oid = kOIDP224; @@ -277,9 +277,9 @@ DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { #endif } -/* built_in_curve_scalar_field_monts contains Montgomery contexts for - * performing inversions in the scalar fields of each of the built-in - * curves. It's protected by |built_in_curve_scalar_field_monts_once|. */ +// built_in_curve_scalar_field_monts contains Montgomery contexts for +// performing inversions in the scalar fields of each of the built-in +// curves. It's protected by |built_in_curve_scalar_field_monts_once|. 
DEFINE_LOCAL_DATA(BN_MONT_CTX **, built_in_curve_scalar_field_monts) { const struct built_in_curves *const curves = OPENSSL_built_in_curves(); @@ -386,12 +386,12 @@ EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor) { if (group->curve_name != NID_undef || group->generator != NULL) { - /* |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by - * |EC_GROUP_new_curve_GFp| and may only used once on each group. */ + // |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by + // |EC_GROUP_new_curve_GFp| and may only used once on each group. return 0; } - /* Require a cofactor of one for custom curves, which implies prime order. */ + // Require a cofactor of one for custom curves, which implies prime order. if (!BN_is_one(cofactor)) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COFACTOR); return 0; @@ -579,7 +579,7 @@ int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx) { int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx) { - /* All |EC_GROUP|s have cofactor 1. */ + // All |EC_GROUP|s have cofactor 1. return BN_set_word(cofactor, 1); } @@ -782,9 +782,9 @@ int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx) { int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) { - /* Previously, this function set |r| to the point at infinity if there was - * nothing to multiply. But, nobody should be calling this function with - * nothing to multiply in the first place. */ + // Previously, this function set |r| to the point at infinity if there was + // nothing to multiply. But, nobody should be calling this function with + // nothing to multiply in the first place. 
if ((g_scalar == NULL && p_scalar == NULL) || ((p == NULL) != (p_scalar == NULL))) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); diff --git a/crypto/fipsmodule/ec/ec_key.c b/crypto/fipsmodule/ec/ec_key.c index acabb068..e5e8b1a3 100644 --- a/crypto/fipsmodule/ec/ec_key.c +++ b/crypto/fipsmodule/ec/ec_key.c @@ -165,9 +165,9 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return NULL; } - /* Copy the parameters. */ + // Copy the parameters. if (src->group) { - /* TODO(fork): duplicating the group seems wasteful. */ + // TODO(fork): duplicating the group seems wasteful. EC_GROUP_free(dest->group); dest->group = EC_GROUP_dup(src->group); if (dest->group == NULL) { @@ -175,7 +175,7 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { } } - /* Copy the public key. */ + // Copy the public key. if (src->pub_key && src->group) { EC_POINT_free(dest->pub_key); dest->pub_key = EC_POINT_dup(src->pub_key, src->group); @@ -184,7 +184,7 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { } } - /* copy the private key */ + // copy the private key if (src->priv_key) { if (dest->priv_key == NULL) { dest->priv_key = BN_new(); @@ -196,14 +196,14 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { return NULL; } } - /* copy method/extra data */ + // copy method/extra data if (src->ecdsa_meth) { METHOD_unref(dest->ecdsa_meth); dest->ecdsa_meth = src->ecdsa_meth; METHOD_ref(dest->ecdsa_meth); } - /* copy the rest */ + // copy the rest dest->enc_flag = src->enc_flag; dest->conv_form = src->conv_form; @@ -235,13 +235,13 @@ const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key) { return key->group; } int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) { EC_GROUP_free(key->group); - /* TODO(fork): duplicating the group seems wasteful but see - * |EC_KEY_set_conv_form|. */ + // TODO(fork): duplicating the group seems wasteful but see + // |EC_KEY_set_conv_form|. 
key->group = EC_GROUP_dup(group); if (key->group == NULL) { return 0; } - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. if (key->priv_key != NULL && BN_cmp(key->priv_key, EC_GROUP_get0_order(group)) >= 0) { return 0; @@ -254,7 +254,7 @@ const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key) { } int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) { - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. if (key->group != NULL && BN_cmp(priv_key, EC_GROUP_get0_order(key->group)) >= 0) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); @@ -310,16 +310,15 @@ int EC_KEY_check_key(const EC_KEY *eckey) { goto err; } - /* testing whether the pub_key is on the elliptic curve */ + // testing whether the pub_key is on the elliptic curve if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, ctx)) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE); goto err; } - /* in case the priv_key is present : - * check if generator * priv_key == pub_key - */ + // in case the priv_key is present : + // check if generator * priv_key == pub_key if (eckey->priv_key) { - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. if (BN_cmp(eckey->priv_key, EC_GROUP_get0_order(eckey->group)) >= 0) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); goto err; @@ -345,7 +344,7 @@ err: int EC_KEY_check_fips(const EC_KEY *key) { if (EC_KEY_is_opaque(key)) { - /* Opaque keys can't be checked. */ + // Opaque keys can't be checked. OPENSSL_PUT_ERROR(EC, EC_R_PUBLIC_KEY_VALIDATION_FAILED); return 0; } @@ -408,8 +407,8 @@ int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, goto err; } - /* Check if retrieved coordinates match originals: if not values - * are out of range. */ + // Check if retrieved coordinates match originals: if not values + // are out of range. 
if (BN_cmp(x, tx) || BN_cmp(y, ty)) { OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE); goto err; @@ -453,14 +452,14 @@ int EC_KEY_generate_key(EC_KEY *eckey) { const BIGNUM *order = EC_GROUP_get0_order(eckey->group); - /* Check that the size of the group order is FIPS compliant (FIPS 186-4 - * B.4.2). */ + // Check that the size of the group order is FIPS compliant (FIPS 186-4 + // B.4.2). if (BN_num_bits(order) < 160) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); goto err; } - /* Generate the private key by testing candidates (FIPS 186-4 B.4.2). */ + // Generate the private key by testing candidates (FIPS 186-4 B.4.2). if (!BN_rand_range_ex(priv_key, 1, order)) { goto err; } diff --git a/crypto/fipsmodule/ec/ec_montgomery.c b/crypto/fipsmodule/ec/ec_montgomery.c index c2afe257..c5f240bf 100644 --- a/crypto/fipsmodule/ec/ec_montgomery.c +++ b/crypto/fipsmodule/ec/ec_montgomery.c @@ -219,7 +219,7 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, BN_CTX_start(ctx); if (BN_cmp(&point->Z, &group->one) == 0) { - /* |point| is already affine. */ + // |point| is already affine. if (x != NULL && !BN_from_montgomery(x, &point->X, group->mont, ctx)) { goto err; } @@ -227,7 +227,7 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } } else { - /* transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3) */ + // transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3) BIGNUM *Z_1 = BN_CTX_get(ctx); BIGNUM *Z_2 = BN_CTX_get(ctx); @@ -238,18 +238,18 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } - /* The straightforward way to calculate the inverse of a Montgomery-encoded - * value where the result is Montgomery-encoded is: - * - * |BN_from_montgomery| + invert + |BN_to_montgomery|. 
- * - * This is equivalent, but more efficient, because |BN_from_montgomery| - * is more efficient (at least in theory) than |BN_to_montgomery|, since it - * doesn't have to do the multiplication before the reduction. - * - * Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this - * inversion may be done as the final step of private key operations. - * Unfortunately, this is suboptimal for ECDSA verification. */ + // The straightforward way to calculate the inverse of a Montgomery-encoded + // value where the result is Montgomery-encoded is: + // + // |BN_from_montgomery| + invert + |BN_to_montgomery|. + // + // This is equivalent, but more efficient, because |BN_from_montgomery| + // is more efficient (at least in theory) than |BN_to_montgomery|, since it + // doesn't have to do the multiplication before the reduction. + // + // Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this + // inversion may be done as the final step of private key operations. + // Unfortunately, this is suboptimal for ECDSA verification. if (!BN_from_montgomery(Z_1, &point->Z, group->mont, ctx) || !BN_from_montgomery(Z_1, Z_1, group->mont, ctx) || !bn_mod_inverse_prime(Z_1, Z_1, &group->field, ctx, group->mont)) { @@ -260,10 +260,10 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } - /* Instead of using |BN_from_montgomery| to convert the |x| coordinate - * and then calling |BN_from_montgomery| again to convert the |y| - * coordinate below, convert the common factor |Z_2| once now, saving one - * reduction. */ + // Instead of using |BN_from_montgomery| to convert the |x| coordinate + // and then calling |BN_from_montgomery| again to convert the |y| + // coordinate below, convert the common factor |Z_2| once now, saving one + // reduction. 
if (!BN_from_montgomery(Z_2, Z_2, group->mont, ctx)) { goto err; } diff --git a/crypto/fipsmodule/ec/internal.h b/crypto/fipsmodule/ec/internal.h index 424fe530..39c9349a 100644 --- a/crypto/fipsmodule/ec/internal.h +++ b/crypto/fipsmodule/ec/internal.h @@ -88,25 +88,25 @@ struct ec_method_st { int (*point_get_affine_coordinates)(const EC_GROUP *, const EC_POINT *, BIGNUM *x, BIGNUM *y, BN_CTX *); - /* Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar| - * are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null. - * Computes |r = p_scalar*p| if g_scalar is null. At least one of |g_scalar| - * and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is - * non-null. */ + // Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar| + // are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null. + // Computes |r = p_scalar*p| if g_scalar is null. At least one of |g_scalar| + // and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is + // non-null. int (*mul)(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx); - /* 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the - * same implementations of point operations can be used with different - * optimized implementations of expensive field operations: */ + // 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the + // same implementations of point operations can be used with different + // optimized implementations of expensive field operations: int (*field_mul)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *); int (*field_sqr)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, BN_CTX *); int (*field_encode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, - BN_CTX *); /* e.g. to Montgomery */ + BN_CTX *); // e.g. 
to Montgomery int (*field_decode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, - BN_CTX *); /* e.g. from Montgomery */ + BN_CTX *); // e.g. from Montgomery } /* EC_METHOD */; const EC_METHOD *EC_GFp_mont_method(void); @@ -117,22 +117,22 @@ struct ec_group_st { EC_POINT *generator; BIGNUM order; - int curve_name; /* optional NID for named curve */ + int curve_name; // optional NID for named curve - const BN_MONT_CTX *order_mont; /* data for ECDSA inverse */ + const BN_MONT_CTX *order_mont; // data for ECDSA inverse - /* The following members are handled by the method functions, - * even if they appear generic */ + // The following members are handled by the method functions, + // even if they appear generic - BIGNUM field; /* For curves over GF(p), this is the modulus. */ + BIGNUM field; // For curves over GF(p), this is the modulus. - BIGNUM a, b; /* Curve coefficients. */ + BIGNUM a, b; // Curve coefficients. - int a_is_minus3; /* enable optimized point arithmetics for special case */ + int a_is_minus3; // enable optimized point arithmetics for special case - BN_MONT_CTX *mont; /* Montgomery structure. */ + BN_MONT_CTX *mont; // Montgomery structure. - BIGNUM one; /* The value one. */ + BIGNUM one; // The value one. } /* EC_GROUP */; struct ec_point_st { @@ -140,22 +140,22 @@ struct ec_point_st { BIGNUM X; BIGNUM Y; - BIGNUM Z; /* Jacobian projective coordinates: - * (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0 */ + BIGNUM Z; // Jacobian projective coordinates: + // (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0 } /* EC_POINT */; EC_GROUP *ec_group_new(const EC_METHOD *meth); int ec_group_copy(EC_GROUP *dest, const EC_GROUP *src); -/* ec_group_get_order_mont returns a Montgomery context for operations modulo - * |group|'s order. It may return NULL in the case that |group| is not a - * built-in group. */ +// ec_group_get_order_mont returns a Montgomery context for operations modulo +// |group|'s order. 
It may return NULL in the case that |group| is not a +// built-in group. const BN_MONT_CTX *ec_group_get_order_mont(const EC_GROUP *group); int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx); -/* method functions in simple.c */ +// method functions in simple.c int ec_GFp_simple_group_init(EC_GROUP *); void ec_GFp_simple_group_finish(EC_GROUP *); int ec_GFp_simple_group_copy(EC_GROUP *, const EC_GROUP *); @@ -200,7 +200,7 @@ int ec_GFp_simple_field_mul(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, int ec_GFp_simple_field_sqr(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, BN_CTX *); -/* method functions in montgomery.c */ +// method functions in montgomery.c int ec_GFp_mont_group_init(EC_GROUP *); int ec_GFp_mont_group_set_curve(EC_GROUP *, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *); @@ -225,8 +225,8 @@ void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, uint8_t in); const EC_METHOD *EC_GFp_nistp224_method(void); const EC_METHOD *EC_GFp_nistp256_method(void); -/* EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with - * x86-64 optimized P256. See http://eprint.iacr.org/2013/816. */ +// EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with +// x86-64 optimized P256. See http://eprint.iacr.org/2013/816. const EC_METHOD *EC_GFp_nistz256_method(void); struct ec_key_st { @@ -235,8 +235,8 @@ struct ec_key_st { EC_POINT *pub_key; BIGNUM *priv_key; - /* fixed_k may contain a specific value of 'k', to be used in ECDSA signing. - * This is only for the FIPS power-on tests. */ + // fixed_k may contain a specific value of 'k', to be used in ECDSA signing. + // This is only for the FIPS power-on tests. BIGNUM *fixed_k; unsigned int enc_flag; @@ -253,13 +253,13 @@ struct built_in_curve { int nid; const uint8_t *oid; uint8_t oid_len; - /* comment is a human-readable string describing the curve. 
*/ + // comment is a human-readable string describing the curve. const char *comment; - /* param_len is the number of bytes needed to store a field element. */ + // param_len is the number of bytes needed to store a field element. uint8_t param_len; - /* params points to an array of 6*|param_len| bytes which hold the field - * elements of the following (in big-endian order): prime, a, b, generator x, - * generator y, order. */ + // params points to an array of 6*|param_len| bytes which hold the field + // elements of the following (in big-endian order): prime, a, b, generator x, + // generator y, order. const uint8_t *params; const EC_METHOD *method; }; @@ -270,13 +270,13 @@ struct built_in_curves { struct built_in_curve curves[OPENSSL_NUM_BUILT_IN_CURVES]; }; -/* OPENSSL_built_in_curves returns a pointer to static information about - * standard curves. The array is terminated with an entry where |nid| is - * |NID_undef|. */ +// OPENSSL_built_in_curves returns a pointer to static information about +// standard curves. The array is terminated with an entry where |nid| is +// |NID_undef|. const struct built_in_curves *OPENSSL_built_in_curves(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_EC_INTERNAL_H */ +#endif // OPENSSL_HEADER_EC_INTERNAL_H diff --git a/crypto/fipsmodule/ec/oct.c b/crypto/fipsmodule/ec/oct.c index 5071c2ee..cf51e4bc 100644 --- a/crypto/fipsmodule/ec/oct.c +++ b/crypto/fipsmodule/ec/oct.c @@ -94,12 +94,12 @@ static size_t ec_GFp_simple_point2oct(const EC_GROUP *group, goto err; } - /* ret := required output buffer length */ + // ret := required output buffer length field_len = BN_num_bytes(&group->field); ret = (form == POINT_CONVERSION_COMPRESSED) ? 
1 + field_len : 1 + 2 * field_len; - /* if 'buf' is NULL, just return required length */ + // if 'buf' is NULL, just return required length if (buf != NULL) { if (len < ret) { OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL); @@ -299,13 +299,13 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, goto err; } - /* Recover y. We have a Weierstrass equation - * y^2 = x^3 + a*x + b, - * so y is one of the square roots of x^3 + a*x + b. */ + // Recover y. We have a Weierstrass equation + // y^2 = x^3 + a*x + b, + // so y is one of the square roots of x^3 + a*x + b. - /* tmp1 := x^3 */ + // tmp1 := x^3 if (group->meth->field_decode == 0) { - /* field_{sqr,mul} work on standard representation */ + // field_{sqr,mul} work on standard representation if (!group->meth->field_sqr(group, tmp2, x, ctx) || !group->meth->field_mul(group, tmp1, tmp2, x, ctx)) { goto err; @@ -317,7 +317,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, } } - /* tmp1 := tmp1 + a*x */ + // tmp1 := tmp1 + a*x if (group->a_is_minus3) { if (!BN_mod_lshift1_quick(tmp2, x, &group->field) || !BN_mod_add_quick(tmp2, tmp2, x, &group->field) || @@ -331,7 +331,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, goto err; } } else { - /* field_mul works on standard representation */ + // field_mul works on standard representation if (!group->meth->field_mul(group, tmp2, &group->a, x, ctx)) { goto err; } @@ -342,7 +342,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, } } - /* tmp1 := tmp1 + b */ + // tmp1 := tmp1 + b if (group->meth->field_decode) { if (!group->meth->field_decode(group, tmp2, &group->b, ctx) || !BN_mod_add_quick(tmp1, tmp1, tmp2, &group->field)) { diff --git a/crypto/fipsmodule/ec/p224-64.c b/crypto/fipsmodule/ec/p224-64.c index 67dfcc85..ec5a93d2 100644 --- a/crypto/fipsmodule/ec/p224-64.c +++ b/crypto/fipsmodule/ec/p224-64.c @@ -12,10 +12,10 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR 
IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* A 64-bit implementation of the NIST P-224 elliptic curve point multiplication - * - * Inspired by Daniel J. Bernstein's public domain nistp224 implementation - * and Adam Langley's public domain 64-bit C implementation of curve25519. */ +// A 64-bit implementation of the NIST P-224 elliptic curve point multiplication +// +// Inspired by Daniel J. Bernstein's public domain nistp224 implementation +// and Adam Langley's public domain 64-bit C implementation of curve25519. #include @@ -34,18 +34,18 @@ #include "../../internal.h" -/* Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 - * using 64-bit coefficients called 'limbs', and sometimes (for multiplication - * results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + - * 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb - * representation is an 'p224_felem'; a 7-p224_widelimb representation is a - * 'p224_widefelem'. Even within felems, bits of adjacent limbs overlap, and we - * don't always reduce the representations: we ensure that inputs to each - * p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i < - * 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients - * are then again partially reduced to obtain an p224_felem satisfying a_i < - * 2^57. We only reduce to the unique minimal representation at the end of the - * computation. */ +// Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 +// using 64-bit coefficients called 'limbs', and sometimes (for multiplication +// results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + +// 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb +// representation is an 'p224_felem'; a 7-p224_widelimb representation is a +// 'p224_widefelem'. 
Even within felems, bits of adjacent limbs overlap, and we +// don't always reduce the representations: we ensure that inputs to each +// p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i < +// 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients +// are then again partially reduced to obtain an p224_felem satisfying a_i < +// 2^57. We only reduce to the unique minimal representation at the end of the +// computation. typedef uint64_t p224_limb; typedef uint128_t p224_widelimb; @@ -53,40 +53,40 @@ typedef uint128_t p224_widelimb; typedef p224_limb p224_felem[4]; typedef p224_widelimb p224_widefelem[7]; -/* Field element represented as a byte arrary. 28*8 = 224 bits is also the - * group order size for the elliptic curve, and we also use this type for - * scalars for point multiplication. */ +// Field element represented as a byte arrary. 28*8 = 224 bits is also the +// group order size for the elliptic curve, and we also use this type for +// scalars for point multiplication. typedef uint8_t p224_felem_bytearray[28]; -/* Precomputed multiples of the standard generator - * Points are given in coordinates (X, Y, Z) where Z normally is 1 - * (0 for the point at infinity). - * For each field element, slice a_0 is word 0, etc. 
- * - * The table has 2 * 16 elements, starting with the following: - * index | bits | point - * ------+---------+------------------------------ - * 0 | 0 0 0 0 | 0G - * 1 | 0 0 0 1 | 1G - * 2 | 0 0 1 0 | 2^56G - * 3 | 0 0 1 1 | (2^56 + 1)G - * 4 | 0 1 0 0 | 2^112G - * 5 | 0 1 0 1 | (2^112 + 1)G - * 6 | 0 1 1 0 | (2^112 + 2^56)G - * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G - * 8 | 1 0 0 0 | 2^168G - * 9 | 1 0 0 1 | (2^168 + 1)G - * 10 | 1 0 1 0 | (2^168 + 2^56)G - * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G - * 12 | 1 1 0 0 | (2^168 + 2^112)G - * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G - * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G - * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G - * followed by a copy of this with each element multiplied by 2^28. - * - * The reason for this is so that we can clock bits into four different - * locations when doing simple scalar multiplies against the base point, - * and then another four locations using the second 16 elements. */ +// Precomputed multiples of the standard generator +// Points are given in coordinates (X, Y, Z) where Z normally is 1 +// (0 for the point at infinity). +// For each field element, slice a_0 is word 0, etc. +// +// The table has 2 * 16 elements, starting with the following: +// index | bits | point +// ------+---------+------------------------------ +// 0 | 0 0 0 0 | 0G +// 1 | 0 0 0 1 | 1G +// 2 | 0 0 1 0 | 2^56G +// 3 | 0 0 1 1 | (2^56 + 1)G +// 4 | 0 1 0 0 | 2^112G +// 5 | 0 1 0 1 | (2^112 + 1)G +// 6 | 0 1 1 0 | (2^112 + 2^56)G +// 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G +// 8 | 1 0 0 0 | 2^168G +// 9 | 1 0 0 1 | (2^168 + 1)G +// 10 | 1 0 1 0 | (2^168 + 2^56)G +// 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G +// 12 | 1 1 0 0 | (2^168 + 2^112)G +// 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G +// 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G +// 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G +// followed by a copy of this with each element multiplied by 2^28. 
+// +// The reason for this is so that we can clock bits into four different +// locations when doing simple scalar multiplies against the base point, +// and then another four locations using the second 16 elements. static const p224_felem g_p224_pre_comp[2][16][3] = { {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf}, @@ -187,7 +187,7 @@ static uint64_t p224_load_u64(const uint8_t in[8]) { return ret; } -/* Helper functions to convert field elements to/from internal representation */ +// Helper functions to convert field elements to/from internal representation static void p224_bin28_to_felem(p224_felem out, const uint8_t in[28]) { out[0] = p224_load_u64(in) & 0x00ffffffffffffff; out[1] = p224_load_u64(in + 7) & 0x00ffffffffffffff; @@ -204,16 +204,16 @@ static void p224_felem_to_bin28(uint8_t out[28], const p224_felem in) { } } -/* To preserve endianness when using BN_bn2bin and BN_bin2bn */ +// To preserve endianness when using BN_bn2bin and BN_bin2bn static void p224_flip_endian(uint8_t *out, const uint8_t *in, size_t len) { for (size_t i = 0; i < len; ++i) { out[i] = in[len - 1 - i]; } } -/* From OpenSSL BIGNUM to internal representation */ +// From OpenSSL BIGNUM to internal representation static int p224_BN_to_felem(p224_felem out, const BIGNUM *bn) { - /* BN_bn2bin eats leading zeroes */ + // BN_bn2bin eats leading zeroes p224_felem_bytearray b_out; OPENSSL_memset(b_out, 0, sizeof(b_out)); size_t num_bytes = BN_num_bytes(bn); @@ -230,7 +230,7 @@ static int p224_BN_to_felem(p224_felem out, const BIGNUM *bn) { return 1; } -/* From internal representation to OpenSSL BIGNUM */ +// From internal representation to OpenSSL BIGNUM static BIGNUM *p224_felem_to_BN(BIGNUM *out, const p224_felem in) { p224_felem_bytearray b_in, b_out; p224_felem_to_bin28(b_in, in); @@ -238,10 +238,10 @@ static BIGNUM *p224_felem_to_BN(BIGNUM *out, const p224_felem in) { return BN_bin2bn(b_out, sizeof(b_out), out); } 
-/* Field operations, using the internal representation of field elements. - * NB! These operations are specific to our point multiplication and cannot be - * expected to be correct in general - e.g., multiplication with a large scalar - * will cause an overflow. */ +// Field operations, using the internal representation of field elements. +// NB! These operations are specific to our point multiplication and cannot be +// expected to be correct in general - e.g., multiplication with a large scalar +// will cause an overflow. static void p224_felem_assign(p224_felem out, const p224_felem in) { out[0] = in[0]; @@ -250,7 +250,7 @@ static void p224_felem_assign(p224_felem out, const p224_felem in) { out[3] = in[3]; } -/* Sum two field elements: out += in */ +// Sum two field elements: out += in static void p224_felem_sum(p224_felem out, const p224_felem in) { out[0] += in[0]; out[1] += in[1]; @@ -258,8 +258,8 @@ static void p224_felem_sum(p224_felem out, const p224_felem in) { out[3] += in[3]; } -/* Get negative value: out = -in */ -/* Assumes in[i] < 2^57 */ +// Get negative value: out = -in +// Assumes in[i] < 2^57 static void p224_felem_neg(p224_felem out, const p224_felem in) { static const p224_limb two58p2 = (((p224_limb)1) << 58) + (((p224_limb)1) << 2); @@ -268,15 +268,15 @@ static void p224_felem_neg(p224_felem out, const p224_felem in) { static const p224_limb two58m42m2 = (((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2); - /* Set to 0 mod 2^224-2^96+1 to ensure out > in */ + // Set to 0 mod 2^224-2^96+1 to ensure out > in out[0] = two58p2 - in[0]; out[1] = two58m42m2 - in[1]; out[2] = two58m2 - in[2]; out[3] = two58m2 - in[3]; } -/* Subtract field elements: out -= in */ -/* Assumes in[i] < 2^57 */ +// Subtract field elements: out -= in +// Assumes in[i] < 2^57 static void p224_felem_diff(p224_felem out, const p224_felem in) { static const p224_limb two58p2 = (((p224_limb)1) << 58) + (((p224_limb)1) << 2); @@ -285,7 +285,7 @@ static 
void p224_felem_diff(p224_felem out, const p224_felem in) { static const p224_limb two58m42m2 = (((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2); - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ + // Add 0 mod 2^224-2^96+1 to ensure out > in out[0] += two58p2; out[1] += two58m42m2; out[2] += two58m2; @@ -297,8 +297,8 @@ static void p224_felem_diff(p224_felem out, const p224_felem in) { out[3] -= in[3]; } -/* Subtract in unreduced 128-bit mode: out -= in */ -/* Assumes in[i] < 2^119 */ +// Subtract in unreduced 128-bit mode: out -= in +// Assumes in[i] < 2^119 static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) { static const p224_widelimb two120 = ((p224_widelimb)1) << 120; static const p224_widelimb two120m64 = @@ -307,7 +307,7 @@ static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) { (((p224_widelimb)1) << 104) - (((p224_widelimb)1) << 64); - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ + // Add 0 mod 2^224-2^96+1 to ensure out > in out[0] += two120; out[1] += two120m64; out[2] += two120m64; @@ -325,8 +325,8 @@ static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) { out[6] -= in[6]; } -/* Subtract in mixed mode: out128 -= in64 */ -/* in[i] < 2^63 */ +// Subtract in mixed mode: out128 -= in64 +// in[i] < 2^63 static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) { static const p224_widelimb two64p8 = (((p224_widelimb)1) << 64) + (((p224_widelimb)1) << 8); @@ -336,7 +336,7 @@ static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) { (((p224_widelimb)1) << 48) - (((p224_widelimb)1) << 8); - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ + // Add 0 mod 2^224-2^96+1 to ensure out > in out[0] += two64p8; out[1] += two64m48m8; out[2] += two64m8; @@ -348,8 +348,8 @@ static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) { out[3] -= in[3]; } -/* Multiply a field element by a scalar: out = out * scalar - * The 
scalars we actually use are small, so results fit without overflow */ +// Multiply a field element by a scalar: out = out * scalar +// The scalars we actually use are small, so results fit without overflow static void p224_felem_scalar(p224_felem out, const p224_limb scalar) { out[0] *= scalar; out[1] *= scalar; @@ -357,8 +357,8 @@ static void p224_felem_scalar(p224_felem out, const p224_limb scalar) { out[3] *= scalar; } -/* Multiply an unreduced field element by a scalar: out = out * scalar - * The scalars we actually use are small, so results fit without overflow */ +// Multiply an unreduced field element by a scalar: out = out * scalar +// The scalars we actually use are small, so results fit without overflow static void p224_widefelem_scalar(p224_widefelem out, const p224_widelimb scalar) { out[0] *= scalar; @@ -370,7 +370,7 @@ static void p224_widefelem_scalar(p224_widefelem out, out[6] *= scalar; } -/* Square a field element: out = in^2 */ +// Square a field element: out = in^2 static void p224_felem_square(p224_widefelem out, const p224_felem in) { p224_limb tmp0, tmp1, tmp2; tmp0 = 2 * in[0]; @@ -385,7 +385,7 @@ static void p224_felem_square(p224_widefelem out, const p224_felem in) { out[6] = ((p224_widelimb)in[3]) * in[3]; } -/* Multiply two field elements: out = in1 * in2 */ +// Multiply two field elements: out = in1 * in2 static void p224_felem_mul(p224_widefelem out, const p224_felem in1, const p224_felem in2) { out[0] = ((p224_widelimb)in1[0]) * in2[0]; @@ -400,9 +400,9 @@ static void p224_felem_mul(p224_widefelem out, const p224_felem in1, out[6] = ((p224_widelimb)in1[3]) * in2[3]; } -/* Reduce seven 128-bit coefficients to four 64-bit coefficients. - * Requires in[i] < 2^126, - * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */ +// Reduce seven 128-bit coefficients to four 64-bit coefficients. 
+// Requires in[i] < 2^126, +// ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 static void p224_felem_reduce(p224_felem out, const p224_widefelem in) { static const p224_widelimb two127p15 = (((p224_widelimb)1) << 127) + (((p224_widelimb)1) << 15); @@ -413,14 +413,14 @@ static void p224_felem_reduce(p224_felem out, const p224_widefelem in) { (((p224_widelimb)1) << 55); p224_widelimb output[5]; - /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */ + // Add 0 mod 2^224-2^96+1 to ensure all differences are positive output[0] = in[0] + two127p15; output[1] = in[1] + two127m71m55; output[2] = in[2] + two127m71; output[3] = in[3]; output[4] = in[4]; - /* Eliminate in[4], in[5], in[6] */ + // Eliminate in[4], in[5], in[6] output[4] += in[6] >> 16; output[3] += (in[6] & 0xffff) << 40; output[2] -= in[6]; @@ -433,90 +433,90 @@ static void p224_felem_reduce(p224_felem out, const p224_widefelem in) { output[1] += (output[4] & 0xffff) << 40; output[0] -= output[4]; - /* Carry 2 -> 3 -> 4 */ + // Carry 2 -> 3 -> 4 output[3] += output[2] >> 56; output[2] &= 0x00ffffffffffffff; output[4] = output[3] >> 56; output[3] &= 0x00ffffffffffffff; - /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */ + // Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 - /* Eliminate output[4] */ + // Eliminate output[4] output[2] += output[4] >> 16; - /* output[2] < 2^56 + 2^56 = 2^57 */ + // output[2] < 2^56 + 2^56 = 2^57 output[1] += (output[4] & 0xffff) << 40; output[0] -= output[4]; - /* Carry 0 -> 1 -> 2 -> 3 */ + // Carry 0 -> 1 -> 2 -> 3 output[1] += output[0] >> 56; out[0] = output[0] & 0x00ffffffffffffff; output[2] += output[1] >> 56; - /* output[2] < 2^57 + 2^72 */ + // output[2] < 2^57 + 2^72 out[1] = output[1] & 0x00ffffffffffffff; output[3] += output[2] >> 56; - /* output[3] <= 2^56 + 2^16 */ + // output[3] <= 2^56 + 2^16 out[2] = output[2] & 0x00ffffffffffffff; - /* out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, - * out[3] <= 
2^56 + 2^16 (due to final carry), - * so out < 2*p */ + // out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, + // out[3] <= 2^56 + 2^16 (due to final carry), + // so out < 2*p out[3] = output[3]; } -/* Reduce to unique minimal representation. - * Requires 0 <= in < 2*p (always call p224_felem_reduce first) */ +// Reduce to unique minimal representation. +// Requires 0 <= in < 2*p (always call p224_felem_reduce first) static void p224_felem_contract(p224_felem out, const p224_felem in) { static const int64_t two56 = ((p224_limb)1) << 56; - /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */ - /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */ + // 0 <= in < 2*p, p = 2^224 - 2^96 + 1 + // if in > p , reduce in = in - 2^224 + 2^96 - 1 int64_t tmp[4], a; tmp[0] = in[0]; tmp[1] = in[1]; tmp[2] = in[2]; tmp[3] = in[3]; - /* Case 1: a = 1 iff in >= 2^224 */ + // Case 1: a = 1 iff in >= 2^224 a = (in[3] >> 56); tmp[0] -= a; tmp[1] += a << 40; tmp[3] &= 0x00ffffffffffffff; - /* Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and - * the lower part is non-zero */ + // Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and + // the lower part is non-zero a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) | (((int64_t)(in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63); a &= 0x00ffffffffffffff; - /* turn a into an all-one mask (if a = 0) or an all-zero mask */ + // turn a into an all-one mask (if a = 0) or an all-zero mask a = (a - 1) >> 63; - /* subtract 2^224 - 2^96 + 1 if a is all-one */ + // subtract 2^224 - 2^96 + 1 if a is all-one tmp[3] &= a ^ 0xffffffffffffffff; tmp[2] &= a ^ 0xffffffffffffffff; tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff; tmp[0] -= 1 & a; - /* eliminate negative coefficients: if tmp[0] is negative, tmp[1] must - * be non-zero, so we only need one step */ + // eliminate negative coefficients: if tmp[0] is negative, tmp[1] must + // be non-zero, so we only need one step a = tmp[0] >> 63; tmp[0] += two56 & a; 
tmp[1] -= 1 & a; - /* carry 1 -> 2 -> 3 */ + // carry 1 -> 2 -> 3 tmp[2] += tmp[1] >> 56; tmp[1] &= 0x00ffffffffffffff; tmp[3] += tmp[2] >> 56; tmp[2] &= 0x00ffffffffffffff; - /* Now 0 <= out < p */ + // Now 0 <= out < p out[0] = tmp[0]; out[1] = tmp[1]; out[2] = tmp[2]; out[3] = tmp[3]; } -/* Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field - * elements are reduced to in < 2^225, so we only need to check three cases: 0, - * 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 */ +// Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field +// elements are reduced to in < 2^225, so we only need to check three cases: 0, +// 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 static p224_limb p224_felem_is_zero(const p224_felem in) { p224_limb zero = in[0] | in[1] | in[2] | in[3]; zero = (((int64_t)(zero)-1) >> 63) & 1; @@ -532,92 +532,92 @@ static p224_limb p224_felem_is_zero(const p224_felem in) { return (zero | two224m96p1 | two225m97p2); } -/* Invert a field element */ -/* Computation chain copied from djb's code */ +// Invert a field element +// Computation chain copied from djb's code static void p224_felem_inv(p224_felem out, const p224_felem in) { p224_felem ftmp, ftmp2, ftmp3, ftmp4; p224_widefelem tmp; p224_felem_square(tmp, in); - p224_felem_reduce(ftmp, tmp); /* 2 */ + p224_felem_reduce(ftmp, tmp); // 2 p224_felem_mul(tmp, in, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^2 - 1 */ + p224_felem_reduce(ftmp, tmp); // 2^2 - 1 p224_felem_square(tmp, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^3 - 2 */ + p224_felem_reduce(ftmp, tmp); // 2^3 - 2 p224_felem_mul(tmp, in, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^3 - 1 */ + p224_felem_reduce(ftmp, tmp); // 2^3 - 1 p224_felem_square(tmp, ftmp); - p224_felem_reduce(ftmp2, tmp); /* 2^4 - 2 */ + p224_felem_reduce(ftmp2, tmp); // 2^4 - 2 p224_felem_square(tmp, ftmp2); - p224_felem_reduce(ftmp2, tmp); /* 2^5 - 4 */ + p224_felem_reduce(ftmp2, tmp); // 2^5 - 4 p224_felem_square(tmp, ftmp2); - 
p224_felem_reduce(ftmp2, tmp); /* 2^6 - 8 */ + p224_felem_reduce(ftmp2, tmp); // 2^6 - 8 p224_felem_mul(tmp, ftmp2, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^6 - 1 */ + p224_felem_reduce(ftmp, tmp); // 2^6 - 1 p224_felem_square(tmp, ftmp); - p224_felem_reduce(ftmp2, tmp); /* 2^7 - 2 */ - for (size_t i = 0; i < 5; ++i) { /* 2^12 - 2^6 */ + p224_felem_reduce(ftmp2, tmp); // 2^7 - 2 + for (size_t i = 0; i < 5; ++i) { // 2^12 - 2^6 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); } p224_felem_mul(tmp, ftmp2, ftmp); - p224_felem_reduce(ftmp2, tmp); /* 2^12 - 1 */ + p224_felem_reduce(ftmp2, tmp); // 2^12 - 1 p224_felem_square(tmp, ftmp2); - p224_felem_reduce(ftmp3, tmp); /* 2^13 - 2 */ - for (size_t i = 0; i < 11; ++i) {/* 2^24 - 2^12 */ + p224_felem_reduce(ftmp3, tmp); // 2^13 - 2 + for (size_t i = 0; i < 11; ++i) { // 2^24 - 2^12 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp3, tmp); } p224_felem_mul(tmp, ftmp3, ftmp2); - p224_felem_reduce(ftmp2, tmp); /* 2^24 - 1 */ + p224_felem_reduce(ftmp2, tmp); // 2^24 - 1 p224_felem_square(tmp, ftmp2); - p224_felem_reduce(ftmp3, tmp); /* 2^25 - 2 */ - for (size_t i = 0; i < 23; ++i) {/* 2^48 - 2^24 */ + p224_felem_reduce(ftmp3, tmp); // 2^25 - 2 + for (size_t i = 0; i < 23; ++i) { // 2^48 - 2^24 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp3, tmp); } p224_felem_mul(tmp, ftmp3, ftmp2); - p224_felem_reduce(ftmp3, tmp); /* 2^48 - 1 */ + p224_felem_reduce(ftmp3, tmp); // 2^48 - 1 p224_felem_square(tmp, ftmp3); - p224_felem_reduce(ftmp4, tmp); /* 2^49 - 2 */ - for (size_t i = 0; i < 47; ++i) {/* 2^96 - 2^48 */ + p224_felem_reduce(ftmp4, tmp); // 2^49 - 2 + for (size_t i = 0; i < 47; ++i) { // 2^96 - 2^48 p224_felem_square(tmp, ftmp4); p224_felem_reduce(ftmp4, tmp); } p224_felem_mul(tmp, ftmp3, ftmp4); - p224_felem_reduce(ftmp3, tmp); /* 2^96 - 1 */ + p224_felem_reduce(ftmp3, tmp); // 2^96 - 1 p224_felem_square(tmp, ftmp3); - p224_felem_reduce(ftmp4, tmp); /* 2^97 - 2 */ - for (size_t i = 0; i < 23; ++i) 
{/* 2^120 - 2^24 */ + p224_felem_reduce(ftmp4, tmp); // 2^97 - 2 + for (size_t i = 0; i < 23; ++i) { // 2^120 - 2^24 p224_felem_square(tmp, ftmp4); p224_felem_reduce(ftmp4, tmp); } p224_felem_mul(tmp, ftmp2, ftmp4); - p224_felem_reduce(ftmp2, tmp); /* 2^120 - 1 */ - for (size_t i = 0; i < 6; ++i) { /* 2^126 - 2^6 */ + p224_felem_reduce(ftmp2, tmp); // 2^120 - 1 + for (size_t i = 0; i < 6; ++i) { // 2^126 - 2^6 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); } p224_felem_mul(tmp, ftmp2, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^126 - 1 */ + p224_felem_reduce(ftmp, tmp); // 2^126 - 1 p224_felem_square(tmp, ftmp); - p224_felem_reduce(ftmp, tmp); /* 2^127 - 2 */ + p224_felem_reduce(ftmp, tmp); // 2^127 - 2 p224_felem_mul(tmp, ftmp, in); - p224_felem_reduce(ftmp, tmp); /* 2^127 - 1 */ - for (size_t i = 0; i < 97; ++i) {/* 2^224 - 2^97 */ + p224_felem_reduce(ftmp, tmp); // 2^127 - 1 + for (size_t i = 0; i < 97; ++i) { // 2^224 - 2^97 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp, tmp); } p224_felem_mul(tmp, ftmp, ftmp3); - p224_felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */ + p224_felem_reduce(out, tmp); // 2^224 - 2^96 - 1 } -/* Copy in constant time: - * if icopy == 1, copy in to out, - * if icopy == 0, copy out to itself. */ +// Copy in constant time: +// if icopy == 1, copy in to out, +// if icopy == 0, copy out to itself. 
static void p224_copy_conditional(p224_felem out, const p224_felem in, p224_limb icopy) { - /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */ + // icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one const p224_limb copy = -icopy; for (size_t i = 0; i < 4; ++i) { const p224_limb tmp = copy & (in[i] ^ out[i]); @@ -625,19 +625,19 @@ static void p224_copy_conditional(p224_felem out, const p224_felem in, } } -/* ELLIPTIC CURVE POINT OPERATIONS - * - * Points are represented in Jacobian projective coordinates: - * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3), - * or to the point at infinity if Z == 0. */ - -/* Double an elliptic curve point: - * (X', Y', Z') = 2 * (X, Y, Z), where - * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2 - * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2 - * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z - * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed, - * while x_out == y_in is not (maybe this works, but it's not tested). */ +// ELLIPTIC CURVE POINT OPERATIONS +// +// Points are represented in Jacobian projective coordinates: +// (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3), +// or to the point at infinity if Z == 0. + +// Double an elliptic curve point: +// (X', Y', Z') = 2 * (X, Y, Z), where +// X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2 +// Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2 +// Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z +// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed, +// while x_out == y_in is not (maybe this works, but it's not tested). 
static void p224_point_double(p224_felem x_out, p224_felem y_out, p224_felem z_out, const p224_felem x_in, const p224_felem y_in, const p224_felem z_in) { @@ -647,82 +647,82 @@ static void p224_point_double(p224_felem x_out, p224_felem y_out, p224_felem_assign(ftmp, x_in); p224_felem_assign(ftmp2, x_in); - /* delta = z^2 */ + // delta = z^2 p224_felem_square(tmp, z_in); p224_felem_reduce(delta, tmp); - /* gamma = y^2 */ + // gamma = y^2 p224_felem_square(tmp, y_in); p224_felem_reduce(gamma, tmp); - /* beta = x*gamma */ + // beta = x*gamma p224_felem_mul(tmp, x_in, gamma); p224_felem_reduce(beta, tmp); - /* alpha = 3*(x-delta)*(x+delta) */ + // alpha = 3*(x-delta)*(x+delta) p224_felem_diff(ftmp, delta); - /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */ + // ftmp[i] < 2^57 + 2^58 + 2 < 2^59 p224_felem_sum(ftmp2, delta); - /* ftmp2[i] < 2^57 + 2^57 = 2^58 */ + // ftmp2[i] < 2^57 + 2^57 = 2^58 p224_felem_scalar(ftmp2, 3); - /* ftmp2[i] < 3 * 2^58 < 2^60 */ + // ftmp2[i] < 3 * 2^58 < 2^60 p224_felem_mul(tmp, ftmp, ftmp2); - /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */ + // tmp[i] < 2^60 * 2^59 * 4 = 2^121 p224_felem_reduce(alpha, tmp); - /* x' = alpha^2 - 8*beta */ + // x' = alpha^2 - 8*beta p224_felem_square(tmp, alpha); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 p224_felem_assign(ftmp, beta); p224_felem_scalar(ftmp, 8); - /* ftmp[i] < 8 * 2^57 = 2^60 */ + // ftmp[i] < 8 * 2^57 = 2^60 p224_felem_diff_128_64(tmp, ftmp); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 p224_felem_reduce(x_out, tmp); - /* z' = (y + z)^2 - gamma - delta */ + // z' = (y + z)^2 - gamma - delta p224_felem_sum(delta, gamma); - /* delta[i] < 2^57 + 2^57 = 2^58 */ + // delta[i] < 2^57 + 2^57 = 2^58 p224_felem_assign(ftmp, y_in); p224_felem_sum(ftmp, z_in); - /* ftmp[i] < 2^57 + 2^57 = 2^58 */ + // ftmp[i] < 2^57 + 2^57 = 2^58 p224_felem_square(tmp, ftmp); - /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */ + // tmp[i] < 4 * 2^58 * 2^58 = 2^118 
p224_felem_diff_128_64(tmp, delta); - /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */ + // tmp[i] < 2^118 + 2^64 + 8 < 2^119 p224_felem_reduce(z_out, tmp); - /* y' = alpha*(4*beta - x') - 8*gamma^2 */ + // y' = alpha*(4*beta - x') - 8*gamma^2 p224_felem_scalar(beta, 4); - /* beta[i] < 4 * 2^57 = 2^59 */ + // beta[i] < 4 * 2^57 = 2^59 p224_felem_diff(beta, x_out); - /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */ + // beta[i] < 2^59 + 2^58 + 2 < 2^60 p224_felem_mul(tmp, alpha, beta); - /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */ + // tmp[i] < 4 * 2^57 * 2^60 = 2^119 p224_felem_square(tmp2, gamma); - /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */ + // tmp2[i] < 4 * 2^57 * 2^57 = 2^116 p224_widefelem_scalar(tmp2, 8); - /* tmp2[i] < 8 * 2^116 = 2^119 */ + // tmp2[i] < 8 * 2^116 = 2^119 p224_widefelem_diff(tmp, tmp2); - /* tmp[i] < 2^119 + 2^120 < 2^121 */ + // tmp[i] < 2^119 + 2^120 < 2^121 p224_felem_reduce(y_out, tmp); } -/* Add two elliptic curve points: - * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where - * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - - * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * - * X_1)^2 - X_3) - - * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) - * - * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. */ - -/* This function is not entirely constant-time: it includes a branch for - * checking whether the two input points are equal, (while not equal to the - * point at infinity). This case never happens during single point - * multiplication, so there is no timing leak for ECDH or ECDSA signing. 
*/ +// Add two elliptic curve points: +// (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where +// X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - +// 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 +// Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * +// X_1)^2 - X_3) - +// Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3 +// Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) +// +// This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. + +// This function is not entirely constant-time: it includes a branch for +// checking whether the two input points are equal, (while not equal to the +// point at infinity). This case never happens during single point +// multiplication, so there is no timing leak for ECDH or ECDSA signing. static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3, const p224_felem x1, const p224_felem y1, const p224_felem z1, const int mixed, @@ -733,136 +733,136 @@ static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3, p224_limb z1_is_zero, z2_is_zero, x_equal, y_equal; if (!mixed) { - /* ftmp2 = z2^2 */ + // ftmp2 = z2^2 p224_felem_square(tmp, z2); p224_felem_reduce(ftmp2, tmp); - /* ftmp4 = z2^3 */ + // ftmp4 = z2^3 p224_felem_mul(tmp, ftmp2, z2); p224_felem_reduce(ftmp4, tmp); - /* ftmp4 = z2^3*y1 */ + // ftmp4 = z2^3*y1 p224_felem_mul(tmp2, ftmp4, y1); p224_felem_reduce(ftmp4, tmp2); - /* ftmp2 = z2^2*x1 */ + // ftmp2 = z2^2*x1 p224_felem_mul(tmp2, ftmp2, x1); p224_felem_reduce(ftmp2, tmp2); } else { - /* We'll assume z2 = 1 (special case z2 = 0 is handled later) */ + // We'll assume z2 = 1 (special case z2 = 0 is handled later) - /* ftmp4 = z2^3*y1 */ + // ftmp4 = z2^3*y1 p224_felem_assign(ftmp4, y1); - /* ftmp2 = z2^2*x1 */ + // ftmp2 = z2^2*x1 p224_felem_assign(ftmp2, x1); } - /* ftmp = z1^2 */ + // ftmp = z1^2 p224_felem_square(tmp, z1); p224_felem_reduce(ftmp, tmp); - /* ftmp3 = z1^3 */ + // ftmp3 = z1^3 p224_felem_mul(tmp, ftmp, z1); 
p224_felem_reduce(ftmp3, tmp); - /* tmp = z1^3*y2 */ + // tmp = z1^3*y2 p224_felem_mul(tmp, ftmp3, y2); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 - /* ftmp3 = z1^3*y2 - z2^3*y1 */ + // ftmp3 = z1^3*y2 - z2^3*y1 p224_felem_diff_128_64(tmp, ftmp4); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 p224_felem_reduce(ftmp3, tmp); - /* tmp = z1^2*x2 */ + // tmp = z1^2*x2 p224_felem_mul(tmp, ftmp, x2); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 - /* ftmp = z1^2*x2 - z2^2*x1 */ + // ftmp = z1^2*x2 - z2^2*x1 p224_felem_diff_128_64(tmp, ftmp2); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 p224_felem_reduce(ftmp, tmp); - /* the formulae are incorrect if the points are equal - * so we check for this and do doubling if this happens */ + // the formulae are incorrect if the points are equal + // so we check for this and do doubling if this happens x_equal = p224_felem_is_zero(ftmp); y_equal = p224_felem_is_zero(ftmp3); z1_is_zero = p224_felem_is_zero(z1); z2_is_zero = p224_felem_is_zero(z2); - /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */ + // In affine coordinates, (X_1, Y_1) == (X_2, Y_2) if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) { p224_point_double(x3, y3, z3, x1, y1, z1); return; } - /* ftmp5 = z1*z2 */ + // ftmp5 = z1*z2 if (!mixed) { p224_felem_mul(tmp, z1, z2); p224_felem_reduce(ftmp5, tmp); } else { - /* special case z2 = 0 is handled later */ + // special case z2 = 0 is handled later p224_felem_assign(ftmp5, z1); } - /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */ + // z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) p224_felem_mul(tmp, ftmp, ftmp5); p224_felem_reduce(z_out, tmp); - /* ftmp = (z1^2*x2 - z2^2*x1)^2 */ + // ftmp = (z1^2*x2 - z2^2*x1)^2 p224_felem_assign(ftmp5, ftmp); p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp, tmp); - /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */ + // ftmp5 = (z1^2*x2 - z2^2*x1)^3 
p224_felem_mul(tmp, ftmp, ftmp5); p224_felem_reduce(ftmp5, tmp); - /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ + // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 p224_felem_mul(tmp, ftmp2, ftmp); p224_felem_reduce(ftmp2, tmp); - /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */ + // tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 p224_felem_mul(tmp, ftmp4, ftmp5); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 - /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */ + // tmp2 = (z1^3*y2 - z2^3*y1)^2 p224_felem_square(tmp2, ftmp3); - /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */ + // tmp2[i] < 4 * 2^57 * 2^57 < 2^116 - /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */ + // tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 p224_felem_diff_128_64(tmp2, ftmp5); - /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */ + // tmp2[i] < 2^116 + 2^64 + 8 < 2^117 - /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ + // ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 p224_felem_assign(ftmp5, ftmp2); p224_felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2 * 2^57 = 2^58 */ + // ftmp5[i] < 2 * 2^57 = 2^58 /* x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 - 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ p224_felem_diff_128_64(tmp2, ftmp5); - /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */ + // tmp2[i] < 2^117 + 2^64 + 8 < 2^118 p224_felem_reduce(x_out, tmp2); - /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */ + // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out p224_felem_diff(ftmp2, x_out); - /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */ + // ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 - /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) */ + // tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) p224_felem_mul(tmp2, ftmp3, ftmp2); - /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */ + // tmp2[i] < 4 * 2^57 * 2^59 = 2^118 /* y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) - z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */ p224_widefelem_diff(tmp2, tmp); - /* tmp2[i] < 2^118 + 2^120 < 2^121 */ + // tmp2[i] < 2^118 + 2^120 < 
2^121 p224_felem_reduce(y_out, tmp2); - /* the result (x_out, y_out, z_out) is incorrect if one of the inputs is - * the point at infinity, so we need to check for this separately */ + // the result (x_out, y_out, z_out) is incorrect if one of the inputs is + // the point at infinity, so we need to check for this separately - /* if point 1 is at infinity, copy point 2 to output, and vice versa */ + // if point 1 is at infinity, copy point 2 to output, and vice versa p224_copy_conditional(x_out, x2, z1_is_zero); p224_copy_conditional(x_out, x1, z2_is_zero); p224_copy_conditional(y_out, y2, z1_is_zero); @@ -874,8 +874,8 @@ static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3, p224_felem_assign(z3, z_out); } -/* p224_select_point selects the |idx|th point from a precomputation table and - * copies it to out. */ +// p224_select_point selects the |idx|th point from a precomputation table and +// copies it to out. static void p224_select_point(const uint64_t idx, size_t size, const p224_felem pre_comp[/*size*/][3], p224_felem out[3]) { @@ -896,7 +896,7 @@ static void p224_select_point(const uint64_t idx, size_t size, } } -/* p224_get_bit returns the |i|th bit in |in| */ +// p224_get_bit returns the |i|th bit in |in| static char p224_get_bit(const p224_felem_bytearray in, size_t i) { if (i >= 224) { return 0; @@ -904,11 +904,11 @@ static char p224_get_bit(const p224_felem_bytearray in, size_t i) { return (in[i >> 3] >> (i & 7)) & 1; } -/* Interleaved point multiplication using precomputed point multiples: - * The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars - * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple - * of the generator, using certain (large) precomputed multiples in - * g_p224_pre_comp. 
Output point (X, Y, Z) is stored in x_out, y_out, z_out */ +// Interleaved point multiplication using precomputed point multiples: +// The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars +// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple +// of the generator, using certain (large) precomputed multiples in +// g_p224_pre_comp. Output point (X, Y, Z) is stored in x_out, y_out, z_out static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, const uint8_t *p_scalar, const uint8_t *g_scalar, const p224_felem p_pre_comp[17][3]) { @@ -916,28 +916,28 @@ static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, uint64_t bits; uint8_t sign, digit; - /* set nq to the point at infinity */ + // set nq to the point at infinity OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem)); - /* Loop over both scalars msb-to-lsb, interleaving additions of multiples of - * the generator (two in each of the last 28 rounds) and additions of p (every - * 5th round). */ - int skip = 1; /* save two point operations in the first round */ + // Loop over both scalars msb-to-lsb, interleaving additions of multiples of + // the generator (two in each of the last 28 rounds) and additions of p (every + // 5th round). + int skip = 1; // save two point operations in the first round size_t i = p_scalar != NULL ? 
220 : 27; for (;;) { - /* double */ + // double if (!skip) { p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]); } - /* add multiples of the generator */ + // add multiples of the generator if (g_scalar != NULL && i <= 27) { - /* first, look 28 bits upwards */ + // first, look 28 bits upwards bits = p224_get_bit(g_scalar, i + 196) << 3; bits |= p224_get_bit(g_scalar, i + 140) << 2; bits |= p224_get_bit(g_scalar, i + 84) << 1; bits |= p224_get_bit(g_scalar, i + 28); - /* select the point to add, in constant time */ + // select the point to add, in constant time p224_select_point(bits, 16, g_p224_pre_comp[1], tmp); if (!skip) { @@ -948,18 +948,18 @@ static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, skip = 0; } - /* second, look at the current position */ + // second, look at the current position bits = p224_get_bit(g_scalar, i + 168) << 3; bits |= p224_get_bit(g_scalar, i + 112) << 2; bits |= p224_get_bit(g_scalar, i + 56) << 1; bits |= p224_get_bit(g_scalar, i); - /* select the point to add, in constant time */ + // select the point to add, in constant time p224_select_point(bits, 16, g_p224_pre_comp[0], tmp); p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0], tmp[1], tmp[2]); } - /* do other additions every 5 doublings */ + // do other additions every 5 doublings if (p_scalar != NULL && i % 5 == 0) { bits = p224_get_bit(p_scalar, i + 4) << 5; bits |= p224_get_bit(p_scalar, i + 3) << 4; @@ -969,9 +969,9 @@ static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, bits |= p224_get_bit(p_scalar, i - 1); ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits); - /* select the point to add or subtract */ + // select the point to add or subtract p224_select_point(digit, 17, p_pre_comp, tmp); - p224_felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative point */ + p224_felem_neg(tmp[3], tmp[1]); // (X, -Y, Z) is the negative point p224_copy_conditional(tmp[1], tmp[3], sign); if 
(!skip) { @@ -993,8 +993,8 @@ static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, p224_felem_assign(z_out, nq[2]); } -/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns - * (X', Y') = (X/Z^2, Y/Z^3) */ +// Takes the Jacobian coordinates (X, Y, Z) of a point and returns +// (X', Y') = (X/Z^2, Y/Z^3) static int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, @@ -1065,15 +1065,15 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, } if (p != NULL && p_scalar != NULL) { - /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e., - * they contribute nothing to the linear combination. */ + // We treat NULL scalars as 0, and NULL points as points at infinity, i.e., + // they contribute nothing to the linear combination. OPENSSL_memset(&p_secret, 0, sizeof(p_secret)); OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp)); size_t num_bytes; - /* reduce g_scalar to 0 <= g_scalar < 2^224 */ + // reduce g_scalar to 0 <= g_scalar < 2^224 if (BN_num_bits(p_scalar) > 224 || BN_is_negative(p_scalar)) { - /* this is an unusual input, and we don't guarantee - * constant-timeness */ + // this is an unusual input, and we don't guarantee + // constant-timeness if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); goto err; @@ -1084,7 +1084,7 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, } p224_flip_endian(p_secret, tmp, num_bytes); - /* precompute multiples */ + // precompute multiples if (!p224_BN_to_felem(x_out, &p->X) || !p224_BN_to_felem(y_out, &p->Y) || !p224_BN_to_felem(z_out, &p->Z)) { @@ -1112,9 +1112,9 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, if (g_scalar != NULL) { OPENSSL_memset(g_secret, 0, sizeof(g_secret)); size_t num_bytes; - /* reduce g_scalar to 0 <= g_scalar < 2^224 */ + // reduce g_scalar to 0 <= g_scalar < 2^224 
if (BN_num_bits(g_scalar) > 224 || BN_is_negative(g_scalar)) { - /* this is an unusual input, and we don't guarantee constant-timeness */ + // this is an unusual input, and we don't guarantee constant-timeness if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); goto err; @@ -1130,7 +1130,7 @@ static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, x_out, y_out, z_out, (p != NULL && p_scalar != NULL) ? p_secret : NULL, g_scalar != NULL ? g_secret : NULL, (const p224_felem(*)[3])p_pre_comp); - /* reduce the output to its unique minimal representation */ + // reduce the output to its unique minimal representation p224_felem_contract(x_in, x_out); p224_felem_contract(y_in, y_out); p224_felem_contract(z_in, z_out); @@ -1162,4 +1162,4 @@ DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp224_method) { out->field_decode = NULL; }; -#endif /* 64_BIT && !WINDOWS && !SMALL */ +#endif // 64_BIT && !WINDOWS && !SMALL diff --git a/crypto/fipsmodule/ec/p256-64.c b/crypto/fipsmodule/ec/p256-64.c index 8952aa2e..f7d1ff11 100644 --- a/crypto/fipsmodule/ec/p256-64.c +++ b/crypto/fipsmodule/ec/p256-64.c @@ -12,12 +12,12 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* A 64-bit implementation of the NIST P-256 elliptic curve point - * multiplication - * - * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c. - * Otherwise based on Emilia's P224 work, which was inspired by my curve25519 - * work which got its smarts from Daniel J. Bernstein's work on the same. */ +// A 64-bit implementation of the NIST P-256 elliptic curve point +// multiplication +// +// OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c. +// Otherwise based on Emilia's P224 work, which was inspired by my curve25519 +// work which got its smarts from Daniel J. Bernstein's work on the same. 
#include @@ -35,29 +35,29 @@ #include "internal.h" -/* The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We - * can serialise an element of this field into 32 bytes. We call this an - * felem_bytearray. */ +// The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We +// can serialise an element of this field into 32 bytes. We call this an +// felem_bytearray. typedef uint8_t felem_bytearray[32]; -/* The representation of field elements. - * ------------------------------------ - * - * We represent field elements with either four 128-bit values, eight 128-bit - * values, or four 64-bit values. The field element represented is: - * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p) - * or: - * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p) - * - * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits - * apart, but are 128-bits wide, the most significant bits of each limb overlap - * with the least significant bits of the next. - * - * A field element with four limbs is an 'felem'. One with eight limbs is a - * 'longfelem' - * - * A field element with four, 64-bit values is called a 'smallfelem'. Small - * values are used as intermediate values before multiplication. */ +// The representation of field elements. +// ------------------------------------ +// +// We represent field elements with either four 128-bit values, eight 128-bit +// values, or four 64-bit values. The field element represented is: +// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p) +// or: +// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p) +// +// 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits +// apart, but are 128-bits wide, the most significant bits of each limb overlap +// with the least significant bits of the next. +// +// A field element with four limbs is an 'felem'. 
One with eight limbs is a +// 'longfelem' +// +// A field element with four, 64-bit values is called a 'smallfelem'. Small +// values are used as intermediate values before multiplication. #define NLIMBS 4 @@ -66,7 +66,7 @@ typedef limb felem[NLIMBS]; typedef limb longfelem[NLIMBS * 2]; typedef uint64_t smallfelem[NLIMBS]; -/* This is the value of the prime as four 64-bit words, little-endian. */ +// This is the value of the prime as four 64-bit words, little-endian. static const uint64_t kPrime[4] = {0xfffffffffffffffful, 0xffffffff, 0, 0xffffffff00000001ul}; static const uint64_t bottom63bits = 0x7ffffffffffffffful; @@ -81,8 +81,8 @@ static void store_u64(uint8_t out[8], uint64_t in) { OPENSSL_memcpy(out, &in, sizeof(in)); } -/* bin32_to_felem takes a little-endian byte array and converts it into felem - * form. This assumes that the CPU is little-endian. */ +// bin32_to_felem takes a little-endian byte array and converts it into felem +// form. This assumes that the CPU is little-endian. static void bin32_to_felem(felem out, const uint8_t in[32]) { out[0] = load_u64(&in[0]); out[1] = load_u64(&in[8]); @@ -90,8 +90,8 @@ static void bin32_to_felem(felem out, const uint8_t in[32]) { out[3] = load_u64(&in[24]); } -/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian, - * 32 byte array. This assumes that the CPU is little-endian. */ +// smallfelem_to_bin32 takes a smallfelem and serialises into a little endian, +// 32 byte array. This assumes that the CPU is little-endian. static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) { store_u64(&out[0], in[0]); store_u64(&out[8], in[1]); @@ -99,14 +99,14 @@ static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) { store_u64(&out[24], in[3]); } -/* To preserve endianness when using BN_bn2bin and BN_bin2bn. */ +// To preserve endianness when using BN_bn2bin and BN_bin2bn. 
static void flip_endian(uint8_t *out, const uint8_t *in, size_t len) { for (size_t i = 0; i < len; ++i) { out[i] = in[len - 1 - i]; } } -/* BN_to_felem converts an OpenSSL BIGNUM into an felem. */ +// BN_to_felem converts an OpenSSL BIGNUM into an felem. static int BN_to_felem(felem out, const BIGNUM *bn) { if (BN_is_negative(bn)) { OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE); @@ -114,7 +114,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) { } felem_bytearray b_out; - /* BN_bn2bin eats leading zeroes */ + // BN_bn2bin eats leading zeroes OPENSSL_memset(b_out, 0, sizeof(b_out)); size_t num_bytes = BN_num_bytes(bn); if (num_bytes > sizeof(b_out)) { @@ -129,7 +129,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) { return 1; } -/* felem_to_BN converts an felem into an OpenSSL BIGNUM. */ +// felem_to_BN converts an felem into an OpenSSL BIGNUM. static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) { felem_bytearray b_in, b_out; smallfelem_to_bin32(b_in, in); @@ -137,7 +137,7 @@ static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) { return BN_bin2bn(b_out, sizeof(b_out), out); } -/* Field operations. */ +// Field operations. static void felem_assign(felem out, const felem in) { out[0] = in[0]; @@ -146,7 +146,7 @@ static void felem_assign(felem out, const felem in) { out[3] = in[3]; } -/* felem_sum sets out = out + in. */ +// felem_sum sets out = out + in. static void felem_sum(felem out, const felem in) { out[0] += in[0]; out[1] += in[1]; @@ -154,7 +154,7 @@ static void felem_sum(felem out, const felem in) { out[3] += in[3]; } -/* felem_small_sum sets out = out + in. */ +// felem_small_sum sets out = out + in. 
static void felem_small_sum(felem out, const smallfelem in) { out[0] += in[0]; out[1] += in[1]; @@ -162,7 +162,7 @@ static void felem_small_sum(felem out, const smallfelem in) { out[3] += in[3]; } -/* felem_scalar sets out = out * scalar */ +// felem_scalar sets out = out * scalar static void felem_scalar(felem out, const uint64_t scalar) { out[0] *= scalar; out[1] *= scalar; @@ -170,7 +170,7 @@ static void felem_scalar(felem out, const uint64_t scalar) { out[3] *= scalar; } -/* longfelem_scalar sets out = out * scalar */ +// longfelem_scalar sets out = out * scalar static void longfelem_scalar(longfelem out, const uint64_t scalar) { out[0] *= scalar; out[1] *= scalar; @@ -186,27 +186,27 @@ static void longfelem_scalar(longfelem out, const uint64_t scalar) { #define two105 (((limb)1) << 105) #define two105m41p9 ((((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9)) -/* zero105 is 0 mod p */ +// zero105 is 0 mod p static const felem zero105 = {two105m41m9, two105, two105m41p9, two105m41p9}; -/* smallfelem_neg sets |out| to |-small| - * On exit: - * out[i] < out[i] + 2^105 */ +// smallfelem_neg sets |out| to |-small| +// On exit: +// out[i] < out[i] + 2^105 static void smallfelem_neg(felem out, const smallfelem small) { - /* In order to prevent underflow, we subtract from 0 mod p. */ + // In order to prevent underflow, we subtract from 0 mod p. out[0] = zero105[0] - small[0]; out[1] = zero105[1] - small[1]; out[2] = zero105[2] - small[2]; out[3] = zero105[3] - small[3]; } -/* felem_diff subtracts |in| from |out| - * On entry: - * in[i] < 2^104 - * On exit: - * out[i] < out[i] + 2^105. */ +// felem_diff subtracts |in| from |out| +// On entry: +// in[i] < 2^104 +// On exit: +// out[i] < out[i] + 2^105. static void felem_diff(felem out, const felem in) { - /* In order to prevent underflow, we add 0 mod p before subtracting. */ + // In order to prevent underflow, we add 0 mod p before subtracting. 
out[0] += zero105[0]; out[1] += zero105[1]; out[2] += zero105[2]; @@ -224,17 +224,17 @@ static void felem_diff(felem out, const felem in) { #define two107m43p11 \ ((((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11)) -/* zero107 is 0 mod p */ +// zero107 is 0 mod p static const felem zero107 = {two107m43m11, two107, two107m43p11, two107m43p11}; -/* An alternative felem_diff for larger inputs |in| - * felem_diff_zero107 subtracts |in| from |out| - * On entry: - * in[i] < 2^106 - * On exit: - * out[i] < out[i] + 2^107. */ +// An alternative felem_diff for larger inputs |in| +// felem_diff_zero107 subtracts |in| from |out| +// On entry: +// in[i] < 2^106 +// On exit: +// out[i] < out[i] + 2^107. static void felem_diff_zero107(felem out, const felem in) { - /* In order to prevent underflow, we add 0 mod p before subtracting. */ + // In order to prevent underflow, we add 0 mod p before subtracting. out[0] += zero107[0]; out[1] += zero107[1]; out[2] += zero107[2]; @@ -246,11 +246,11 @@ static void felem_diff_zero107(felem out, const felem in) { out[3] -= in[3]; } -/* longfelem_diff subtracts |in| from |out| - * On entry: - * in[i] < 7*2^67 - * On exit: - * out[i] < out[i] + 2^70 + 2^40. */ +// longfelem_diff subtracts |in| from |out| +// On entry: +// in[i] < 7*2^67 +// On exit: +// out[i] < out[i] + 2^70 + 2^40. 
static void longfelem_diff(longfelem out, const longfelem in) { static const limb two70m8p6 = (((limb)1) << 70) - (((limb)1) << 8) + (((limb)1) << 6); @@ -260,7 +260,7 @@ static void longfelem_diff(longfelem out, const longfelem in) { (((limb)1) << 38) + (((limb)1) << 6); static const limb two70m6 = (((limb)1) << 70) - (((limb)1) << 6); - /* add 0 mod p to avoid underflow */ + // add 0 mod p to avoid underflow out[0] += two70m8p6; out[1] += two70p40; out[2] += two70; @@ -270,7 +270,7 @@ static void longfelem_diff(longfelem out, const longfelem in) { out[6] += two70m6; out[7] += two70m6; - /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */ + // in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 out[0] -= in[0]; out[1] -= in[1]; out[2] -= in[2]; @@ -286,80 +286,80 @@ static void longfelem_diff(longfelem out, const longfelem in) { #define two64m46 ((((limb)1) << 64) - (((limb)1) << 46)) #define two64m32 ((((limb)1) << 64) - (((limb)1) << 32)) -/* zero110 is 0 mod p. */ +// zero110 is 0 mod p. static const felem zero110 = {two64m0, two110p32m0, two64m46, two64m32}; -/* felem_shrink converts an felem into a smallfelem. The result isn't quite - * minimal as the value may be greater than p. - * - * On entry: - * in[i] < 2^109 - * On exit: - * out[i] < 2^64. */ +// felem_shrink converts an felem into a smallfelem. The result isn't quite +// minimal as the value may be greater than p. +// +// On entry: +// in[i] < 2^109 +// On exit: +// out[i] < 2^64. 
static void felem_shrink(smallfelem out, const felem in) { felem tmp; uint64_t a, b, mask; int64_t high, low; static const uint64_t kPrime3Test = - 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */ + 0x7fffffff00000001ul; // 2^63 - 2^32 + 1 - /* Carry 2->3 */ + // Carry 2->3 tmp[3] = zero110[3] + in[3] + ((uint64_t)(in[2] >> 64)); - /* tmp[3] < 2^110 */ + // tmp[3] < 2^110 tmp[2] = zero110[2] + (uint64_t)in[2]; tmp[0] = zero110[0] + in[0]; tmp[1] = zero110[1] + in[1]; - /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */ + // tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 - /* We perform two partial reductions where we eliminate the high-word of - * tmp[3]. We don't update the other words till the end. */ - a = tmp[3] >> 64; /* a < 2^46 */ + // We perform two partial reductions where we eliminate the high-word of + // tmp[3]. We don't update the other words till the end. + a = tmp[3] >> 64; // a < 2^46 tmp[3] = (uint64_t)tmp[3]; tmp[3] -= a; tmp[3] += ((limb)a) << 32; - /* tmp[3] < 2^79 */ + // tmp[3] < 2^79 b = a; - a = tmp[3] >> 64; /* a < 2^15 */ - b += a; /* b < 2^46 + 2^15 < 2^47 */ + a = tmp[3] >> 64; // a < 2^15 + b += a; // b < 2^46 + 2^15 < 2^47 tmp[3] = (uint64_t)tmp[3]; tmp[3] -= a; tmp[3] += ((limb)a) << 32; - /* tmp[3] < 2^64 + 2^47 */ + // tmp[3] < 2^64 + 2^47 - /* This adjusts the other two words to complete the two partial - * reductions. */ + // This adjusts the other two words to complete the two partial + // reductions. tmp[0] += b; tmp[1] -= (((limb)b) << 32); - /* In order to make space in tmp[3] for the carry from 2 -> 3, we - * conditionally subtract kPrime if tmp[3] is large enough. */ + // In order to make space in tmp[3] for the carry from 2 -> 3, we + // conditionally subtract kPrime if tmp[3] is large enough. 
high = tmp[3] >> 64; - /* As tmp[3] < 2^65, high is either 1 or 0 */ + // As tmp[3] < 2^65, high is either 1 or 0 high = ~(high - 1); - /* high is: - * all ones if the high word of tmp[3] is 1 - * all zeros if the high word of tmp[3] if 0 */ + // high is: + // all ones if the high word of tmp[3] is 1 + // all zeros if the high word of tmp[3] if 0 low = tmp[3]; mask = low >> 63; - /* mask is: - * all ones if the MSB of low is 1 - * all zeros if the MSB of low if 0 */ + // mask is: + // all ones if the MSB of low is 1 + // all zeros if the MSB of low if 0 low &= bottom63bits; low -= kPrime3Test; - /* if low was greater than kPrime3Test then the MSB is zero */ + // if low was greater than kPrime3Test then the MSB is zero low = ~low; low >>= 63; - /* low is: - * all ones if low was > kPrime3Test - * all zeros if low was <= kPrime3Test */ + // low is: + // all ones if low was > kPrime3Test + // all zeros if low was <= kPrime3Test mask = (mask & low) | high; tmp[0] -= mask & kPrime[0]; tmp[1] -= mask & kPrime[1]; - /* kPrime[2] is zero, so omitted */ + // kPrime[2] is zero, so omitted tmp[3] -= mask & kPrime[3]; - /* tmp[3] < 2**64 - 2**32 + 1 */ + // tmp[3] < 2**64 - 2**32 + 1 tmp[1] += ((uint64_t)(tmp[0] >> 64)); tmp[0] = (uint64_t)tmp[0]; @@ -367,7 +367,7 @@ static void felem_shrink(smallfelem out, const felem in) { tmp[1] = (uint64_t)tmp[1]; tmp[3] += ((uint64_t)(tmp[2] >> 64)); tmp[2] = (uint64_t)tmp[2]; - /* tmp[i] < 2^64 */ + // tmp[i] < 2^64 out[0] = tmp[0]; out[1] = tmp[1]; @@ -375,7 +375,7 @@ static void felem_shrink(smallfelem out, const felem in) { out[3] = tmp[3]; } -/* smallfelem_expand converts a smallfelem to an felem */ +// smallfelem_expand converts a smallfelem to an felem static void smallfelem_expand(felem out, const smallfelem in) { out[0] = in[0]; out[1] = in[1]; @@ -383,11 +383,11 @@ static void smallfelem_expand(felem out, const smallfelem in) { out[3] = in[3]; } -/* smallfelem_square sets |out| = |small|^2 - * On entry: - * small[i] < 2^64 - * 
On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// smallfelem_square sets |out| = |small|^2 +// On entry: +// small[i] < 2^64 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void smallfelem_square(longfelem out, const smallfelem small) { limb a; uint64_t high, low; @@ -459,23 +459,23 @@ static void smallfelem_square(longfelem out, const smallfelem small) { out[7] = high; } -/*felem_square sets |out| = |in|^2 - * On entry: - * in[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67. */ +//felem_square sets |out| = |in|^2 +// On entry: +// in[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67. static void felem_square(longfelem out, const felem in) { uint64_t small[4]; felem_shrink(small, in); smallfelem_square(out, small); } -/* smallfelem_mul sets |out| = |small1| * |small2| - * On entry: - * small1[i] < 2^64 - * small2[i] < 2^64 - * On exit: - * out[i] < 7 * 2^64 < 2^67. */ +// smallfelem_mul sets |out| = |small1| * |small2| +// On entry: +// small1[i] < 2^64 +// small2[i] < 2^64 +// On exit: +// out[i] < 7 * 2^64 < 2^67. 
static void smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2) { limb a; @@ -578,12 +578,12 @@ static void smallfelem_mul(longfelem out, const smallfelem small1, out[7] = high; } -/* felem_mul sets |out| = |in1| * |in2| - * On entry: - * in1[i] < 2^109 - * in2[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// felem_mul sets |out| = |in1| * |in2| +// On entry: +// in1[i] < 2^109 +// in2[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void felem_mul(longfelem out, const felem in1, const felem in2) { smallfelem small1, small2; felem_shrink(small1, in1); @@ -591,12 +591,12 @@ static void felem_mul(longfelem out, const felem in1, const felem in2) { smallfelem_mul(out, small1, small2); } -/* felem_small_mul sets |out| = |small1| * |in2| - * On entry: - * small1[i] < 2^64 - * in2[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// felem_small_mul sets |out| = |small1| * |in2| +// On entry: +// small1[i] < 2^64 +// in2[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void felem_small_mul(longfelem out, const smallfelem small1, const felem in2) { smallfelem small2; @@ -608,24 +608,24 @@ static void felem_small_mul(longfelem out, const smallfelem small1, #define two100 (((limb)1) << 100) #define two100m36p4 ((((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4)) -/* zero100 is 0 mod p */ +// zero100 is 0 mod p static const felem zero100 = {two100m36m4, two100, two100m36p4, two100m36p4}; -/* Internal function for the different flavours of felem_reduce. - * felem_reduce_ reduces the higher coefficients in[4]-in[7]. 
- * On entry: - * out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7] - * out[1] >= in[7] + 2^32*in[4] - * out[2] >= in[5] + 2^32*in[5] - * out[3] >= in[4] + 2^32*in[5] + 2^32*in[6] - * On exit: - * out[0] <= out[0] + in[4] + 2^32*in[5] - * out[1] <= out[1] + in[5] + 2^33*in[6] - * out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7] - * out[3] <= out[3] + 2^32*in[4] + 3*in[7] */ +// Internal function for the different flavours of felem_reduce. +// felem_reduce_ reduces the higher coefficients in[4]-in[7]. +// On entry: +// out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7] +// out[1] >= in[7] + 2^32*in[4] +// out[2] >= in[5] + 2^32*in[5] +// out[3] >= in[4] + 2^32*in[5] + 2^32*in[6] +// On exit: +// out[0] <= out[0] + in[4] + 2^32*in[5] +// out[1] <= out[1] + in[5] + 2^33*in[6] +// out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7] +// out[3] <= out[3] + 2^32*in[4] + 3*in[7] static void felem_reduce_(felem out, const longfelem in) { int128_t c; - /* combine common terms from below */ + // combine common terms from below c = in[4] + (in[5] << 32); out[0] += c; out[3] -= c; @@ -634,35 +634,35 @@ static void felem_reduce_(felem out, const longfelem in) { out[1] += c; out[2] -= c; - /* the remaining terms */ - /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */ + // the remaining terms + // 256: [(0,1),(96,-1),(192,-1),(224,1)] out[1] -= (in[4] << 32); out[3] += (in[4] << 32); - /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */ + // 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] out[2] -= (in[5] << 32); - /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */ + // 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] out[0] -= in[6]; out[0] -= (in[6] << 32); out[1] += (in[6] << 33); out[2] += (in[6] * 2); out[3] -= (in[6] << 32); - /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */ + // 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] out[0] -= in[7]; out[0] -= (in[7] << 32); out[2] += (in[7] << 33); out[3] += (in[7] * 3); } -/* felem_reduce converts a longfelem into an felem. 
- * To be called directly after felem_square or felem_mul. - * On entry: - * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64 - * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64 - * On exit: - * out[i] < 2^101 */ +// felem_reduce converts a longfelem into an felem. +// To be called directly after felem_square or felem_mul. +// On entry: +// in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64 +// in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2^64 +// On exit: +// out[i] < 2^101 static void felem_reduce(felem out, const longfelem in) { out[0] = zero100[0] + in[0]; out[1] = zero100[1] + in[1]; @@ -671,22 +671,22 @@ static void felem_reduce(felem out, const longfelem in) { felem_reduce_(out, in); - /* out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0 - * out[1] > 2^100 - 2^64 - 7*2^96 > 0 - * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0 - * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0 - * - * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 - * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101 - * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101 - * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 */ + // out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0 + // out[1] > 2^100 - 2^64 - 7*2^96 > 0 + // out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0 + // out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0 + // + // out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 + // out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101 + // out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101 + // out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 } -/* felem_reduce_zero105 converts a larger longfelem into an felem. - * On entry: - * in[0] < 2^71 - * On exit: - * out[i] < 2^106 */ +// felem_reduce_zero105 converts a larger longfelem into an felem. 
+// On entry: +// in[0] < 2^71 +// On exit: +// out[i] < 2^106 static void felem_reduce_zero105(felem out, const longfelem in) { out[0] = zero105[0] + in[0]; out[1] = zero105[1] + in[1]; @@ -695,19 +695,19 @@ static void felem_reduce_zero105(felem out, const longfelem in) { felem_reduce_(out, in); - /* out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0 - * out[1] > 2^105 - 2^71 - 2^103 > 0 - * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0 - * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0 - * - * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 - * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 - * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106 - * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 */ + // out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0 + // out[1] > 2^105 - 2^71 - 2^103 > 0 + // out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0 + // out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0 + // + // out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 + // out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 + // out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106 + // out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 } -/* subtract_u64 sets *result = *result - v and *carry to one if the - * subtraction underflowed. */ +// subtract_u64 sets *result = *result - v and *carry to one if the +// subtraction underflowed. static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) { uint128_t r = *result; r -= v; @@ -715,28 +715,28 @@ static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) { *result = (uint64_t)r; } -/* felem_contract converts |in| to its unique, minimal representation. On - * entry: in[i] < 2^109. */ +// felem_contract converts |in| to its unique, minimal representation. On +// entry: in[i] < 2^109. 
static void felem_contract(smallfelem out, const felem in) { uint64_t all_equal_so_far = 0, result = 0; felem_shrink(out, in); - /* small is minimal except that the value might be > p */ + // small is minimal except that the value might be > p all_equal_so_far--; - /* We are doing a constant time test if out >= kPrime. We need to compare - * each uint64_t, from most-significant to least significant. For each one, if - * all words so far have been equal (m is all ones) then a non-equal - * result is the answer. Otherwise we continue. */ + // We are doing a constant time test if out >= kPrime. We need to compare + // each uint64_t, from most-significant to least significant. For each one, if + // all words so far have been equal (m is all ones) then a non-equal + // result is the answer. Otherwise we continue. for (size_t i = 3; i < 4; i--) { uint64_t equal; uint128_t a = ((uint128_t)kPrime[i]) - out[i]; - /* if out[i] > kPrime[i] then a will underflow and the high 64-bits - * will all be set. */ + // if out[i] > kPrime[i] then a will underflow and the high 64-bits + // will all be set. result |= all_equal_so_far & ((uint64_t)(a >> 64)); - /* if kPrime[i] == out[i] then |equal| will be all zeros and the - * decrement will make it all ones. */ + // if kPrime[i] == out[i] then |equal| will be all zeros and the + // decrement will make it all ones. equal = kPrime[i] ^ out[i]; equal--; equal &= equal << 32; @@ -750,11 +750,11 @@ static void felem_contract(smallfelem out, const felem in) { all_equal_so_far &= equal; } - /* if all_equal_so_far is still all ones then the two values are equal - * and so out >= kPrime is true. */ + // if all_equal_so_far is still all ones then the two values are equal + // and so out >= kPrime is true. result |= all_equal_so_far; - /* if out >= kPrime then we subtract kPrime. */ + // if out >= kPrime then we subtract kPrime. 
uint64_t carry; subtract_u64(&out[0], &carry, result & kPrime[0]); subtract_u64(&out[1], &carry, carry); @@ -771,10 +771,10 @@ static void felem_contract(smallfelem out, const felem in) { subtract_u64(&out[3], &carry, result & kPrime[3]); } -/* felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0 - * otherwise. - * On entry: - * small[i] < 2^64 */ +// felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0 +// otherwise. +// On entry: +// small[i] < 2^64 static limb smallfelem_is_zero(const smallfelem small) { limb result; uint64_t is_p; @@ -807,118 +807,118 @@ static limb smallfelem_is_zero(const smallfelem small) { return result; } -/* felem_inv calculates |out| = |in|^{-1} - * - * Based on Fermat's Little Theorem: - * a^p = a (mod p) - * a^{p-1} = 1 (mod p) - * a^{p-2} = a^{-1} (mod p) */ +// felem_inv calculates |out| = |in|^{-1} +// +// Based on Fermat's Little Theorem: +// a^p = a (mod p) +// a^{p-1} = 1 (mod p) +// a^{p-2} = a^{-1} (mod p) static void felem_inv(felem out, const felem in) { felem ftmp, ftmp2; - /* each e_I will hold |in|^{2^I - 1} */ + // each e_I will hold |in|^{2^I - 1} felem e2, e4, e8, e16, e32, e64; longfelem tmp; felem_square(tmp, in); - felem_reduce(ftmp, tmp); /* 2^1 */ + felem_reduce(ftmp, tmp); // 2^1 felem_mul(tmp, in, ftmp); - felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^2 - 2^0 felem_assign(e2, ftmp); felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */ + felem_reduce(ftmp, tmp); // 2^3 - 2^1 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */ + felem_reduce(ftmp, tmp); // 2^4 - 2^2 felem_mul(tmp, ftmp, e2); - felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^4 - 2^0 felem_assign(e4, ftmp); felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */ + felem_reduce(ftmp, tmp); // 2^5 - 2^1 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */ + felem_reduce(ftmp, tmp); // 2^6 - 2^2 
felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */ + felem_reduce(ftmp, tmp); // 2^7 - 2^3 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */ + felem_reduce(ftmp, tmp); // 2^8 - 2^4 felem_mul(tmp, ftmp, e4); - felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^8 - 2^0 felem_assign(e8, ftmp); for (size_t i = 0; i < 8; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^16 - 2^8 */ + } // 2^16 - 2^8 felem_mul(tmp, ftmp, e8); - felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^16 - 2^0 felem_assign(e16, ftmp); for (size_t i = 0; i < 16; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^32 - 2^16 */ + } // 2^32 - 2^16 felem_mul(tmp, ftmp, e16); - felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^32 - 2^0 felem_assign(e32, ftmp); for (size_t i = 0; i < 32; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^64 - 2^32 */ + } // 2^64 - 2^32 felem_assign(e64, ftmp); felem_mul(tmp, ftmp, in); - felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */ + felem_reduce(ftmp, tmp); // 2^64 - 2^32 + 2^0 for (size_t i = 0; i < 192; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^256 - 2^224 + 2^192 */ + } // 2^256 - 2^224 + 2^192 felem_mul(tmp, e64, e32); - felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^64 - 2^0 for (size_t i = 0; i < 16; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^80 - 2^16 */ + } // 2^80 - 2^16 felem_mul(tmp, ftmp2, e16); - felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^80 - 2^0 for (size_t i = 0; i < 8; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^88 - 2^8 */ + } // 2^88 - 2^8 felem_mul(tmp, ftmp2, e8); - felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^88 - 2^0 for (size_t i = 0; i < 4; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^92 - 2^4 */ + } // 
2^92 - 2^4 felem_mul(tmp, ftmp2, e4); - felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^92 - 2^0 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */ + felem_reduce(ftmp2, tmp); // 2^93 - 2^1 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */ + felem_reduce(ftmp2, tmp); // 2^94 - 2^2 felem_mul(tmp, ftmp2, e2); - felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^94 - 2^0 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */ + felem_reduce(ftmp2, tmp); // 2^95 - 2^1 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */ + felem_reduce(ftmp2, tmp); // 2^96 - 2^2 felem_mul(tmp, ftmp2, in); - felem_reduce(ftmp2, tmp); /* 2^96 - 3 */ + felem_reduce(ftmp2, tmp); // 2^96 - 3 felem_mul(tmp, ftmp2, ftmp); - felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */ + felem_reduce(out, tmp); // 2^256 - 2^224 + 2^192 + 2^96 - 3 } -/* Group operations - * ---------------- - * - * Building on top of the field operations we have the operations on the - * elliptic curve group itself. Points on the curve are represented in Jacobian - * coordinates. */ - -/* point_double calculates 2*(x_in, y_in, z_in) - * - * The method is taken from: - * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b - * - * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed. - * while x_out == y_in is not (maybe this works, but it's not tested). */ +// Group operations +// ---------------- +// +// Building on top of the field operations we have the operations on the +// elliptic curve group itself. Points on the curve are represented in Jacobian +// coordinates. + +// point_double calculates 2*(x_in, y_in, z_in) +// +// The method is taken from: +// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b +// +// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed. 
+// while x_out == y_in is not (maybe this works, but it's not tested). static void point_double(felem x_out, felem y_out, felem z_out, const felem x_in, const felem y_in, const felem z_in) { longfelem tmp, tmp2; @@ -926,77 +926,77 @@ static void point_double(felem x_out, felem y_out, felem z_out, smallfelem small1, small2; felem_assign(ftmp, x_in); - /* ftmp[i] < 2^106 */ + // ftmp[i] < 2^106 felem_assign(ftmp2, x_in); - /* ftmp2[i] < 2^106 */ + // ftmp2[i] < 2^106 - /* delta = z^2 */ + // delta = z^2 felem_square(tmp, z_in); felem_reduce(delta, tmp); - /* delta[i] < 2^101 */ + // delta[i] < 2^101 - /* gamma = y^2 */ + // gamma = y^2 felem_square(tmp, y_in); felem_reduce(gamma, tmp); - /* gamma[i] < 2^101 */ + // gamma[i] < 2^101 felem_shrink(small1, gamma); - /* beta = x*gamma */ + // beta = x*gamma felem_small_mul(tmp, small1, x_in); felem_reduce(beta, tmp); - /* beta[i] < 2^101 */ + // beta[i] < 2^101 - /* alpha = 3*(x-delta)*(x+delta) */ + // alpha = 3*(x-delta)*(x+delta) felem_diff(ftmp, delta); - /* ftmp[i] < 2^105 + 2^106 < 2^107 */ + // ftmp[i] < 2^105 + 2^106 < 2^107 felem_sum(ftmp2, delta); - /* ftmp2[i] < 2^105 + 2^106 < 2^107 */ + // ftmp2[i] < 2^105 + 2^106 < 2^107 felem_scalar(ftmp2, 3); - /* ftmp2[i] < 3 * 2^107 < 2^109 */ + // ftmp2[i] < 3 * 2^107 < 2^109 felem_mul(tmp, ftmp, ftmp2); felem_reduce(alpha, tmp); - /* alpha[i] < 2^101 */ + // alpha[i] < 2^101 felem_shrink(small2, alpha); - /* x' = alpha^2 - 8*beta */ + // x' = alpha^2 - 8*beta smallfelem_square(tmp, small2); felem_reduce(x_out, tmp); felem_assign(ftmp, beta); felem_scalar(ftmp, 8); - /* ftmp[i] < 8 * 2^101 = 2^104 */ + // ftmp[i] < 8 * 2^101 = 2^104 felem_diff(x_out, ftmp); - /* x_out[i] < 2^105 + 2^101 < 2^106 */ + // x_out[i] < 2^105 + 2^101 < 2^106 - /* z' = (y + z)^2 - gamma - delta */ + // z' = (y + z)^2 - gamma - delta felem_sum(delta, gamma); - /* delta[i] < 2^101 + 2^101 = 2^102 */ + // delta[i] < 2^101 + 2^101 = 2^102 felem_assign(ftmp, y_in); felem_sum(ftmp, z_in); - /* 
ftmp[i] < 2^106 + 2^106 = 2^107 */ + // ftmp[i] < 2^106 + 2^106 = 2^107 felem_square(tmp, ftmp); felem_reduce(z_out, tmp); felem_diff(z_out, delta); - /* z_out[i] < 2^105 + 2^101 < 2^106 */ + // z_out[i] < 2^105 + 2^101 < 2^106 - /* y' = alpha*(4*beta - x') - 8*gamma^2 */ + // y' = alpha*(4*beta - x') - 8*gamma^2 felem_scalar(beta, 4); - /* beta[i] < 4 * 2^101 = 2^103 */ + // beta[i] < 4 * 2^101 = 2^103 felem_diff_zero107(beta, x_out); - /* beta[i] < 2^107 + 2^103 < 2^108 */ + // beta[i] < 2^107 + 2^103 < 2^108 felem_small_mul(tmp, small2, beta); - /* tmp[i] < 7 * 2^64 < 2^67 */ + // tmp[i] < 7 * 2^64 < 2^67 smallfelem_square(tmp2, small1); - /* tmp2[i] < 7 * 2^64 */ + // tmp2[i] < 7 * 2^64 longfelem_scalar(tmp2, 8); - /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */ + // tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 longfelem_diff(tmp, tmp2); - /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */ + // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 felem_reduce_zero105(y_out, tmp); - /* y_out[i] < 2^106 */ + // y_out[i] < 2^106 } -/* point_double_small is the same as point_double, except that it operates on - * smallfelems. */ +// point_double_small is the same as point_double, except that it operates on +// smallfelems. static void point_double_small(smallfelem x_out, smallfelem y_out, smallfelem z_out, const smallfelem x_in, const smallfelem y_in, const smallfelem z_in) { @@ -1013,7 +1013,7 @@ static void point_double_small(smallfelem x_out, smallfelem y_out, felem_shrink(z_out, felem_z_out); } -/* p256_copy_conditional copies in to out iff mask is all ones. */ +// p256_copy_conditional copies in to out iff mask is all ones. static void p256_copy_conditional(felem out, const felem in, limb mask) { for (size_t i = 0; i < NLIMBS; ++i) { const limb tmp = mask & (in[i] ^ out[i]); @@ -1021,7 +1021,7 @@ static void p256_copy_conditional(felem out, const felem in, limb mask) { } } -/* copy_small_conditional copies in to out iff mask is all ones. */ +// copy_small_conditional copies in to out iff mask is all ones. 
static void copy_small_conditional(felem out, const smallfelem in, limb mask) { const uint64_t mask64 = mask; for (size_t i = 0; i < NLIMBS; ++i) { @@ -1029,16 +1029,16 @@ static void copy_small_conditional(felem out, const smallfelem in, limb mask) { } } -/* point_add calcuates (x1, y1, z1) + (x2, y2, z2) - * - * The method is taken from: - * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl, - * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity). - * - * This function includes a branch for checking whether the two input points - * are equal, (while not equal to the point at infinity). This case never - * happens during single point multiplication, so there is no timing leak for - * ECDH or ECDSA signing. */ +// point_add calculates (x1, y1, z1) + (x2, y2, z2) +// +// The method is taken from: +// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl, +// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity). +// +// This function includes a branch for checking whether the two input points +// are equal, (while not equal to the point at infinity). This case never +// happens during single point multiplication, so there is no timing leak for +// ECDH or ECDSA signing. 
static void point_add(felem x3, felem y3, felem z3, const felem x1, const felem y1, const felem z1, const int mixed, const smallfelem x2, const smallfelem y2, @@ -1053,94 +1053,94 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1, z1_is_zero = smallfelem_is_zero(small3); z2_is_zero = smallfelem_is_zero(z2); - /* ftmp = z1z1 = z1**2 */ + // ftmp = z1z1 = z1**2 smallfelem_square(tmp, small3); felem_reduce(ftmp, tmp); - /* ftmp[i] < 2^101 */ + // ftmp[i] < 2^101 felem_shrink(small1, ftmp); if (!mixed) { - /* ftmp2 = z2z2 = z2**2 */ + // ftmp2 = z2z2 = z2**2 smallfelem_square(tmp, z2); felem_reduce(ftmp2, tmp); - /* ftmp2[i] < 2^101 */ + // ftmp2[i] < 2^101 felem_shrink(small2, ftmp2); felem_shrink(small5, x1); - /* u1 = ftmp3 = x1*z2z2 */ + // u1 = ftmp3 = x1*z2z2 smallfelem_mul(tmp, small5, small2); felem_reduce(ftmp3, tmp); - /* ftmp3[i] < 2^101 */ + // ftmp3[i] < 2^101 - /* ftmp5 = z1 + z2 */ + // ftmp5 = z1 + z2 felem_assign(ftmp5, z1); felem_small_sum(ftmp5, z2); - /* ftmp5[i] < 2^107 */ + // ftmp5[i] < 2^107 - /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */ + // ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 felem_square(tmp, ftmp5); felem_reduce(ftmp5, tmp); - /* ftmp2 = z2z2 + z1z1 */ + // ftmp2 = z2z2 + z1z1 felem_sum(ftmp2, ftmp); - /* ftmp2[i] < 2^101 + 2^101 = 2^102 */ + // ftmp2[i] < 2^101 + 2^101 = 2^102 felem_diff(ftmp5, ftmp2); - /* ftmp5[i] < 2^105 + 2^101 < 2^106 */ + // ftmp5[i] < 2^105 + 2^101 < 2^106 - /* ftmp2 = z2 * z2z2 */ + // ftmp2 = z2 * z2z2 smallfelem_mul(tmp, small2, z2); felem_reduce(ftmp2, tmp); - /* s1 = ftmp2 = y1 * z2**3 */ + // s1 = ftmp2 = y1 * z2**3 felem_mul(tmp, y1, ftmp2); felem_reduce(ftmp6, tmp); - /* ftmp6[i] < 2^101 */ + // ftmp6[i] < 2^101 } else { - /* We'll assume z2 = 1 (special case z2 = 0 is handled later). */ + // We'll assume z2 = 1 (special case z2 = 0 is handled later). 
- /* u1 = ftmp3 = x1*z2z2 */ + // u1 = ftmp3 = x1*z2z2 felem_assign(ftmp3, x1); - /* ftmp3[i] < 2^106 */ + // ftmp3[i] < 2^106 - /* ftmp5 = 2z1z2 */ + // ftmp5 = 2z1z2 felem_assign(ftmp5, z1); felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2*2^106 = 2^107 */ + // ftmp5[i] < 2*2^106 = 2^107 - /* s1 = ftmp2 = y1 * z2**3 */ + // s1 = ftmp2 = y1 * z2**3 felem_assign(ftmp6, y1); - /* ftmp6[i] < 2^106 */ + // ftmp6[i] < 2^106 } - /* u2 = x2*z1z1 */ + // u2 = x2*z1z1 smallfelem_mul(tmp, x2, small1); felem_reduce(ftmp4, tmp); - /* h = ftmp4 = u2 - u1 */ + // h = ftmp4 = u2 - u1 felem_diff_zero107(ftmp4, ftmp3); - /* ftmp4[i] < 2^107 + 2^101 < 2^108 */ + // ftmp4[i] < 2^107 + 2^101 < 2^108 felem_shrink(small4, ftmp4); x_equal = smallfelem_is_zero(small4); - /* z_out = ftmp5 * h */ + // z_out = ftmp5 * h felem_small_mul(tmp, small4, ftmp5); felem_reduce(z_out, tmp); - /* z_out[i] < 2^101 */ + // z_out[i] < 2^101 - /* ftmp = z1 * z1z1 */ + // ftmp = z1 * z1z1 smallfelem_mul(tmp, small1, small3); felem_reduce(ftmp, tmp); - /* s2 = tmp = y2 * z1**3 */ + // s2 = tmp = y2 * z1**3 felem_small_mul(tmp, y2, ftmp); felem_reduce(ftmp5, tmp); - /* r = ftmp5 = (s2 - s1)*2 */ + // r = ftmp5 = (s2 - s1)*2 felem_diff_zero107(ftmp5, ftmp6); - /* ftmp5[i] < 2^107 + 2^107 = 2^108 */ + // ftmp5[i] < 2^107 + 2^107 = 2^108 felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2^109 */ + // ftmp5[i] < 2^109 felem_shrink(small1, ftmp5); y_equal = smallfelem_is_zero(small1); @@ -1149,42 +1149,42 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1, return; } - /* I = ftmp = (2h)**2 */ + // I = ftmp = (2h)**2 felem_assign(ftmp, ftmp4); felem_scalar(ftmp, 2); - /* ftmp[i] < 2*2^108 = 2^109 */ + // ftmp[i] < 2*2^108 = 2^109 felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - /* J = ftmp2 = h * I */ + // J = ftmp2 = h * I felem_mul(tmp, ftmp4, ftmp); felem_reduce(ftmp2, tmp); - /* V = ftmp4 = U1 * I */ + // V = ftmp4 = U1 * I felem_mul(tmp, ftmp3, ftmp); felem_reduce(ftmp4, tmp); - /* x_out = r**2 - J - 2V 
*/ + // x_out = r**2 - J - 2V smallfelem_square(tmp, small1); felem_reduce(x_out, tmp); felem_assign(ftmp3, ftmp4); felem_scalar(ftmp4, 2); felem_sum(ftmp4, ftmp2); - /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */ + // ftmp4[i] < 2*2^101 + 2^101 < 2^103 felem_diff(x_out, ftmp4); - /* x_out[i] < 2^105 + 2^101 */ + // x_out[i] < 2^105 + 2^101 - /* y_out = r(V-x_out) - 2 * s1 * J */ + // y_out = r(V-x_out) - 2 * s1 * J felem_diff_zero107(ftmp3, x_out); - /* ftmp3[i] < 2^107 + 2^101 < 2^108 */ + // ftmp3[i] < 2^107 + 2^101 < 2^108 felem_small_mul(tmp, small1, ftmp3); felem_mul(tmp2, ftmp6, ftmp2); longfelem_scalar(tmp2, 2); - /* tmp2[i] < 2*2^67 = 2^68 */ + // tmp2[i] < 2*2^67 = 2^68 longfelem_diff(tmp, tmp2); - /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */ + // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 felem_reduce_zero105(y_out, tmp); - /* y_out[i] < 2^106 */ + // y_out[i] < 2^106 copy_small_conditional(x_out, x2, z1_is_zero); p256_copy_conditional(x_out, x1, z2_is_zero); @@ -1197,8 +1197,8 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1, felem_assign(z3, z_out); } -/* point_add_small is the same as point_add, except that it operates on - * smallfelems. */ +// point_add_small is the same as point_add, except that it operates on +// smallfelems. static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3, smallfelem x1, smallfelem y1, smallfelem z1, smallfelem x2, smallfelem y2, smallfelem z2) { @@ -1214,42 +1214,42 @@ static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3, felem_shrink(z3, felem_z3); } -/* Base point pre computation - * -------------------------- - * - * Two different sorts of precomputed tables are used in the following code. - * Each contain various points on the curve, where each point is three field - * elements (x, y, z). - * - * For the base point table, z is usually 1 (0 for the point at infinity). 
- * This table has 2 * 16 elements, starting with the following: - * index | bits | point - * ------+---------+------------------------------ - * 0 | 0 0 0 0 | 0G - * 1 | 0 0 0 1 | 1G - * 2 | 0 0 1 0 | 2^64G - * 3 | 0 0 1 1 | (2^64 + 1)G - * 4 | 0 1 0 0 | 2^128G - * 5 | 0 1 0 1 | (2^128 + 1)G - * 6 | 0 1 1 0 | (2^128 + 2^64)G - * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G - * 8 | 1 0 0 0 | 2^192G - * 9 | 1 0 0 1 | (2^192 + 1)G - * 10 | 1 0 1 0 | (2^192 + 2^64)G - * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G - * 12 | 1 1 0 0 | (2^192 + 2^128)G - * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G - * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G - * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G - * followed by a copy of this with each element multiplied by 2^32. - * - * The reason for this is so that we can clock bits into four different - * locations when doing simple scalar multiplies against the base point, - * and then another four locations using the second 16 elements. - * - * Tables for other points have table[i] = iG for i in 0 .. 16. */ - -/* g_pre_comp is the table of precomputed base points */ +// Base point pre computation +// -------------------------- +// +// Two different sorts of precomputed tables are used in the following code. +// Each contain various points on the curve, where each point is three field +// elements (x, y, z). +// +// For the base point table, z is usually 1 (0 for the point at infinity). 
+// This table has 2 * 16 elements, starting with the following: +// index | bits | point +// ------+---------+------------------------------ +// 0 | 0 0 0 0 | 0G +// 1 | 0 0 0 1 | 1G +// 2 | 0 0 1 0 | 2^64G +// 3 | 0 0 1 1 | (2^64 + 1)G +// 4 | 0 1 0 0 | 2^128G +// 5 | 0 1 0 1 | (2^128 + 1)G +// 6 | 0 1 1 0 | (2^128 + 2^64)G +// 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G +// 8 | 1 0 0 0 | 2^192G +// 9 | 1 0 0 1 | (2^192 + 1)G +// 10 | 1 0 1 0 | (2^192 + 2^64)G +// 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G +// 12 | 1 1 0 0 | (2^192 + 2^128)G +// 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G +// 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G +// 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G +// followed by a copy of this with each element multiplied by 2^32. +// +// The reason for this is so that we can clock bits into four different +// locations when doing simple scalar multiplies against the base point, +// and then another four locations using the second 16 elements. +// +// Tables for other points have table[i] = iG for i in 0 .. 16. + +// g_pre_comp is the table of precomputed base points static const smallfelem g_pre_comp[2][16][3] = { {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2, @@ -1404,8 +1404,8 @@ static const smallfelem g_pre_comp[2][16][3] = { 0x4ab5b6b2b8753f81}, {1, 0, 0, 0}}}}; -/* select_point selects the |idx|th point from a precomputation table and - * copies it to out. */ +// select_point selects the |idx|th point from a precomputation table and +// copies it to out. 
static void select_point(const uint64_t idx, size_t size, const smallfelem pre_comp[/*size*/][3], smallfelem out[3]) { @@ -1426,7 +1426,7 @@ static void select_point(const uint64_t idx, size_t size, } } -/* get_bit returns the |i|th bit in |in| */ +// get_bit returns the |i|th bit in |in| static char get_bit(const felem_bytearray in, int i) { if (i < 0 || i >= 256) { return 0; @@ -1434,11 +1434,11 @@ static char get_bit(const felem_bytearray in, int i) { return (in[i >> 3] >> (i & 7)) & 1; } -/* Interleaved point multiplication using precomputed point multiples: The - * small point multiples 0*P, 1*P, ..., 17*P are in p_pre_comp, the scalar - * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple - * of the generator, using certain (large) precomputed multiples in g_pre_comp. - * Output point (X, Y, Z) is stored in x_out, y_out, z_out. */ +// Interleaved point multiplication using precomputed point multiples: The +// small point multiples 0*P, 1*P, ..., 17*P are in p_pre_comp, the scalar +// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple +// of the generator, using certain (large) precomputed multiples in g_pre_comp. +// Output point (X, Y, Z) is stored in x_out, y_out, z_out. static void batch_mul(felem x_out, felem y_out, felem z_out, const uint8_t *p_scalar, const uint8_t *g_scalar, const smallfelem p_pre_comp[17][3]) { @@ -1447,29 +1447,29 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, uint64_t bits; uint8_t sign, digit; - /* set nq to the point at infinity */ + // set nq to the point at infinity OPENSSL_memset(nq, 0, 3 * sizeof(felem)); - /* Loop over both scalars msb-to-lsb, interleaving additions of multiples - * of the generator (two in each of the last 32 rounds) and additions of p - * (every 5th round). */ + // Loop over both scalars msb-to-lsb, interleaving additions of multiples + // of the generator (two in each of the last 32 rounds) and additions of p + // (every 5th round). 
- int skip = 1; /* save two point operations in the first round */ + int skip = 1; // save two point operations in the first round size_t i = p_scalar != NULL ? 255 : 31; for (;;) { - /* double */ + // double if (!skip) { point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]); } - /* add multiples of the generator */ + // add multiples of the generator if (g_scalar != NULL && i <= 31) { - /* first, look 32 bits upwards */ + // first, look 32 bits upwards bits = get_bit(g_scalar, i + 224) << 3; bits |= get_bit(g_scalar, i + 160) << 2; bits |= get_bit(g_scalar, i + 96) << 1; bits |= get_bit(g_scalar, i + 32); - /* select the point to add, in constant time */ + // select the point to add, in constant time select_point(bits, 16, g_pre_comp[1], tmp); if (!skip) { @@ -1482,18 +1482,18 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, skip = 0; } - /* second, look at the current position */ + // second, look at the current position bits = get_bit(g_scalar, i + 192) << 3; bits |= get_bit(g_scalar, i + 128) << 2; bits |= get_bit(g_scalar, i + 64) << 1; bits |= get_bit(g_scalar, i); - /* select the point to add, in constant time */ + // select the point to add, in constant time select_point(bits, 16, g_pre_comp[0], tmp); point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0], tmp[1], tmp[2]); } - /* do other additions every 5 doublings */ + // do other additions every 5 doublings if (p_scalar != NULL && i % 5 == 0) { bits = get_bit(p_scalar, i + 4) << 5; bits |= get_bit(p_scalar, i + 3) << 4; @@ -1503,10 +1503,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, bits |= get_bit(p_scalar, i - 1); ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits); - /* select the point to add or subtract, in constant time. */ + // select the point to add or subtract, in constant time. 
select_point(digit, 17, p_pre_comp, tmp); - smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative - * point */ + smallfelem_neg(ftmp, tmp[1]); // (X, -Y, Z) is the negative + // point copy_small_conditional(ftmp, tmp[1], (((limb)sign) - 1)); felem_contract(tmp[1], ftmp); @@ -1531,13 +1531,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, felem_assign(z_out, nq[2]); } -/******************************************************************************/ -/* - * OPENSSL EC_METHOD FUNCTIONS - */ +// OPENSSL EC_METHOD FUNCTIONS -/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') = - * (X/Z^2, Y/Z^3). */ +// Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') = +// (X/Z^2, Y/Z^3). static int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, @@ -1612,14 +1609,14 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, } if (p != NULL && p_scalar != NULL) { - /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e., - * they contribute nothing to the linear combination. */ + // We treat NULL scalars as 0, and NULL points as points at infinity, i.e., + // they contribute nothing to the linear combination. OPENSSL_memset(&p_secret, 0, sizeof(p_secret)); OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp)); size_t num_bytes; - /* Reduce g_scalar to 0 <= g_scalar < 2^256. */ + // Reduce g_scalar to 0 <= g_scalar < 2^256. if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) { - /* This is an unusual input, and we don't guarantee constant-timeness. */ + // This is an unusual input, and we don't guarantee constant-timeness. 
if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); goto err; @@ -1629,7 +1626,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, num_bytes = BN_bn2bin(p_scalar, tmp); } flip_endian(p_secret, tmp, num_bytes); - /* Precompute multiples. */ + // Precompute multiples. if (!BN_to_felem(x_out, &p->X) || !BN_to_felem(y_out, &p->Y) || !BN_to_felem(z_out, &p->Z)) { @@ -1657,10 +1654,10 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, size_t num_bytes; OPENSSL_memset(g_secret, 0, sizeof(g_secret)); - /* reduce g_scalar to 0 <= g_scalar < 2^256 */ + // reduce g_scalar to 0 <= g_scalar < 2^256 if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) { - /* this is an unusual input, and we don't guarantee - * constant-timeness. */ + // this is an unusual input, and we don't guarantee + // constant-timeness. if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) { OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); goto err; @@ -1676,7 +1673,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, g_scalar != NULL ? g_secret : NULL, (const smallfelem(*)[3]) &p_pre_comp); - /* reduce the output to its unique minimal representation */ + // reduce the output to its unique minimal representation felem_contract(x_in, x_out); felem_contract(y_in, y_out); felem_contract(z_in, z_out); @@ -1708,4 +1705,4 @@ DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp256_method) { out->field_decode = NULL; }; -#endif /* 64_BIT && !WINDOWS */ +#endif // 64_BIT && !WINDOWS diff --git a/crypto/fipsmodule/ec/p256-x86_64-table.h b/crypto/fipsmodule/ec/p256-x86_64-table.h index e4705f8e..575a2034 100644 --- a/crypto/fipsmodule/ec/p256-x86_64-table.h +++ b/crypto/fipsmodule/ec/p256-x86_64-table.h @@ -12,17 +12,17 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -/* This is the precomputed constant time access table for the code in - * p256-x86_64.c, for the default generator. The table consists of 37 - * subtables, each subtable contains 64 affine points. The affine points are - * encoded as eight uint64's, four for the x coordinate and four for the y. - * Both values are in little-endian order. There are 37 tables because a - * signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37. - * Within each table there are 64 values because the 6-bit wNAF value can take - * 64 values, ignoring the sign bit, which is implemented by performing a - * negation of the affine point when required. We would like to align it to 2MB - * in order to increase the chances of using a large page but that appears to - * lead to invalid ELF files being produced. */ +// This is the precomputed constant time access table for the code in +// p256-x86_64.c, for the default generator. The table consists of 37 +// subtables, each subtable contains 64 affine points. The affine points are +// encoded as eight uint64's, four for the x coordinate and four for the y. +// Both values are in little-endian order. There are 37 tables because a +// signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37. +// Within each table there are 64 values because the 6-bit wNAF value can take +// 64 values, ignoring the sign bit, which is implemented by performing a +// negation of the affine point when required. We would like to align it to 2MB +// in order to increase the chances of using a large page but that appears to +// lead to invalid ELF files being produced. 
static const alignas(4096) BN_ULONG ecp_nistz256_precomputed[37][64 * sizeof(P256_POINT_AFFINE) / diff --git a/crypto/fipsmodule/ec/p256-x86_64.c b/crypto/fipsmodule/ec/p256-x86_64.c index de80dca0..8b516773 100644 --- a/crypto/fipsmodule/ec/p256-x86_64.c +++ b/crypto/fipsmodule/ec/p256-x86_64.c @@ -12,13 +12,13 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* Developers and authors: - * Shay Gueron (1, 2), and Vlad Krasnov (1) - * (1) Intel Corporation, Israel Development Center - * (2) University of Haifa - * Reference: - * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with - * 256 Bit Primes" */ +// Developers and authors: +// Shay Gueron (1, 2), and Vlad Krasnov (1) +// (1) Intel Corporation, Israel Development Center +// (2) University of Haifa +// Reference: +// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with +// 256 Bit Primes" #include @@ -42,16 +42,16 @@ typedef P256_POINT_AFFINE PRECOMP256_ROW[64]; -/* One converted into the Montgomery domain */ +// One converted into the Montgomery domain static const BN_ULONG ONE[P256_LIMBS] = { TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000), TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe), }; -/* Precomputed tables for the default generator */ +// Precomputed tables for the default generator #include "p256-x86_64-table.h" -/* Recode window to a signed digit, see util-64.c for details */ +// Recode window to a signed digit, see util-64.c for details static unsigned booth_recode_w5(unsigned in) { unsigned s, d; @@ -74,11 +74,11 @@ static unsigned booth_recode_w7(unsigned in) { return (d << 1) + (s & 1); } -/* copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is - * if |move| is zero. - * - * WARNING: this breaks the usual convention of constant-time functions - * returning masks. 
*/ +// copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is +// if |move| is zero. +// +// WARNING: this breaks the usual convention of constant-time functions +// returning masks. static void copy_conditional(BN_ULONG dst[P256_LIMBS], const BN_ULONG src[P256_LIMBS], BN_ULONG move) { BN_ULONG mask1 = ((BN_ULONG)0) - move; @@ -96,32 +96,32 @@ static void copy_conditional(BN_ULONG dst[P256_LIMBS], } } -/* is_not_zero returns one iff in != 0 and zero otherwise. - * - * WARNING: this breaks the usual convention of constant-time functions - * returning masks. - * - * (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64) - * (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f) - * ) - * - * (declare-fun x () (_ BitVec 64)) - * - * (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001))) - * (check-sat) - * - * (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000))) - * (check-sat) - * */ +// is_not_zero returns one iff in != 0 and zero otherwise. +// +// WARNING: this breaks the usual convention of constant-time functions +// returning masks. +// +// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64) +// (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f) +// ) +// +// (declare-fun x () (_ BitVec 64)) +// +// (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001))) +// (check-sat) +// +// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000))) +// (check-sat) +// static BN_ULONG is_not_zero(BN_ULONG in) { in |= (0 - in); in >>= BN_BITS2 - 1; return in; } -/* ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p. - * That is, |r| is the modular inverse of |in| for input and output in the - * Montgomery domain. */ +// ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p. 
+// That is, |r| is the modular inverse of |in| for input and output in the +// Montgomery domain. static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], const BN_ULONG in[P256_LIMBS]) { /* The poly is ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff @@ -136,29 +136,29 @@ static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], int i; ecp_nistz256_sqr_mont(res, in); - ecp_nistz256_mul_mont(p2, res, in); /* 3*p */ + ecp_nistz256_mul_mont(p2, res, in); // 3*p ecp_nistz256_sqr_mont(res, p2); ecp_nistz256_sqr_mont(res, res); - ecp_nistz256_mul_mont(p4, res, p2); /* f*p */ + ecp_nistz256_mul_mont(p4, res, p2); // f*p ecp_nistz256_sqr_mont(res, p4); ecp_nistz256_sqr_mont(res, res); ecp_nistz256_sqr_mont(res, res); ecp_nistz256_sqr_mont(res, res); - ecp_nistz256_mul_mont(p8, res, p4); /* ff*p */ + ecp_nistz256_mul_mont(p8, res, p4); // ff*p ecp_nistz256_sqr_mont(res, p8); for (i = 0; i < 7; i++) { ecp_nistz256_sqr_mont(res, res); } - ecp_nistz256_mul_mont(p16, res, p8); /* ffff*p */ + ecp_nistz256_mul_mont(p16, res, p8); // ffff*p ecp_nistz256_sqr_mont(res, p16); for (i = 0; i < 15; i++) { ecp_nistz256_sqr_mont(res, res); } - ecp_nistz256_mul_mont(p32, res, p16); /* ffffffff*p */ + ecp_nistz256_mul_mont(p32, res, p16); // ffffffff*p ecp_nistz256_sqr_mont(res, p32); for (i = 0; i < 31; i++) { @@ -201,8 +201,8 @@ static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], ecp_nistz256_mul_mont(r, res, in); } -/* ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and - * returns one if it fits. Otherwise it returns zero. */ +// ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and +// returns one if it fits. Otherwise it returns zero. 
static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS], const BIGNUM *in) { if (in->top > P256_LIMBS) { @@ -214,7 +214,7 @@ static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS], return 1; } -/* r = p * p_scalar */ +// r = p * p_scalar static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) { @@ -224,9 +224,9 @@ static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, static const unsigned kWindowSize = 5; static const unsigned kMask = (1 << (5 /* kWindowSize */ + 1)) - 1; - /* A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should - * add no more than 63 bytes of overhead. Thus, |table| should require - * ~1599 ((96 * 16) + 63) bytes of stack space. */ + // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should + // add no more than 63 bytes of overhead. Thus, |table| should require + // ~1599 ((96 * 16) + 63) bytes of stack space. alignas(64) P256_POINT table[16]; uint8_t p_str[33]; @@ -279,9 +279,9 @@ static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, p_str[j] = 0; } - /* table[0] is implicitly (0,0,0) (the point at infinity), therefore it is - * not stored. All other values are actually stored with an offset of -1 in - * table. */ + // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is + // not stored. All other values are actually stored with an offset of -1 in + // table. 
P256_POINT *row = table; if (!ecp_nistz256_bignum_to_field_elem(row[1 - 1].X, &p->X) || @@ -341,7 +341,7 @@ static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, ecp_nistz256_point_double(r, r); } - /* Final window */ + // Final window wvalue = p_str[0]; wvalue = (wvalue << 1) & kMask; @@ -426,7 +426,7 @@ static int ecp_nistz256_points_mul( p_str[i] = 0; } - /* First window */ + // First window unsigned wvalue = (p_str[0] << 1) & kMask; unsigned index = kWindowSize; @@ -439,9 +439,9 @@ static int ecp_nistz256_points_mul( ecp_nistz256_neg(p.p.Z, p.p.Y); copy_conditional(p.p.Y, p.p.Z, wvalue & 1); - /* Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p| - * is infinity and |ONE| otherwise. |p| was computed from the table, so it - * is infinity iff |wvalue >> 1| is zero. */ + // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p| + // is infinity and |ONE| otherwise. |p| was computed from the table, so it + // is infinity iff |wvalue >> 1| is zero. OPENSSL_memset(p.p.Z, 0, sizeof(p.p.Z)); copy_conditional(p.p.Z, ONE, is_not_zero(wvalue >> 1)); @@ -478,7 +478,7 @@ static int ecp_nistz256_points_mul( } } - /* Not constant-time, but we're only operating on the public output. */ + // Not constant-time, but we're only operating on the public output. if (!bn_set_words(&r->X, p.p.X, P256_LIMBS) || !bn_set_words(&r->Y, p.p.Y, P256_LIMBS) || !bn_set_words(&r->Z, p.p.Z, P256_LIMBS)) { @@ -516,10 +516,10 @@ static int ecp_nistz256_get_affine(const EC_GROUP *group, const EC_POINT *point, ecp_nistz256_mod_inverse_mont(z_inv3, point_z); ecp_nistz256_sqr_mont(z_inv2, z_inv3); - /* Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate - * and then calling |ecp_nistz256_from_mont| again to convert the |y| - * coordinate below, convert the common factor |z_inv2| once now, saving one - * reduction. 
*/ + // Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate + // and then calling |ecp_nistz256_from_mont| again to convert the |y| + // coordinate below, convert the common factor |z_inv2| once now, saving one + // reduction. ecp_nistz256_from_mont(z_inv2, z_inv2); if (x != NULL) { diff --git a/crypto/fipsmodule/ec/p256-x86_64.h b/crypto/fipsmodule/ec/p256-x86_64.h index 0132348e..6a0bebb7 100644 --- a/crypto/fipsmodule/ec/p256-x86_64.h +++ b/crypto/fipsmodule/ec/p256-x86_64.h @@ -27,30 +27,30 @@ extern "C" { #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ !defined(OPENSSL_SMALL) -/* P-256 field operations. - * - * An element mod P in P-256 is represented as a little-endian array of - * |P256_LIMBS| |BN_ULONG|s, spanning the full range of values. - * - * The following functions take fully-reduced inputs mod P and give - * fully-reduced outputs. They may be used in-place. */ +// P-256 field operations. +// +// An element mod P in P-256 is represented as a little-endian array of +// |P256_LIMBS| |BN_ULONG|s, spanning the full range of values. +// +// The following functions take fully-reduced inputs mod P and give +// fully-reduced outputs. They may be used in-place. #define P256_LIMBS (256 / BN_BITS2) -/* ecp_nistz256_neg sets |res| to -|a| mod P. */ +// ecp_nistz256_neg sets |res| to -|a| mod P. void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]); -/* ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P. */ +// ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P. void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS], const BN_ULONG b[P256_LIMBS]); -/* ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P. */ +// ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P. 
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]); -/* ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain - * by multiplying with 1. */ +// ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain +// by multiplying with 1. static inline void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG in[P256_LIMBS]) { static const BN_ULONG ONE[P256_LIMBS] = { 1 }; @@ -58,47 +58,47 @@ static inline void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS], } -/* P-256 point operations. - * - * The following functions may be used in-place. All coordinates are in the - * Montgomery domain. */ +// P-256 point operations. +// +// The following functions may be used in-place. All coordinates are in the +// Montgomery domain. -/* A P256_POINT represents a P-256 point in Jacobian coordinates. */ +// A P256_POINT represents a P-256 point in Jacobian coordinates. typedef struct { BN_ULONG X[P256_LIMBS]; BN_ULONG Y[P256_LIMBS]; BN_ULONG Z[P256_LIMBS]; } P256_POINT; -/* A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity - * is encoded as (0, 0). */ +// A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity +// is encoded as (0, 0). typedef struct { BN_ULONG X[P256_LIMBS]; BN_ULONG Y[P256_LIMBS]; } P256_POINT_AFFINE; -/* ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16 - * and all zeros (the point at infinity) if |index| is 0. This is done in - * constant time. */ +// ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16 +// and all zeros (the point at infinity) if |index| is 0. This is done in +// constant time. void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16], int index); -/* ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64 - * and all zeros (the point at infinity) if |index| is 0. This is done in - * constant time. 
*/ +// ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64 +// and all zeros (the point at infinity) if |index| is 0. This is done in +// constant time. void ecp_nistz256_select_w7(P256_POINT_AFFINE *val, const P256_POINT_AFFINE in_t[64], int index); -/* ecp_nistz256_point_double sets |r| to |a| doubled. */ +// ecp_nistz256_point_double sets |r| to |a| doubled. void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a); -/* ecp_nistz256_point_add adds |a| to |b| and places the result in |r|. */ +// ecp_nistz256_point_add adds |a| to |b| and places the result in |r|. void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a, const P256_POINT *b); -/* ecp_nistz256_point_add_affine adds |a| to |b| and places the result in - * |r|. |a| and |b| must not represent the same point unless they are both - * infinity. */ +// ecp_nistz256_point_add_affine adds |a| to |b| and places the result in +// |r|. |a| and |b| must not represent the same point unless they are both +// infinity. void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a, const P256_POINT_AFFINE *b); @@ -107,7 +107,7 @@ void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a, #if defined(__cplusplus) -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_EC_P256_X86_64_H */ +#endif // OPENSSL_HEADER_EC_P256_X86_64_H diff --git a/crypto/fipsmodule/ec/simple.c b/crypto/fipsmodule/ec/simple.c index 1a03d84a..75c06da1 100644 --- a/crypto/fipsmodule/ec/simple.c +++ b/crypto/fipsmodule/ec/simple.c @@ -77,16 +77,16 @@ #include "../../internal.h" -/* Most method functions in this file are designed to work with non-trivial - * representations of field elements if necessary (see ecp_mont.c): while - * standard modular addition and subtraction are used, the field_mul and - * field_sqr methods will be used for multiplication, and field_encode and - * field_decode (if defined) will be used for converting between - * representations. 
- * - * Functions here specifically assume that if a non-trivial representation is - * used, it is a Montgomery representation (i.e. 'encoding' means multiplying - * by some factor R). */ +// Most method functions in this file are designed to work with non-trivial +// representations of field elements if necessary (see ecp_mont.c): while +// standard modular addition and subtraction are used, the field_mul and +// field_sqr methods will be used for multiplication, and field_encode and +// field_decode (if defined) will be used for converting between +// representations. +// +// Functions here specifically assume that if a non-trivial representation is +// used, it is a Montgomery representation (i.e. 'encoding' means multiplying +// by some factor R). int ec_GFp_simple_group_init(EC_GROUP *group) { BN_init(&group->field); @@ -123,7 +123,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, BN_CTX *new_ctx = NULL; BIGNUM *tmp_a; - /* p must be a prime > 3 */ + // p must be a prime > 3 if (BN_num_bits(p) <= 2 || !BN_is_odd(p)) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD); return 0; @@ -142,13 +142,13 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->field */ + // group->field if (!BN_copy(&group->field, p)) { goto err; } BN_set_negative(&group->field, 0); - /* group->a */ + // group->a if (!BN_nnmod(tmp_a, a, p, ctx)) { goto err; } @@ -160,7 +160,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->b */ + // group->b if (!BN_nnmod(&group->b, b, p, ctx)) { goto err; } @@ -169,7 +169,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->a_is_minus3 */ + // group->a_is_minus3 if (!BN_add_word(tmp_a, 3)) { goto err; } @@ -360,7 +360,7 @@ int ec_GFp_simple_point_set_affine_coordinates(const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx) { if (x == NULL || y == NULL) { - /* unlike for 
projective coordinates, we do not tolerate this */ + // unlike for projective coordinates, we do not tolerate this OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return 0; } @@ -412,88 +412,87 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, goto end; } - /* Note that in this function we must not read components of 'a' or 'b' - * once we have written the corresponding components of 'r'. - * ('r' might be one of 'a' or 'b'.) - */ + // Note that in this function we must not read components of 'a' or 'b' + // once we have written the corresponding components of 'r'. + // ('r' might be one of 'a' or 'b'.) - /* n1, n2 */ + // n1, n2 int b_Z_is_one = BN_cmp(&b->Z, &group->one) == 0; if (b_Z_is_one) { if (!BN_copy(n1, &a->X) || !BN_copy(n2, &a->Y)) { goto end; } - /* n1 = X_a */ - /* n2 = Y_a */ + // n1 = X_a + // n2 = Y_a } else { if (!field_sqr(group, n0, &b->Z, ctx) || !field_mul(group, n1, &a->X, n0, ctx)) { goto end; } - /* n1 = X_a * Z_b^2 */ + // n1 = X_a * Z_b^2 if (!field_mul(group, n0, n0, &b->Z, ctx) || !field_mul(group, n2, &a->Y, n0, ctx)) { goto end; } - /* n2 = Y_a * Z_b^3 */ + // n2 = Y_a * Z_b^3 } - /* n3, n4 */ + // n3, n4 int a_Z_is_one = BN_cmp(&a->Z, &group->one) == 0; if (a_Z_is_one) { if (!BN_copy(n3, &b->X) || !BN_copy(n4, &b->Y)) { goto end; } - /* n3 = X_b */ - /* n4 = Y_b */ + // n3 = X_b + // n4 = Y_b } else { if (!field_sqr(group, n0, &a->Z, ctx) || !field_mul(group, n3, &b->X, n0, ctx)) { goto end; } - /* n3 = X_b * Z_a^2 */ + // n3 = X_b * Z_a^2 if (!field_mul(group, n0, n0, &a->Z, ctx) || !field_mul(group, n4, &b->Y, n0, ctx)) { goto end; } - /* n4 = Y_b * Z_a^3 */ + // n4 = Y_b * Z_a^3 } - /* n5, n6 */ + // n5, n6 if (!BN_mod_sub_quick(n5, n1, n3, p) || !BN_mod_sub_quick(n6, n2, n4, p)) { goto end; } - /* n5 = n1 - n3 */ - /* n6 = n2 - n4 */ + // n5 = n1 - n3 + // n6 = n2 - n4 if (BN_is_zero(n5)) { if (BN_is_zero(n6)) { - /* a is the same point as b */ + // a is the same point as b BN_CTX_end(ctx); ret = 
EC_POINT_dbl(group, r, a, ctx); ctx = NULL; goto end; } else { - /* a is the inverse of b */ + // a is the inverse of b BN_zero(&r->Z); ret = 1; goto end; } } - /* 'n7', 'n8' */ + // 'n7', 'n8' if (!BN_mod_add_quick(n1, n1, n3, p) || !BN_mod_add_quick(n2, n2, n4, p)) { goto end; } - /* 'n7' = n1 + n3 */ - /* 'n8' = n2 + n4 */ + // 'n7' = n1 + n3 + // 'n8' = n2 + n4 - /* Z_r */ + // Z_r if (a_Z_is_one && b_Z_is_one) { if (!BN_copy(&r->Z, n5)) { goto end; @@ -515,28 +514,28 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, } } - /* Z_r = Z_a * Z_b * n5 */ + // Z_r = Z_a * Z_b * n5 - /* X_r */ + // X_r if (!field_sqr(group, n0, n6, ctx) || !field_sqr(group, n4, n5, ctx) || !field_mul(group, n3, n1, n4, ctx) || !BN_mod_sub_quick(&r->X, n0, n3, p)) { goto end; } - /* X_r = n6^2 - n5^2 * 'n7' */ + // X_r = n6^2 - n5^2 * 'n7' - /* 'n9' */ + // 'n9' if (!BN_mod_lshift1_quick(n0, &r->X, p) || !BN_mod_sub_quick(n0, n3, n0, p)) { goto end; } - /* n9 = n5^2 * 'n7' - 2 * X_r */ + // n9 = n5^2 * 'n7' - 2 * X_r - /* Y_r */ + // Y_r if (!field_mul(group, n0, n0, n6, ctx) || !field_mul(group, n5, n4, n5, ctx)) { - goto end; /* now n5 is n5^3 */ + goto end; // now n5 is n5^3 } if (!field_mul(group, n1, n2, n5, ctx) || !BN_mod_sub_quick(n0, n0, n1, p)) { @@ -545,17 +544,17 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, if (BN_is_odd(n0) && !BN_add(n0, n0, p)) { goto end; } - /* now 0 <= n0 < 2*p, and n0 is even */ + // now 0 <= n0 < 2*p, and n0 is even if (!BN_rshift1(&r->Y, n0)) { goto end; } - /* Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2 */ + // Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2 ret = 1; end: if (ctx) { - /* otherwise we already called BN_CTX_end */ + // otherwise we already called BN_CTX_end BN_CTX_end(ctx); } BN_CTX_free(new_ctx); @@ -597,12 +596,11 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, goto err; } - /* Note that in this function we must not read components of 'a' - * once we 
have written the corresponding components of 'r'. - * ('r' might the same as 'a'.) - */ + // Note that in this function we must not read components of 'a' + // once we have written the corresponding components of 'r'. + // ('r' might the same as 'a'.) - /* n1 */ + // n1 if (BN_cmp(&a->Z, &group->one) == 0) { if (!field_sqr(group, n0, &a->X, ctx) || !BN_mod_lshift1_quick(n1, n0, p) || @@ -610,7 +608,7 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n0, &group->a, p)) { goto err; } - /* n1 = 3 * X_a^2 + a_curve */ + // n1 = 3 * X_a^2 + a_curve } else if (group->a_is_minus3) { if (!field_sqr(group, n1, &a->Z, ctx) || !BN_mod_add_quick(n0, &a->X, n1, p) || @@ -620,8 +618,8 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n0, n1, p)) { goto err; } - /* n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2) - * = 3 * X_a^2 - 3 * Z_a^4 */ + // n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2) + // = 3 * X_a^2 - 3 * Z_a^4 } else { if (!field_sqr(group, n0, &a->X, ctx) || !BN_mod_lshift1_quick(n1, n0, p) || @@ -632,10 +630,10 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n1, n0, p)) { goto err; } - /* n1 = 3 * X_a^2 + a_curve * Z_a^4 */ + // n1 = 3 * X_a^2 + a_curve * Z_a^4 } - /* Z_r */ + // Z_r if (BN_cmp(&a->Z, &group->one) == 0) { if (!BN_copy(n0, &a->Y)) { goto err; @@ -646,38 +644,38 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, if (!BN_mod_lshift1_quick(&r->Z, n0, p)) { goto err; } - /* Z_r = 2 * Y_a * Z_a */ + // Z_r = 2 * Y_a * Z_a - /* n2 */ + // n2 if (!field_sqr(group, n3, &a->Y, ctx) || !field_mul(group, n2, &a->X, n3, ctx) || !BN_mod_lshift_quick(n2, n2, 2, p)) { goto err; } - /* n2 = 4 * X_a * Y_a^2 */ + // n2 = 4 * X_a * Y_a^2 - /* X_r */ + // X_r if (!BN_mod_lshift1_quick(n0, n2, p) || !field_sqr(group, &r->X, n1, ctx) || !BN_mod_sub_quick(&r->X, &r->X, n0, p)) { goto err; } - /* X_r = n1^2 
- 2 * n2 */ + // X_r = n1^2 - 2 * n2 - /* n3 */ + // n3 if (!field_sqr(group, n0, n3, ctx) || !BN_mod_lshift_quick(n3, n0, 3, p)) { goto err; } - /* n3 = 8 * Y_a^4 */ + // n3 = 8 * Y_a^4 - /* Y_r */ + // Y_r if (!BN_mod_sub_quick(n0, n2, &r->X, p) || !field_mul(group, n0, n1, n0, ctx) || !BN_mod_sub_quick(&r->Y, n0, n3, p)) { goto err; } - /* Y_r = n1 * (n2 - X_r) - n3 */ + // Y_r = n1 * (n2 - X_r) - n3 ret = 1; @@ -689,7 +687,7 @@ err: int ec_GFp_simple_invert(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) { if (EC_POINT_is_at_infinity(group, point) || BN_is_zero(&point->Y)) { - /* point is its own inverse */ + // point is its own inverse return 1; } @@ -734,17 +732,16 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, goto err; } - /* We have a curve defined by a Weierstrass equation - * y^2 = x^3 + a*x + b. - * The point to consider is given in Jacobian projective coordinates - * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3). - * Substituting this and multiplying by Z^6 transforms the above equation - * into - * Y^2 = X^3 + a*X*Z^4 + b*Z^6. - * To test this, we add up the right-hand side in 'rh'. - */ + // We have a curve defined by a Weierstrass equation + // y^2 = x^3 + a*x + b. + // The point to consider is given in Jacobian projective coordinates + // where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3). + // Substituting this and multiplying by Z^6 transforms the above equation + // into + // Y^2 = X^3 + a*X*Z^4 + b*Z^6. + // To test this, we add up the right-hand side in 'rh'. 
- /* rh := X^2 */ + // rh := X^2 if (!field_sqr(group, rh, &point->X, ctx)) { goto err; } @@ -756,7 +753,7 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, goto err; } - /* rh := (rh + a*Z^4)*X */ + // rh := (rh + a*Z^4)*X if (group->a_is_minus3) { if (!BN_mod_lshift1_quick(tmp, Z4, p) || !BN_mod_add_quick(tmp, tmp, Z4, p) || @@ -772,24 +769,24 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, } } - /* rh := rh + b*Z^6 */ + // rh := rh + b*Z^6 if (!field_mul(group, tmp, &group->b, Z6, ctx) || !BN_mod_add_quick(rh, rh, tmp, p)) { goto err; } } else { - /* rh := (rh + a)*X */ + // rh := (rh + a)*X if (!BN_mod_add_quick(rh, rh, &group->a, p) || !field_mul(group, rh, rh, &point->X, ctx)) { goto err; } - /* rh := rh + b */ + // rh := rh + b if (!BN_mod_add_quick(rh, rh, &group->b, p)) { goto err; } } - /* 'lh' := Y^2 */ + // 'lh' := Y^2 if (!field_sqr(group, tmp, &point->Y, ctx)) { goto err; } @@ -804,11 +801,10 @@ err: int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx) { - /* return values: - * -1 error - * 0 equal (in affine coordinates) - * 1 not equal - */ + // return values: + // -1 error + // 0 equal (in affine coordinates) + // 1 not equal int (*field_mul)(const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); @@ -852,11 +848,10 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, goto end; } - /* We have to decide whether - * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3), - * or equivalently, whether - * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3). - */ + // We have to decide whether + // (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3), + // or equivalently, whether + // (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3). 
if (!b_Z_is_one) { if (!field_sqr(group, Zb23, &b->Z, ctx) || @@ -877,9 +872,9 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, tmp2_ = &b->X; } - /* compare X_a*Z_b^2 with X_b*Z_a^2 */ + // compare X_a*Z_b^2 with X_b*Z_a^2 if (BN_cmp(tmp1_, tmp2_) != 0) { - ret = 1; /* points differ */ + ret = 1; // points differ goto end; } @@ -889,7 +884,7 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, !field_mul(group, tmp1, &a->Y, Zb23, ctx)) { goto end; } - /* tmp1_ = tmp1 */ + // tmp1_ = tmp1 } else { tmp1_ = &a->Y; } @@ -898,18 +893,18 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, !field_mul(group, tmp2, &b->Y, Za23, ctx)) { goto end; } - /* tmp2_ = tmp2 */ + // tmp2_ = tmp2 } else { tmp2_ = &b->Y; } - /* compare Y_a*Z_b^3 with Y_b*Z_a^3 */ + // compare Y_a*Z_b^3 with Y_b*Z_a^3 if (BN_cmp(tmp1_, tmp2_) != 0) { - ret = 1; /* points differ */ + ret = 1; // points differ goto end; } - /* points are equal */ + // points are equal ret = 0; end: @@ -997,8 +992,8 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } } - /* Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z, - * skipping any zero-valued inputs (pretend that they're 1). */ + // Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z, + // skipping any zero-valued inputs (pretend that they're 1). if (!BN_is_zero(&points[0]->Z)) { if (!BN_copy(prod_Z[0], &points[0]->Z)) { @@ -1023,13 +1018,13 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } } - /* Now use a single explicit inversion to replace every non-zero points[i]->Z - * by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant- - * time inversion using Fermat's Little Theorem because this function is - * usually only used for converting multiples of a public key point to - * affine, and a public key point isn't secret. 
If we were to use Fermat's - * Little Theorem then the cost of the inversion would usually be so high - * that converting the multiples to affine would be counterproductive. */ + // Now use a single explicit inversion to replace every non-zero points[i]->Z + // by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant- + // time inversion using Fermat's Little Theorem because this function is + // usually only used for converting multiples of a public key point to + // affine, and a public key point isn't secret. If we were to use Fermat's + // Little Theorem then the cost of the inversion would usually be so high + // that converting the multiples to affine would be counterproductive. int no_inverse; if (!BN_mod_inverse_odd(tmp, &no_inverse, prod_Z[num - 1], &group->field, ctx)) { @@ -1038,9 +1033,9 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } if (group->meth->field_encode != NULL) { - /* In the Montgomery case, we just turned R*H (representing H) - * into 1/(R*H), but we need R*(1/H) (representing 1/H); - * i.e. we need to multiply by the Montgomery factor twice. */ + // In the Montgomery case, we just turned R*H (representing H) + // into 1/(R*H), but we need R*(1/H) (representing 1/H); + // i.e. we need to multiply by the Montgomery factor twice. if (!group->meth->field_encode(group, tmp, tmp, ctx) || !group->meth->field_encode(group, tmp, tmp, ctx)) { goto err; @@ -1048,34 +1043,34 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } for (size_t i = num - 1; i > 0; --i) { - /* Loop invariant: tmp is the product of the inverses of - * points[0]->Z .. points[i]->Z (zero-valued inputs skipped). */ + // Loop invariant: tmp is the product of the inverses of + // points[0]->Z .. points[i]->Z (zero-valued inputs skipped). if (BN_is_zero(&points[i]->Z)) { continue; } - /* Set tmp_Z to the inverse of points[i]->Z (as product - * of Z inverses 0 .. i, Z values 0 .. i - 1). 
*/ + // Set tmp_Z to the inverse of points[i]->Z (as product + // of Z inverses 0 .. i, Z values 0 .. i - 1). if (!group->meth->field_mul(group, tmp_Z, prod_Z[i - 1], tmp, ctx) || - /* Update tmp to satisfy the loop invariant for i - 1. */ + // Update tmp to satisfy the loop invariant for i - 1. !group->meth->field_mul(group, tmp, tmp, &points[i]->Z, ctx) || - /* Replace points[i]->Z by its inverse. */ + // Replace points[i]->Z by its inverse. !BN_copy(&points[i]->Z, tmp_Z)) { goto err; } } - /* Replace points[0]->Z by its inverse. */ + // Replace points[0]->Z by its inverse. if (!BN_is_zero(&points[0]->Z) && !BN_copy(&points[0]->Z, tmp)) { goto err; } - /* Finally, fix up the X and Y coordinates for all points. */ + // Finally, fix up the X and Y coordinates for all points. for (size_t i = 0; i < num; i++) { EC_POINT *p = points[i]; if (!BN_is_zero(&p->Z)) { - /* turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1). */ + // turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1). if (!group->meth->field_sqr(group, tmp, &p->Z, ctx) || !group->meth->field_mul(group, &p->X, &p->X, tmp, ctx) || !group->meth->field_mul(group, tmp, tmp, &p->Z, ctx) || diff --git a/crypto/fipsmodule/ec/util-64.c b/crypto/fipsmodule/ec/util-64.c index 40062712..0cb117b4 100644 --- a/crypto/fipsmodule/ec/util-64.c +++ b/crypto/fipsmodule/ec/util-64.c @@ -21,77 +21,77 @@ #include "internal.h" -/* This function looks at 5+1 scalar bits (5 current, 1 adjacent less - * significant bit), and recodes them into a signed digit for use in fast point - * multiplication: the use of signed rather than unsigned digits means that - * fewer points need to be precomputed, given that point inversion is easy (a - * precomputed point dP makes -dP available as well). - * - * BACKGROUND: - * - * Signed digits for multiplication were introduced by Booth ("A signed binary - * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, - * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers. 
- * Booth's original encoding did not generally improve the density of nonzero - * digits over the binary representation, and was merely meant to simplify the - * handling of signed factors given in two's complement; but it has since been - * shown to be the basis of various signed-digit representations that do have - * further advantages, including the wNAF, using the following general - * approach: - * - * (1) Given a binary representation - * - * b_k ... b_2 b_1 b_0, - * - * of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1 - * by using bit-wise subtraction as follows: - * - * b_k b_(k-1) ... b_2 b_1 b_0 - * - b_k ... b_3 b_2 b_1 b_0 - * ------------------------------------- - * s_k b_(k-1) ... s_3 s_2 s_1 s_0 - * - * A left-shift followed by subtraction of the original value yields a new - * representation of the same value, using signed bits s_i = b_(i+1) - b_i. - * This representation from Booth's paper has since appeared in the - * literature under a variety of different names including "reversed binary - * form", "alternating greedy expansion", "mutual opposite form", and - * "sign-alternating {+-1}-representation". - * - * An interesting property is that among the nonzero bits, values 1 and -1 - * strictly alternate. - * - * (2) Various window schemes can be applied to the Booth representation of - * integers: for example, right-to-left sliding windows yield the wNAF - * (a signed-digit encoding independently discovered by various researchers - * in the 1990s), and left-to-right sliding windows yield a left-to-right - * equivalent of the wNAF (independently discovered by various researchers - * around 2004). 
- * - * To prevent leaking information through side channels in point multiplication, - * we need to recode the given integer into a regular pattern: sliding windows - * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few - * decades older: we'll be using the so-called "modified Booth encoding" due to - * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49 - * (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five - * signed bits into a signed digit: - * - * s_(4j + 4) s_(4j + 3) s_(4j + 2) s_(4j + 1) s_(4j) - * - * The sign-alternating property implies that the resulting digit values are - * integers from -16 to 16. - * - * Of course, we don't actually need to compute the signed digits s_i as an - * intermediate step (that's just a nice way to see how this scheme relates - * to the wNAF): a direct computation obtains the recoded digit from the - * six bits b_(4j + 4) ... b_(4j - 1). - * - * This function takes those five bits as an integer (0 .. 63), writing the - * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute - * value, in the range 0 .. 8). Note that this integer essentially provides the - * input bits "shifted to the left" by one position: for example, the input to - * compute the least significant recoded digit, given that there's no bit b_-1, - * has to be b_4 b_3 b_2 b_1 b_0 0. */ +// This function looks at 5+1 scalar bits (5 current, 1 adjacent less +// significant bit), and recodes them into a signed digit for use in fast point +// multiplication: the use of signed rather than unsigned digits means that +// fewer points need to be precomputed, given that point inversion is easy (a +// precomputed point dP makes -dP available as well). +// +// BACKGROUND: +// +// Signed digits for multiplication were introduced by Booth ("A signed binary +// multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, +// pt. 2 (1951), pp. 
236-240), in that case for multiplication of integers. +// Booth's original encoding did not generally improve the density of nonzero +// digits over the binary representation, and was merely meant to simplify the +// handling of signed factors given in two's complement; but it has since been +// shown to be the basis of various signed-digit representations that do have +// further advantages, including the wNAF, using the following general +// approach: +// +// (1) Given a binary representation +// +// b_k ... b_2 b_1 b_0, +// +// of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1 +// by using bit-wise subtraction as follows: +// +// b_k b_(k-1) ... b_2 b_1 b_0 +// - b_k ... b_3 b_2 b_1 b_0 +// ------------------------------------- +// s_k b_(k-1) ... s_3 s_2 s_1 s_0 +// +// A left-shift followed by subtraction of the original value yields a new +// representation of the same value, using signed bits s_i = b_(i+1) - b_i. +// This representation from Booth's paper has since appeared in the +// literature under a variety of different names including "reversed binary +// form", "alternating greedy expansion", "mutual opposite form", and +// "sign-alternating {+-1}-representation". +// +// An interesting property is that among the nonzero bits, values 1 and -1 +// strictly alternate. +// +// (2) Various window schemes can be applied to the Booth representation of +// integers: for example, right-to-left sliding windows yield the wNAF +// (a signed-digit encoding independently discovered by various researchers +// in the 1990s), and left-to-right sliding windows yield a left-to-right +// equivalent of the wNAF (independently discovered by various researchers +// around 2004). 
+// +// To prevent leaking information through side channels in point multiplication, +// we need to recode the given integer into a regular pattern: sliding windows +// as in wNAFs won't do, we need their fixed-window equivalent -- which is a few +// decades older: we'll be using the so-called "modified Booth encoding" due to +// MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49 +// (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five +// signed bits into a signed digit: +// +// s_(4j + 4) s_(4j + 3) s_(4j + 2) s_(4j + 1) s_(4j) +// +// The sign-alternating property implies that the resulting digit values are +// integers from -16 to 16. +// +// Of course, we don't actually need to compute the signed digits s_i as an +// intermediate step (that's just a nice way to see how this scheme relates +// to the wNAF): a direct computation obtains the recoded digit from the +// six bits b_(4j + 4) ... b_(4j - 1). +// +// This function takes those five bits as an integer (0 .. 63), writing the +// recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute +// value, in the range 0 .. 8). Note that this integer essentially provides the +// input bits "shifted to the left" by one position: for example, the input to +// compute the least significant recoded digit, given that there's no bit b_-1, +// has to be b_4 b_3 b_2 b_1 b_0 0. 
void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, uint8_t in) { uint8_t s, d; @@ -106,4 +106,4 @@ void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, *digit = d; } -#endif /* 64_BIT && !WINDOWS */ +#endif // 64_BIT && !WINDOWS diff --git a/crypto/fipsmodule/ec/wnaf.c b/crypto/fipsmodule/ec/wnaf.c index f009469f..0e3ee13a 100644 --- a/crypto/fipsmodule/ec/wnaf.c +++ b/crypto/fipsmodule/ec/wnaf.c @@ -78,19 +78,18 @@ #include "../../internal.h" -/* This file implements the wNAF-based interleaving multi-exponentiation method - * at: - * http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13 - * http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf */ - -/* Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'. - * This is an array r[] of values that are either zero or odd with an - * absolute value less than 2^w satisfying - * scalar = \sum_j r[j]*2^j - * where at most one of any w+1 consecutive digits is non-zero - * with the exception that the most significant digit may be only - * w-1 zeros away from that next non-zero digit. - */ +// This file implements the wNAF-based interleaving multi-exponentiation method +// at: +// http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13 +// http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf + +// Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'. +// This is an array r[] of values that are either zero or odd with an +// absolute value less than 2^w satisfying +// scalar = \sum_j r[j]*2^j +// where at most one of any w+1 consecutive digits is non-zero +// with the exception that the most significant digit may be only +// w-1 zeros away from that next non-zero digit. static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { int window_val; int ok = 0; @@ -110,14 +109,14 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { return r; } - /* 'int8_t' can represent integers with absolute values less than 2^7. 
*/ + // 'int8_t' can represent integers with absolute values less than 2^7. if (w <= 0 || w > 7) { OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); goto err; } - bit = 1 << w; /* at most 128 */ - next_bit = bit << 1; /* at most 256 */ - mask = next_bit - 1; /* at most 255 */ + bit = 1 << w; // at most 128 + next_bit = bit << 1; // at most 256 + mask = next_bit - 1; // at most 255 if (BN_is_negative(scalar)) { sign = -1; @@ -129,9 +128,9 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { } len = BN_num_bits(scalar); - /* The modified wNAF may be one digit longer than binary representation - * (*ret_len will be set to the actual length, i.e. at most - * BN_num_bits(scalar) + 1). */ + // The modified wNAF may be one digit longer than binary representation + // (*ret_len will be set to the actual length, i.e. at most + // BN_num_bits(scalar) + 1). r = OPENSSL_malloc(len + 1); if (r == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); @@ -139,30 +138,30 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { } window_val = scalar->d[0] & mask; j = 0; - /* If j+w+1 >= len, window_val will not increase. */ + // If j+w+1 >= len, window_val will not increase. 
while (window_val != 0 || j + w + 1 < len) { int digit = 0; - /* 0 <= window_val <= 2^(w+1) */ + // 0 <= window_val <= 2^(w+1) if (window_val & 1) { - /* 0 < window_val < 2^(w+1) */ + // 0 < window_val < 2^(w+1) if (window_val & bit) { - digit = window_val - next_bit; /* -2^w < digit < 0 */ + digit = window_val - next_bit; // -2^w < digit < 0 -#if 1 /* modified wNAF */ +#if 1 // modified wNAF if (j + w + 1 >= len) { - /* special case for generating modified wNAFs: - * no new bits will be added into window_val, - * so using a positive digit here will decrease - * the total length of the representation */ + // special case for generating modified wNAFs: + // no new bits will be added into window_val, + // so using a positive digit here will decrease + // the total length of the representation - digit = window_val & (mask >> 1); /* 0 < digit < 2^w */ + digit = window_val & (mask >> 1); // 0 < digit < 2^w } #endif } else { - digit = window_val; /* 0 < digit < 2^w */ + digit = window_val; // 0 < digit < 2^w } if (digit <= -bit || digit >= bit || !(digit & 1)) { @@ -172,8 +171,8 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { window_val -= digit; - /* Now window_val is 0 or 2^(w+1) in standard wNAF generation; - * for modified window NAFs, it may also be 2^w. */ + // Now window_val is 0 or 2^(w+1) in standard wNAF generation; + // for modified window NAFs, it may also be 2^w. 
if (window_val != 0 && window_val != next_bit && window_val != bit) { OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); goto err; @@ -210,10 +209,9 @@ err: } -/* TODO: table should be optimised for the wNAF-based implementation, - * sometimes smaller windows will give better performance - * (thus the boundaries should be increased) - */ +// TODO: table should be optimised for the wNAF-based implementation, +// sometimes smaller windows will give better performance +// (thus the boundaries should be increased) static size_t window_bits_for_scalar_size(size_t b) { if (b >= 2000) { return 6; @@ -248,14 +246,14 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, int k; int r_is_inverted = 0; int r_is_at_infinity = 1; - size_t *wsize = NULL; /* individual window sizes */ - int8_t **wNAF = NULL; /* individual wNAFs */ + size_t *wsize = NULL; // individual window sizes + int8_t **wNAF = NULL; // individual wNAFs size_t *wNAF_len = NULL; size_t max_len = 0; size_t num_val = 0; - EC_POINT **val = NULL; /* precomputation */ + EC_POINT **val = NULL; // precomputation EC_POINT **v; - EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' */ + EC_POINT ***val_sub = NULL; // pointers to sub-arrays of 'val' int ret = 0; if (ctx == NULL) { @@ -265,9 +263,9 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } } - /* TODO: This function used to take |points| and |scalars| as arrays of - * |num| elements. The code below should be simplified to work in terms of |p| - * and |p_scalar|. */ + // TODO: This function used to take |points| and |scalars| as arrays of + // |num| elements. The code below should be simplified to work in terms of |p| + // and |p_scalar|. size_t num = p != NULL ? 1 : 0; const EC_POINT **points = p != NULL ? &p : NULL; const BIGNUM **scalars = p != NULL ? 
&p_scalar : NULL; @@ -281,7 +279,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - ++total_num; /* treat 'g_scalar' like 'num'-th element of 'scalars' */ + ++total_num; // treat 'g_scalar' like 'num'-th element of 'scalars' } @@ -290,7 +288,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, wNAF = OPENSSL_malloc(total_num * sizeof(wNAF[0])); val_sub = OPENSSL_malloc(total_num * sizeof(val_sub[0])); - /* Ensure wNAF is initialised in case we end up going to err. */ + // Ensure wNAF is initialised in case we end up going to err. if (wNAF != NULL) { OPENSSL_memset(wNAF, 0, total_num * sizeof(wNAF[0])); } @@ -300,7 +298,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - /* num_val will be the total number of temporarily precomputed points */ + // num_val will be the total number of temporarily precomputed points num_val = 0; for (i = 0; i < total_num; i++) { @@ -319,8 +317,8 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } } - /* All points we precompute now go into a single array 'val'. 'val_sub[i]' is - * a pointer to the subarray for the i-th point. */ + // All points we precompute now go into a single array 'val'. 'val_sub[i]' is + // a pointer to the subarray for the i-th point. 
val = OPENSSL_malloc(num_val * sizeof(val[0])); if (val == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); @@ -328,7 +326,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } OPENSSL_memset(val, 0, num_val * sizeof(val[0])); - /* allocate points for precomputation */ + // allocate points for precomputation v = val; for (i = 0; i < total_num; i++) { val_sub[i] = v; @@ -349,12 +347,11 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - /* prepare precomputed values: - * val_sub[i][0] := points[i] - * val_sub[i][1] := 3 * points[i] - * val_sub[i][2] := 5 * points[i] - * ... - */ + // prepare precomputed values: + // val_sub[i][0] := points[i] + // val_sub[i][1] := 3 * points[i] + // val_sub[i][2] := 5 * points[i] + // ... for (i = 0; i < total_num; i++) { if (i < num) { if (!EC_POINT_copy(val_sub[i][0], points[i])) { @@ -376,7 +373,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } } -#if 1 /* optional; window_bits_for_scalar_size assumes we do this step */ +#if 1 // optional; window_bits_for_scalar_size assumes we do this step if (!EC_POINTs_make_affine(group, num_val, val, ctx)) { goto err; } @@ -408,7 +405,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, r_is_inverted = !r_is_inverted; } - /* digit > 0 */ + // digit > 0 if (r_is_at_infinity) { if (!EC_POINT_copy(r, val_sub[i][digit >> 1])) { diff --git a/crypto/fipsmodule/ecdsa/ecdsa.c b/crypto/fipsmodule/ecdsa/ecdsa.c index c061ab82..dfa3b67d 100644 --- a/crypto/fipsmodule/ecdsa/ecdsa.c +++ b/crypto/fipsmodule/ecdsa/ecdsa.c @@ -64,16 +64,16 @@ #include "../../internal.h" -/* digest_to_bn interprets |digest_len| bytes from |digest| as a big-endian - * number and sets |out| to that value. It then truncates |out| so that it's, - * at most, as long as |order|. It returns one on success and zero otherwise. 
*/ +// digest_to_bn interprets |digest_len| bytes from |digest| as a big-endian +// number and sets |out| to that value. It then truncates |out| so that it's, +// at most, as long as |order|. It returns one on success and zero otherwise. static int digest_to_bn(BIGNUM *out, const uint8_t *digest, size_t digest_len, const BIGNUM *order) { size_t num_bits; num_bits = BN_num_bits(order); - /* Need to truncate digest if it is too long: first truncate whole - * bytes. */ + // Need to truncate digest if it is too long: first truncate whole + // bytes. if (8 * digest_len > num_bits) { digest_len = (num_bits + 7) / 8; } @@ -82,7 +82,7 @@ static int digest_to_bn(BIGNUM *out, const uint8_t *digest, size_t digest_len, return 0; } - /* If still too long truncate remaining bits with a shift */ + // If still too long truncate remaining bits with a shift if ((8 * digest_len > num_bits) && !BN_rshift(out, out, 8 - (num_bits & 0x7))) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); @@ -130,7 +130,7 @@ int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, const EC_GROUP *group; const EC_POINT *pub_key; - /* check input values */ + // check input values if ((group = EC_KEY_get0_group(eckey)) == NULL || (pub_key = EC_KEY_get0_public_key(eckey)) == NULL || sig == NULL) { @@ -160,7 +160,7 @@ int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); goto err; } - /* calculate tmp1 = inv(S) mod order */ + // calculate tmp1 = inv(S) mod order int no_inverse; if (!BN_mod_inverse_odd(u2, &no_inverse, sig->s, order, ctx)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); @@ -169,12 +169,12 @@ int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, if (!digest_to_bn(m, digest, digest_len, order)) { goto err; } - /* u1 = m * tmp mod order */ + // u1 = m * tmp mod order if (!BN_mod_mul(u1, m, u2, order, ctx)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); goto err; } - /* u2 = r * w mod q */ + // u2 = r * w mod q if (!BN_mod_mul(u2, sig->r, u2, 
order, ctx)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); goto err; @@ -197,7 +197,7 @@ int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); goto err; } - /* if the signature is correct u1 is equal to sig->r */ + // if the signature is correct u1 is equal to sig->r if (BN_ucmp(u1, sig->r) != 0) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); goto err; @@ -236,8 +236,8 @@ static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, } k = BN_new(); - kinv = BN_new(); /* this value is later returned in *kinvp */ - r = BN_new(); /* this value is later returned in *rp */ + kinv = BN_new(); // this value is later returned in *kinvp + r = BN_new(); // this value is later returned in *rp tmp = BN_new(); if (k == NULL || kinv == NULL || r == NULL || tmp == NULL) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); @@ -251,17 +251,17 @@ static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, const BIGNUM *order = EC_GROUP_get0_order(group); - /* Check that the size of the group order is FIPS compliant (FIPS 186-4 - * B.5.2). */ + // Check that the size of the group order is FIPS compliant (FIPS 186-4 + // B.5.2). if (BN_num_bits(order) < 160) { OPENSSL_PUT_ERROR(ECDSA, EC_R_INVALID_GROUP_ORDER); goto err; } do { - /* If possible, we'll include the private key and message digest in the k - * generation. The |digest| argument is only empty if |ECDSA_sign_setup| is - * being used. */ + // If possible, we'll include the private key and message digest in the k + // generation. The |digest| argument is only empty if |ECDSA_sign_setup| is + // being used. if (eckey->fixed_k != NULL) { if (!BN_copy(k, eckey->fixed_k)) { goto err; @@ -279,18 +279,18 @@ static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, goto err; } - /* Compute the inverse of k. The order is a prime, so use Fermat's Little - * Theorem. 
Note |ec_group_get_order_mont| may return NULL but - |bn_mod_inverse_prime| allows this. */ + // Compute the inverse of k. The order is a prime, so use Fermat's Little + // Theorem. Note |ec_group_get_order_mont| may return NULL but + // |bn_mod_inverse_prime| allows this. if (!bn_mod_inverse_prime(kinv, k, order, ctx, ec_group_get_order_mont(group))) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); goto err; } - /* We do not want timing information to leak the length of k, - * so we compute G*k using an equivalent scalar of fixed - * bit-length. */ + // We do not want timing information to leak the length of k, + // so we compute G*k using an equivalent scalar of fixed + // bit-length. if (!BN_add(k, k, order)) { goto err; @@ -301,7 +301,7 @@ static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, } } - /* compute r the x-coordinate of generator * k */ + // compute r the x-coordinate of generator * k if (!EC_POINT_mul(group, tmp_point, k, NULL, NULL, ctx)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); goto err; @@ -318,11 +318,11 @@ static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, } } while (BN_is_zero(r)); - /* clear old values if necessary */ + // clear old values if necessary BN_clear_free(*rp); BN_clear_free(*kinvp); - /* save the pre-computed values */ + // save the pre-computed values *rp = r; *kinvp = kinv; ret = 1; @@ -417,14 +417,14 @@ ECDSA_SIG *ECDSA_do_sign_ex(const uint8_t *digest, size_t digest_len, goto err; } if (BN_is_zero(s)) { - /* if kinv and r have been supplied by the caller - * don't to generate new kinv and r values */ + // if kinv and r have been supplied by the caller + // don't generate new kinv and r values if (in_kinv != NULL && in_r != NULL) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NEED_NEW_SETUP_VALUES); goto err; } } else { - /* s != 0 => we have a valid signature */ + // s != 0 => we have a valid signature break; } } diff --git a/crypto/fipsmodule/ecdsa/ecdsa_test.cc 
b/crypto/fipsmodule/ecdsa/ecdsa_test.cc index e1f109bb..de4bc48a 100644 --- a/crypto/fipsmodule/ecdsa/ecdsa_test.cc +++ b/crypto/fipsmodule/ecdsa/ecdsa_test.cc @@ -242,13 +242,13 @@ TEST(ECDSATest, MaxSigLen) { SCOPED_TRACE(bits); size_t order_len = BitsToBytes(bits); - /* Create the largest possible |ECDSA_SIG| of the given constraints. */ + // Create the largest possible |ECDSA_SIG| of the given constraints. bssl::UniquePtr<ECDSA_SIG> sig(ECDSA_SIG_new()); ASSERT_TRUE(sig); std::vector<uint8_t> bytes(order_len, 0xff); ASSERT_TRUE(BN_bin2bn(bytes.data(), bytes.size(), sig->r)); ASSERT_TRUE(BN_bin2bn(bytes.data(), bytes.size(), sig->s)); - /* Serialize it. */ + // Serialize it. uint8_t *der; size_t der_len; ASSERT_TRUE(ECDSA_SIG_to_bytes(&der, &der_len, sig.get())); diff --git a/crypto/fipsmodule/hmac/hmac.c b/crypto/fipsmodule/hmac/hmac.c index 32923505..5c098dba 100644 --- a/crypto/fipsmodule/hmac/hmac.c +++ b/crypto/fipsmodule/hmac/hmac.c @@ -100,13 +100,13 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, md = ctx->md; } - /* If either |key| is non-NULL or |md| has changed, initialize with a new key - * rather than rewinding the previous one. - * - * TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is - * ambiguous between using the empty key and reusing the previous key. There - * exist callers which intend the latter, but the former is an awkward edge - * case. Fix to API to avoid this. */ + // If either |key| is non-NULL or |md| has changed, initialize with a new key + // rather than rewinding the previous one. + // + // TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is + // ambiguous between using the empty key and reusing the previous key. There + // exist callers which intend the latter, but the former is an awkward edge + // case. Fix to API to avoid this. 
if (md != ctx->md || key != NULL) { uint8_t pad[EVP_MAX_MD_BLOCK_SIZE]; uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE]; @@ -115,7 +115,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, size_t block_size = EVP_MD_block_size(md); assert(block_size <= sizeof(key_block)); if (block_size < key_len) { - /* Long keys are hashed. */ + // Long keys are hashed. if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) || !EVP_DigestUpdate(&ctx->md_ctx, key, key_len) || !EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) { @@ -126,7 +126,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, OPENSSL_memcpy(key_block, key, key_len); key_block_len = (unsigned)key_len; } - /* Keys are then padded with zeros. */ + // Keys are then padded with zeros. if (key_block_len != EVP_MAX_MD_BLOCK_SIZE) { OPENSSL_memset(&key_block[key_block_len], 0, sizeof(key_block) - key_block_len); } @@ -165,8 +165,8 @@ int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len) { unsigned int i; uint8_t buf[EVP_MAX_MD_SIZE]; - /* TODO(davidben): The only thing that can officially fail here is - * |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. */ + // TODO(davidben): The only thing that can officially fail here is + // |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. if (!EVP_DigestFinal_ex(&ctx->md_ctx, buf, &i) || !EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->o_ctx) || !EVP_DigestUpdate(&ctx->md_ctx, buf, i) || diff --git a/crypto/fipsmodule/is_fips.c b/crypto/fipsmodule/is_fips.c index bff1a058..4182dfb7 100644 --- a/crypto/fipsmodule/is_fips.c +++ b/crypto/fipsmodule/is_fips.c @@ -15,8 +15,8 @@ #include -/* This file exists in order to give the fipsmodule target, in non-FIPS mode, - * something to compile. */ +// This file exists in order to give the fipsmodule target, in non-FIPS mode, +// something to compile. 
int FIPS_mode(void) { #if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN) diff --git a/crypto/fipsmodule/md4/md4.c b/crypto/fipsmodule/md4/md4.c index 3028c8b1..f0c1dcdf 100644 --- a/crypto/fipsmodule/md4/md4.c +++ b/crypto/fipsmodule/md4/md4.c @@ -71,7 +71,7 @@ uint8_t *MD4(const uint8_t *data, size_t len, uint8_t *out) { return out; } -/* Implemented from RFC1186 The MD4 Message-Digest Algorithm. */ +// Implemented from RFC1186 The MD4 Message-Digest Algorithm. int MD4_Init(MD4_CTX *md4) { OPENSSL_memset(md4, 0, sizeof(MD4_CTX)); @@ -107,9 +107,9 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #include "../digest/md32_common.h" -/* As pointed out by Wei Dai , the above can be - * simplified to the code below. Wei attributes these optimizations - * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. */ +// As pointed out by Wei Dai , the above can be +// simplified to the code below. Wei attributes these optimizations +// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d))) #define H(b, c, d) ((b) ^ (c) ^ (d)) @@ -148,7 +148,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X0 = l; HOST_c2l(data, l); X1 = l; - /* Round 0 */ + // Round 0 R0(A, B, C, D, X0, 3, 0); HOST_c2l(data, l); X2 = l; @@ -193,7 +193,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X15 = l; R0(C, D, A, B, X14, 11, 0); R0(B, C, D, A, X15, 19, 0); - /* Round 1 */ + // Round 1 R1(A, B, C, D, X0, 3, 0x5A827999L); R1(D, A, B, C, X4, 5, 0x5A827999L); R1(C, D, A, B, X8, 9, 0x5A827999L); @@ -210,7 +210,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R1(D, A, B, C, X7, 5, 0x5A827999L); R1(C, D, A, B, X11, 9, 0x5A827999L); R1(B, C, D, A, X15, 13, 0x5A827999L); - /* Round 2 */ + // Round 2 R2(A, B, C, D, X0, 3, 0x6ED9EBA1L); R2(D, A, B, C, X8, 9, 0x6ED9EBA1L); R2(C, D, A, B, X4, 11, 0x6ED9EBA1L); diff --git a/crypto/fipsmodule/md5/md5.c b/crypto/fipsmodule/md5/md5.c index 15a0f53c..32429da3 100644 --- a/crypto/fipsmodule/md5/md5.c +++ b/crypto/fipsmodule/md5/md5.c @@ -113,10 +113,9 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #include "../digest/md32_common.h" -/* As pointed out by Wei Dai , the above can be - * simplified to the code below. Wei attributes these optimizations - * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. - */ +// As pointed out by Wei Dai , the above can be +// simplified to the code below. Wei attributes these optimizations +// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) ((((b) ^ (c)) & (d)) ^ (c)) #define H(b, c, d) ((b) ^ (c) ^ (d)) @@ -172,7 +171,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X(0) = l; HOST_c2l(data, l); X(1) = l; - /* Round 0 */ + // Round 0 R0(A, B, C, D, X(0), 7, 0xd76aa478L); HOST_c2l(data, l); X(2) = l; @@ -217,7 +216,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X(15) = l; R0(C, D, A, B, X(14), 17, 0xa679438eL); R0(B, C, D, A, X(15), 22, 0x49b40821L); - /* Round 1 */ + // Round 1 R1(A, B, C, D, X(1), 5, 0xf61e2562L); R1(D, A, B, C, X(6), 9, 0xc040b340L); R1(C, D, A, B, X(11), 14, 0x265e5a51L); @@ -234,7 +233,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R1(D, A, B, C, X(2), 9, 0xfcefa3f8L); R1(C, D, A, B, X(7), 14, 0x676f02d9L); R1(B, C, D, A, X(12), 20, 0x8d2a4c8aL); - /* Round 2 */ + // Round 2 R2(A, B, C, D, X(5), 4, 0xfffa3942L); R2(D, A, B, C, X(8), 11, 0x8771f681L); R2(C, D, A, B, X(11), 16, 0x6d9d6122L); @@ -251,7 +250,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R2(D, A, B, C, X(12), 11, 0xe6db99e5L); R2(C, D, A, B, X(15), 16, 0x1fa27cf8L); R2(B, C, D, A, X(2), 23, 0xc4ac5665L); - /* Round 3 */ + // Round 3 R3(A, B, C, D, X(0), 6, 0xf4292244L); R3(D, A, B, C, X(7), 10, 0x432aff97L); R3(C, D, A, B, X(14), 15, 0xab9423a7L); diff --git a/crypto/fipsmodule/modes/cbc.c b/crypto/fipsmodule/modes/cbc.c index 12d551ce..4b3bdb8a 100644 --- a/crypto/fipsmodule/modes/cbc.c +++ b/crypto/fipsmodule/modes/cbc.c @@ -120,12 +120,12 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const uintptr_t inptr = (uintptr_t) in; const uintptr_t outptr = (uintptr_t) out; - /* If |in| and |out| alias, |in| must be ahead. */ + // If |in| and |out| alias, |in| must be ahead. 
assert(inptr >= outptr || inptr + len <= outptr); if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) { - /* If |out| is at least two blocks behind |in| or completely disjoint, there - * is no need to decrypt to a temporary block. */ + // If |out| is at least two blocks behind |in| or completely disjoint, there + // is no need to decrypt to a temporary block. const uint8_t *iv = ivec; if (STRICT_ALIGNMENT && @@ -140,7 +140,7 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, in += 16; out += 16; } - } else if (16 % sizeof(size_t) == 0) { /* always true */ + } else if (16 % sizeof(size_t) == 0) { // always true while (len >= 16) { size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv; @@ -156,9 +156,9 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, } OPENSSL_memcpy(ivec, iv, 16); } else { - /* |out| is less than two blocks behind |in|. Decrypting an input block - * directly to |out| would overwrite a ciphertext block before it is used as - * the next block's IV. Decrypt to a temporary block instead. */ + // |out| is less than two blocks behind |in|. Decrypting an input block + // directly to |out| would overwrite a ciphertext block before it is used as + // the next block's IV. Decrypt to a temporary block instead. 
if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { uint8_t c; @@ -173,7 +173,7 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, in += 16; out += 16; } - } else if (16 % sizeof(size_t) == 0) { /* always true */ + } else if (16 % sizeof(size_t) == 0) { // always true while (len >= 16) { size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec; const size_t *in_t = (const size_t *)in; diff --git a/crypto/fipsmodule/modes/cfb.c b/crypto/fipsmodule/modes/cfb.c index 836eb3f7..2775d195 100644 --- a/crypto/fipsmodule/modes/cfb.c +++ b/crypto/fipsmodule/modes/cfb.c @@ -166,23 +166,23 @@ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits, return; } - /* fill in the first half of the new IV with the current IV */ + // fill in the first half of the new IV with the current IV OPENSSL_memcpy(ovec, ivec, 16); - /* construct the new IV */ + // construct the new IV (*block)(ivec, ivec, key); num = (nbits + 7) / 8; if (enc) { - /* encrypt the input */ + // encrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n] ^ ivec[n]); } } else { - /* decrypt the input */ + // decrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n]) ^ ivec[n]; } } - /* shift ovec left... */ + // shift ovec left... rem = nbits % 8; num = nbits / 8; if (rem == 0) { @@ -193,10 +193,10 @@ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits, } } - /* it is not necessary to cleanse ovec, since the IV is not secret */ + // it is not necessary to cleanse ovec, since the IV is not secret } -/* N.B. This expects the input to be packed, MS bit first */ +// N.B. 
This expects the input to be packed, MS bit first void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block) { diff --git a/crypto/fipsmodule/modes/ctr.c b/crypto/fipsmodule/modes/ctr.c index a191f39e..5a97cf67 100644 --- a/crypto/fipsmodule/modes/ctr.c +++ b/crypto/fipsmodule/modes/ctr.c @@ -54,10 +54,10 @@ #include "internal.h" -/* NOTE: the IV/counter CTR mode is big-endian. The code itself - * is endian-neutral. */ +// NOTE: the IV/counter CTR mode is big-endian. The code itself +// is endian-neutral. -/* increment counter (128-bit int) by 1 */ +// increment counter (128-bit int) by 1 static void ctr128_inc(uint8_t *counter) { uint32_t n = 16, c = 1; @@ -71,16 +71,16 @@ static void ctr128_inc(uint8_t *counter) { OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ctr); -/* The input encrypted as though 128bit counter mode is being used. The extra - * state information to record how much of the 128bit block we have used is - * contained in *num, and the encrypted counter is kept in ecount_buf. Both - * *num and ecount_buf must be initialised with zeros before the first call to - * CRYPTO_ctr128_encrypt(). - * - * This algorithm assumes that the counter is in the x lower bits of the IV - * (ivec), and that the application has full control over overflow and the rest - * of the IV. This implementation takes NO responsibility for checking that - * the counter doesn't overflow into the rest of the IV when incremented. */ +// The input encrypted as though 128bit counter mode is being used. The extra +// state information to record how much of the 128bit block we have used is +// contained in *num, and the encrypted counter is kept in ecount_buf. Both +// *num and ecount_buf must be initialised with zeros before the first call to +// CRYPTO_ctr128_encrypt(). 
+// +// This algorithm assumes that the counter is in the x lower bits of the IV +// (ivec), and that the application has full control over overflow and the rest +// of the IV. This implementation takes NO responsibility for checking that +// the counter doesn't overflow into the rest of the IV when incremented. void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned int *num, @@ -140,7 +140,7 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, *num = n; } -/* increment upper 96 bits of 128-bit counter by 1 */ +// increment upper 96 bits of 128-bit counter by 1 static void ctr96_inc(uint8_t *counter) { uint32_t n = 12, c = 1; @@ -174,25 +174,25 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, ctr32 = GETU32(ivec + 12); while (len >= 16) { size_t blocks = len / 16; - /* 1<<28 is just a not-so-small yet not-so-large number... - * Below condition is practically never met, but it has to - * be checked for code correctness. */ + // 1<<28 is just a not-so-small yet not-so-large number... + // Below condition is practically never met, but it has to + // be checked for code correctness. if (sizeof(size_t) > sizeof(unsigned int) && blocks > (1U << 28)) { blocks = (1U << 28); } - /* As (*func) operates on 32-bit counter, caller - * has to handle overflow. 'if' below detects the - * overflow, which is then handled by limiting the - * amount of blocks to the exact overflow point... */ + // As (*func) operates on 32-bit counter, caller + // has to handle overflow. 'if' below detects the + // overflow, which is then handled by limiting the + // amount of blocks to the exact overflow point... ctr32 += (uint32_t)blocks; if (ctr32 < blocks) { blocks -= ctr32; ctr32 = 0; } (*func)(in, out, blocks, key, ivec); - /* (*func) does not update ivec, caller does: */ + // (*func) does not update ivec, caller does: PUTU32(ivec + 12, ctr32); - /* ... 
overflow was detected, propogate carry. */ + // ... overflow was detected, propagate carry. if (ctr32 == 0) { ctr96_inc(ivec); } diff --git a/crypto/fipsmodule/modes/gcm.c b/crypto/fipsmodule/modes/gcm.c index 47b093f4..bb5be544 100644 --- a/crypto/fipsmodule/modes/gcm.c +++ b/crypto/fipsmodule/modes/gcm.c @@ -177,11 +177,11 @@ static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) { Xi[1] = CRYPTO_bswap8(Z.lo); } -/* Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for - * details... Compiler-generated code doesn't seem to give any - * performance improvement, at least not on x86[_64]. It's here - * mostly as reference and a placeholder for possible future - * non-trivial optimization[s]... */ +// Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for +// details... Compiler-generated code doesn't seem to give any +// performance improvement, at least not on x86[_64]. It's here +// mostly as reference and a placeholder for possible future +// non-trivial optimization[s]... static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len) { u128 Z; @@ -237,7 +237,7 @@ static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], Xi[1] = CRYPTO_bswap8(Z.lo); } while (inp += 16, len -= 16); } -#else /* GHASH_ASM */ +#else // GHASH_ASM void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]); void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); @@ -246,9 +246,9 @@ void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, #define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->Htable) #if defined(GHASH_ASM) #define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len) -/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache - * trashing effect. In other words idea is to hash data while it's - * still in L1 cache after encryption pass... 
*/ +// GHASH_CHUNK is "stride parameter" missioned to mitigate cache +// trashing effect. In other words idea is to hash data while it's +// still in L1 cache after encryption pass... #define GHASH_CHUNK (3 * 1024) #endif @@ -298,7 +298,7 @@ void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); #if defined(OPENSSL_ARM) -/* 32-bit ARM also has support for doing GCM with NEON instructions. */ +// 32-bit ARM also has support for doing GCM with NEON instructions. static int neon_capable(void) { return CRYPTO_is_NEON_capable(); } @@ -308,7 +308,7 @@ void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]); void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); #else -/* AArch64 only has the ARMv8 versions of functions. */ +// AArch64 only has the ARMv8 versions of functions. static int neon_capable(void) { return 0; } @@ -357,7 +357,7 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, OPENSSL_memcpy(H.c, gcm_key, 16); - /* H is stored in host byte order */ + // H is stored in host byte order H.u[0] = CRYPTO_bswap8(H.u[0]); H.u[1] = CRYPTO_bswap8(H.u[1]); @@ -365,7 +365,7 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, #if defined(GHASH_ASM_X86_64) if (crypto_gcm_clmul_enabled()) { - if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */ + if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) { // AVX+MOVBE gcm_init_avx(out_table, H.u); *out_mult = gcm_gmult_avx; *out_hash = gcm_ghash_avx; @@ -444,8 +444,8 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, ctx->Yi.u[1] = 0; ctx->Xi.u[0] = 0; ctx->Xi.u[1] = 0; - ctx->len.u[0] = 0; /* AAD length */ - ctx->len.u[1] = 0; /* message length */ + ctx->len.u[0] = 0; // AAD length + ctx->len.u[1] = 0; // message length ctx->ares = 0; ctx->mres = 0; @@ -518,7 +518,7 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { } } - /* Process a whole number of 
blocks. */ + // Process a whole number of blocks. #ifdef GHASH size_t len_blocks = len & kSizeTWithoutLower4Bits; if (len_blocks != 0) { @@ -537,7 +537,7 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { } #endif - /* Process the remainder. */ + // Process the remainder. if (len != 0) { n = (unsigned int)len; for (size_t i = 0; i < len; ++i) { @@ -571,7 +571,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to encrypt finalizes GHASH(AAD) */ + // First call to encrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -701,7 +701,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to decrypt finalizes GHASH(AAD) */ + // First call to decrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -839,7 +839,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to encrypt finalizes GHASH(AAD) */ + // First call to encrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -861,8 +861,8 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, #if defined(AESNI_GCM) if (ctx->use_aesni_gcm_crypt) { - /* |aesni_gcm_encrypt| may not process all the input given to it. It may - * not process *any* of its input if it is deemed too small. */ + // |aesni_gcm_encrypt| may not process all the input given to it. It may + // not process *any* of its input if it is deemed too small. 
size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u); in += bulk; out += bulk; @@ -940,7 +940,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to decrypt finalizes GHASH(AAD) */ + // First call to decrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -964,8 +964,8 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, #if defined(AESNI_GCM) if (ctx->use_aesni_gcm_crypt) { - /* |aesni_gcm_decrypt| may not process all the input given to it. It may - * not process *any* of its input if it is deemed too small. */ + // |aesni_gcm_decrypt| may not process all the input given to it. It may + // not process *any* of its input if it is deemed too small. size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u); in += bulk; out += bulk; @@ -1065,8 +1065,8 @@ void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) { int crypto_gcm_clmul_enabled(void) { #ifdef GHASH_ASM const uint32_t *ia32cap = OPENSSL_ia32cap_get(); - return (ia32cap[0] & (1 << 24)) && /* check FXSR bit */ - (ia32cap[1] & (1 << 1)); /* check PCLMULQDQ bit */ + return (ia32cap[0] & (1 << 24)) && // check FXSR bit + (ia32cap[1] & (1 << 1)); // check PCLMULQDQ bit #else return 0; #endif diff --git a/crypto/fipsmodule/modes/gcm_test.cc b/crypto/fipsmodule/modes/gcm_test.cc index bfd42759..59889453 100644 --- a/crypto/fipsmodule/modes/gcm_test.cc +++ b/crypto/fipsmodule/modes/gcm_test.cc @@ -46,9 +46,9 @@ * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== */ -/* Per C99, various stdint.h and inttypes.h macros (the latter used by - * internal.h) are unavailable in C++ unless some macros are defined. C++11 - * overruled this decision, but older Android NDKs still require it. 
*/ +// Per C99, various stdint.h and inttypes.h macros (the latter used by +// internal.h) are unavailable in C++ unless some macros are defined. C++11 +// overruled this decision, but older Android NDKs still require it. #if !defined(__STDC_CONSTANT_MACROS) #define __STDC_CONSTANT_MACROS #endif diff --git a/crypto/fipsmodule/modes/internal.h b/crypto/fipsmodule/modes/internal.h index 227f704b..6a5ff99b 100644 --- a/crypto/fipsmodule/modes/internal.h +++ b/crypto/fipsmodule/modes/internal.h @@ -109,28 +109,28 @@ static inline void PUTU32(void *out, uint32_t v) { OPENSSL_memcpy(out, &v, sizeof(v)); } -/* block128_f is the type of a 128-bit, block cipher. */ +// block128_f is the type of a 128-bit, block cipher. typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16], const void *key); -/* GCM definitions */ +// GCM definitions typedef struct { uint64_t hi,lo; } u128; -/* gmult_func multiplies |Xi| by the GCM key and writes the result back to - * |Xi|. */ +// gmult_func multiplies |Xi| by the GCM key and writes the result back to +// |Xi|. typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]); -/* ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from - * |inp|. The result is written back to |Xi| and the |len| argument must be a - * multiple of 16. */ +// ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from +// |inp|. The result is written back to |Xi| and the |len| argument must be a +// multiple of 16. typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); -/* This differs from upstream's |gcm128_context| in that it does not have the - * |key| pointer, in order to make it |memcpy|-friendly. Rather the key is - * passed into each call that needs it. */ +// This differs from upstream's |gcm128_context| in that it does not have the +// |key| pointer, in order to make it |memcpy|-friendly. Rather the key is +// passed into each call that needs it. 
struct gcm128_context { - /* Following 6 names follow names in GCM specification */ + // Following 6 names follow names in GCM specification union { uint64_t u[2]; uint32_t d[4]; @@ -138,8 +138,8 @@ struct gcm128_context { size_t t[16 / sizeof(size_t)]; } Yi, EKi, EK0, len, Xi; - /* Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based, - * x86-64, GHASH assembly. */ + // Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based, + // x86-64, GHASH assembly. u128 H; u128 Htable[16]; gmult_func gmult; @@ -148,39 +148,39 @@ struct gcm128_context { unsigned int mres, ares; block128_f block; - /* use_aesni_gcm_crypt is true if this context should use the assembly - * functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data. */ + // use_aesni_gcm_crypt is true if this context should use the assembly + // functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data. unsigned use_aesni_gcm_crypt:1; }; #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) -/* crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is - * used. */ +// crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is +// used. int crypto_gcm_clmul_enabled(void); #endif -/* CTR. */ +// CTR. -/* ctr128_f is the type of a function that performs CTR-mode encryption. */ +// ctr128_f is the type of a function that performs CTR-mode encryption. typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks, const void *key, const uint8_t ivec[16]); -/* CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) - * |len| bytes from |in| to |out| using |block| in counter mode. There's no - * requirement that |len| be a multiple of any value and any partial blocks are - * stored in |ecount_buf| and |*num|, which must be zeroed before the initial - * call. The counter is a 128-bit, big-endian value in |ivec| and is - * incremented by this function. 
*/ +// CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) +// |len| bytes from |in| to |out| using |block| in counter mode. There's no +// requirement that |len| be a multiple of any value and any partial blocks are +// stored in |ecount_buf| and |*num|, which must be zeroed before the initial +// call. The counter is a 128-bit, big-endian value in |ivec| and is +// incremented by this function. void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned *num, block128_f block); -/* CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes - * |ctr|, a function that performs CTR mode but only deals with the lower 32 - * bits of the counter. This is useful when |ctr| can be an optimised - * function. */ +// CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes +// |ctr|, a function that performs CTR mode but only deals with the lower 32 +// bits of the counter. This is useful when |ctr| can be an optimised +// function. void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned *num, @@ -193,137 +193,137 @@ void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks, #endif -/* GCM. - * - * This API differs from the upstream API slightly. The |GCM128_CONTEXT| does - * not have a |key| pointer that points to the key as upstream's version does. - * Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT| - * can be safely copied. */ +// GCM. +// +// This API differs from the upstream API slightly. The |GCM128_CONTEXT| does +// not have a |key| pointer that points to the key as upstream's version does. +// Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT| +// can be safely copied. 
typedef struct gcm128_context GCM128_CONTEXT; -/* CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to - * |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware - * accelerated) functions for performing operations in the GHASH field. If the - * AVX implementation was used |*out_is_avx| will be true. */ +// CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to +// |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware +// accelerated) functions for performing operations in the GHASH field. If the +// AVX implementation was used |*out_is_avx| will be true. void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, u128 *out_key, u128 out_table[16], int *out_is_avx, const uint8_t *gcm_key); -/* CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with - * the given key. |is_aesni_encrypt| is one if |block| is |aesni_encrypt|. */ +// CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with +// the given key. |is_aesni_encrypt| is one if |block| is |aesni_encrypt|. OPENSSL_EXPORT void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key, block128_f block, int is_aesni_encrypt); -/* CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the - * same key that was passed to |CRYPTO_gcm128_init|. */ +// CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the +// same key that was passed to |CRYPTO_gcm128_init|. OPENSSL_EXPORT void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, const uint8_t *iv, size_t iv_len); -/* CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM. - * This must be called before and data is encrypted. It returns one on success - * and zero otherwise. */ +// CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM. +// This must be called before any data is encrypted. It returns one on success +// and zero otherwise. 
OPENSSL_EXPORT int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len); -/* CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key| - * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one - * on success and zero otherwise. */ +// CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key| +// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one +// on success and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len); -/* CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key| - * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one - * on success and zero otherwise. */ +// CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key| +// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one +// on success and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len); -/* CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using - * a CTR function that only handles the bottom 32 bits of the nonce, like - * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was - * passed to |CRYPTO_gcm128_init|. It returns one on success and zero - * otherwise. */ +// CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using +// a CTR function that only handles the bottom 32 bits of the nonce, like +// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was +// passed to |CRYPTO_gcm128_init|. It returns one on success and zero +// otherwise. 
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len, ctr128_f stream); -/* CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using - * a CTR function that only handles the bottom 32 bits of the nonce, like - * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was - * passed to |CRYPTO_gcm128_init|. It returns one on success and zero - * otherwise. */ +// CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using +// a CTR function that only handles the bottom 32 bits of the nonce, like +// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was +// passed to |CRYPTO_gcm128_init|. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len, ctr128_f stream); -/* CRYPTO_gcm128_finish calculates the authenticator and compares it against - * |len| bytes of |tag|. It returns one on success and zero otherwise. */ +// CRYPTO_gcm128_finish calculates the authenticator and compares it against +// |len| bytes of |tag|. It returns one on success and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len); -/* CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|. - * The minimum of |len| and 16 bytes are copied into |tag|. */ +// CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|. +// The minimum of |len| and 16 bytes are copied into |tag|. OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag, size_t len); -/* CBC. */ +// CBC. -/* cbc128_f is the type of a function that performs CBC-mode encryption. */ +// cbc128_f is the type of a function that performs CBC-mode encryption. 
typedef void (*cbc128_f)(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], int enc); -/* CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the - * given IV and block cipher in CBC mode. The input need not be a multiple of - * 128 bits long, but the output will round up to the nearest 128 bit multiple, - * zero padding the input if needed. The IV will be updated on return. */ +// CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the +// given IV and block cipher in CBC mode. The input need not be a multiple of +// 128 bits long, but the output will round up to the nearest 128 bit multiple, +// zero padding the input if needed. The IV will be updated on return. void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], block128_f block); -/* CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the - * given IV and block cipher in CBC mode. If |len| is not a multiple of 128 - * bits then only that many bytes will be written, but a multiple of 128 bits - * is always read from |in|. The IV will be updated on return. */ +// CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the +// given IV and block cipher in CBC mode. If |len| is not a multiple of 128 +// bits then only that many bytes will be written, but a multiple of 128 bits +// is always read from |in|. The IV will be updated on return. void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], block128_f block); -/* OFB. */ +// OFB. -/* CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode) - * |len| bytes from |in| to |out| using |block| in OFB mode. There's no - * requirement that |len| be a multiple of any value and any partial blocks are - * stored in |ivec| and |*num|, the latter must be zero before the initial - * call. 
*/ +// CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode) +// |len| bytes from |in| to |out| using |block| in OFB mode. There's no +// requirement that |len| be a multiple of any value and any partial blocks are +// stored in |ivec| and |*num|, the latter must be zero before the initial +// call. void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, block128_f block); -/* CFB. */ +// CFB. -/* CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB mode. There's no requirement that - * |len| be a multiple of any value and any partial blocks are stored in |ivec| - * and |*num|, the latter must be zero before the initial call. */ +// CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB mode. There's no requirement that +// |len| be a multiple of any value and any partial blocks are stored in |ivec| +// and |*num|, the latter must be zero before the initial call. void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); -/* CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB-8 mode. Prior to the first call - * |num| should be set to zero. */ +// CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB-8 mode. Prior to the first call +// |num| should be set to zero. void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); -/* CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB-1 mode. Prior to the first call - * |num| should be set to zero. 
*/ +// CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB-1 mode. Prior to the first call +// |num| should be set to zero. void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); @@ -333,11 +333,11 @@ size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len, block128_f block); -/* POLYVAL. - * - * POLYVAL is a polynomial authenticator that operates over a field very - * similar to the one that GHASH uses. See - * https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3. */ +// POLYVAL. +// +// POLYVAL is a polynomial authenticator that operates over a field very +// similar to the one that GHASH uses. See +// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3. typedef union { uint64_t u[2]; @@ -345,8 +345,8 @@ typedef union { } polyval_block; struct polyval_ctx { - /* Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based, - * x86-64, GHASH assembly. */ + // Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based, + // x86-64, GHASH assembly. polyval_block S; u128 H; u128 Htable[16]; @@ -354,21 +354,21 @@ struct polyval_ctx { ghash_func ghash; }; -/* CRYPTO_POLYVAL_init initialises |ctx| using |key|. */ +// CRYPTO_POLYVAL_init initialises |ctx| using |key|. void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]); -/* CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the - * blocks from |in|. Only a whole number of blocks can be processed so |in_len| - * must be a multiple of 16. */ +// CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the +// blocks from |in|. Only a whole number of blocks can be processed so |in_len| +// must be a multiple of 16. 
void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in, size_t in_len); -/* CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|. */ +// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|. void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_MODES_INTERNAL_H */ +#endif // OPENSSL_HEADER_MODES_INTERNAL_H diff --git a/crypto/fipsmodule/modes/polyval.c b/crypto/fipsmodule/modes/polyval.c index 392e2d8e..857dc0e3 100644 --- a/crypto/fipsmodule/modes/polyval.c +++ b/crypto/fipsmodule/modes/polyval.c @@ -21,16 +21,16 @@ #include "../../internal.h" -/* byte_reverse reverses the order of the bytes in |b->c|. */ +// byte_reverse reverses the order of the bytes in |b->c|. static void byte_reverse(polyval_block *b) { const uint64_t t = CRYPTO_bswap8(b->u[0]); b->u[0] = CRYPTO_bswap8(b->u[1]); b->u[1] = t; } -/* reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of - * the GHASH field, multiplies that by 'x' and serialises the result back into - * |b|, but with GHASH's backwards bit ordering. */ +// reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of +// the GHASH field, multiplies that by 'x' and serialises the result back into +// |b|, but with GHASH's backwards bit ordering. static void reverse_and_mulX_ghash(polyval_block *b) { uint64_t hi = b->u[0]; uint64_t lo = b->u[1]; @@ -44,11 +44,11 @@ static void reverse_and_mulX_ghash(polyval_block *b) { b->u[1] = CRYPTO_bswap8(hi); } -/* POLYVAL(H, X_1, ..., X_n) = - * ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ..., - * ByteReverse(X_n))). - * - * See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A. */ +// POLYVAL(H, X_1, ..., X_n) = +// ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ..., +// ByteReverse(X_n))). 
+// +// See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A. void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]) { polyval_block H; diff --git a/crypto/fipsmodule/rand/ctrdrbg.c b/crypto/fipsmodule/rand/ctrdrbg.c index 2b22f5d9..9f8be666 100644 --- a/crypto/fipsmodule/rand/ctrdrbg.c +++ b/crypto/fipsmodule/rand/ctrdrbg.c @@ -21,16 +21,16 @@ #include "../cipher/internal.h" -/* Section references in this file refer to SP 800-90Ar1: - * http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf */ +// Section references in this file refer to SP 800-90Ar1: +// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf -/* See table 3. */ +// See table 3. static const uint64_t kMaxReseedCount = UINT64_C(1) << 48; int CTR_DRBG_init(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len) { - /* Section 10.2.1.3.1 */ + // Section 10.2.1.3.1 if (personalization_len > CTR_DRBG_ENTROPY_LEN) { return 0; } @@ -42,10 +42,10 @@ int CTR_DRBG_init(CTR_DRBG_STATE *drbg, seed_material[i] ^= personalization[i]; } - /* Section 10.2.1.2 */ + // Section 10.2.1.2 - /* kInitMask is the result of encrypting blocks with big-endian value 1, 2 - * and 3 with the all-zero AES-256 key. */ + // kInitMask is the result of encrypting blocks with big-endian value 1, 2 + // and 3 with the all-zero AES-256 key. static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, @@ -67,8 +67,8 @@ int CTR_DRBG_init(CTR_DRBG_STATE *drbg, OPENSSL_COMPILE_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0, not_a_multiple_of_block_size); -/* ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a - * big-endian number. */ +// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a +// big-endian number. 
static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { drbg->counter.words[3] = CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); @@ -76,9 +76,9 @@ static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { static int CTR_DRBG_update(CTR_DRBG_STATE *drbg, const uint8_t *data, size_t data_len) { - /* Section 10.2.1.2. A value of |data_len| which less than - * |CTR_DRBG_ENTROPY_LEN| is permitted and acts the same as right-padding - * with zeros. This can save a copy. */ + // Section 10.2.1.2. A value of |data_len| which less than + // |CTR_DRBG_ENTROPY_LEN| is permitted and acts the same as right-padding + // with zeros. This can save a copy. if (data_len > CTR_DRBG_ENTROPY_LEN) { return 0; } @@ -103,7 +103,7 @@ int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *additional_data, size_t additional_data_len) { - /* Section 10.2.1.4 */ + // Section 10.2.1.4 uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; if (additional_data_len > 0) { @@ -131,12 +131,12 @@ int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, const uint8_t *additional_data, size_t additional_data_len) { - /* See 9.3.1 */ + // See 9.3.1 if (out_len > CTR_DRBG_MAX_GENERATE_LENGTH) { return 0; } - /* See 10.2.1.5.1 */ + // See 10.2.1.5.1 if (drbg->reseed_counter > kMaxReseedCount) { return 0; } @@ -146,12 +146,12 @@ int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, return 0; } - /* kChunkSize is used to interact better with the cache. Since the AES-CTR - * code assumes that it's encrypting rather than just writing keystream, the - * buffer has to be zeroed first. Without chunking, large reads would zero - * the whole buffer, flushing the L1 cache, and then do another pass (missing - * the cache every time) to “encrypt” it. The code can avoid this by - * chunking. */ + // kChunkSize is used to interact better with the cache. 
Since the AES-CTR + // code assumes that it's encrypting rather than just writing keystream, the + // buffer has to be zeroed first. Without chunking, large reads would zero + // the whole buffer, flushing the L1 cache, and then do another pass (missing + // the cache every time) to “encrypt” it. The code can avoid this by + // chunking. static const size_t kChunkSize = 8 * 1024; while (out_len >= AES_BLOCK_SIZE) { diff --git a/crypto/fipsmodule/rand/internal.h b/crypto/fipsmodule/rand/internal.h index f569c382..c0812ee0 100644 --- a/crypto/fipsmodule/rand/internal.h +++ b/crypto/fipsmodule/rand/internal.h @@ -25,21 +25,21 @@ extern "C" { #endif -/* RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes - * from |user_additional_data| in. */ +// RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes +// from |user_additional_data| in. void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, const uint8_t user_additional_data[32]); -/* CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating - * system. */ +// CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating +// system. void CRYPTO_sysrand(uint8_t *buf, size_t len); -/* rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has - * been enabled via |RAND_enable_fork_unsafe_buffering|. */ +// rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has +// been enabled via |RAND_enable_fork_unsafe_buffering|. int rand_fork_unsafe_buffering_enabled(void); -/* CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP - * 800-90Ar1. */ +// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP +// 800-90Ar1. typedef struct { alignas(16) AES_KEY ks; block128_f block; @@ -51,42 +51,42 @@ typedef struct { uint64_t reseed_counter; } CTR_DRBG_STATE; -/* See SP 800-90Ar1, table 3. */ +// See SP 800-90Ar1, table 3. 
#define CTR_DRBG_ENTROPY_LEN 48 #define CTR_DRBG_MAX_GENERATE_LENGTH 65536 -/* CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of - * entropy in |entropy| and, optionally, a personalization string up to - * |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero - * on error. */ +// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of +// entropy in |entropy| and, optionally, a personalization string up to +// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero +// on error. OPENSSL_EXPORT int CTR_DRBG_init(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len); -/* CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy - * in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of - * additional data. It returns one on success or zero on error. */ +// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy +// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of +// additional data. It returns one on success or zero on error. OPENSSL_EXPORT int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *additional_data, size_t additional_data_len); -/* CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional - * data (if any) and then writes |out_len| random bytes to |out|, where - * |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or - * zero on error. */ +// CTR_DRBG_generate processes up to |CTR_DRBG_ENTROPY_LEN| bytes of additional +// data (if any) and then writes |out_len| random bytes to |out|, where +// |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or +// zero on error. 
OPENSSL_EXPORT int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, const uint8_t *additional_data, size_t additional_data_len); -/* CTR_DRBG_clear zeroises the state of |drbg|. */ +// CTR_DRBG_clear zeroises the state of |drbg|. OPENSSL_EXPORT void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H */ +#endif // OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H diff --git a/crypto/fipsmodule/rand/rand.c b/crypto/fipsmodule/rand/rand.c index 9480ddbb..dafc91f7 100644 --- a/crypto/fipsmodule/rand/rand.c +++ b/crypto/fipsmodule/rand/rand.c @@ -31,53 +31,53 @@ #include "../delocate.h" -/* It's assumed that the operating system always has an unfailing source of - * entropy which is accessed via |CRYPTO_sysrand|. (If the operating system - * entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we - * don't try to handle it.) - * - * In addition, the hardware may provide a low-latency RNG. Intel's rdrand - * instruction is the canonical example of this. When a hardware RNG is - * available we don't need to worry about an RNG failure arising from fork()ing - * the process or moving a VM, so we can keep thread-local RNG state and use it - * as an additional-data input to CTR-DRBG. - * - * (We assume that the OS entropy is safe from fork()ing and VM duplication. - * This might be a bit of a leap of faith, esp on Windows, but there's nothing - * that we can do about it.) */ - -/* kReseedInterval is the number of generate calls made to CTR-DRBG before - * reseeding. */ +// It's assumed that the operating system always has an unfailing source of +// entropy which is accessed via |CRYPTO_sysrand|. (If the operating system +// entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we +// don't try to handle it.) +// +// In addition, the hardware may provide a low-latency RNG. 
Intel's rdrand +// instruction is the canonical example of this. When a hardware RNG is +// available we don't need to worry about an RNG failure arising from fork()ing +// the process or moving a VM, so we can keep thread-local RNG state and use it +// as an additional-data input to CTR-DRBG. +// +// (We assume that the OS entropy is safe from fork()ing and VM duplication. +// This might be a bit of a leap of faith, esp on Windows, but there's nothing +// that we can do about it.) + +// kReseedInterval is the number of generate calls made to CTR-DRBG before +// reseeding. static const unsigned kReseedInterval = 4096; -/* CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the - * continuous random number generator test in FIPS 140-2, section 4.9.2. */ +// CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the +// continuous random number generator test in FIPS 140-2, section 4.9.2. #define CRNGT_BLOCK_SIZE 16 -/* rand_thread_state contains the per-thread state for the RNG. */ +// rand_thread_state contains the per-thread state for the RNG. struct rand_thread_state { CTR_DRBG_STATE drbg; - /* calls is the number of generate calls made on |drbg| since it was last - * (re)seeded. This is bound by |kReseedInterval|. */ + // calls is the number of generate calls made on |drbg| since it was last + // (re)seeded. This is bound by |kReseedInterval|. unsigned calls; - /* last_block_valid is non-zero iff |last_block| contains data from - * |CRYPTO_sysrand|. */ + // last_block_valid is non-zero iff |last_block| contains data from + // |CRYPTO_sysrand|. int last_block_valid; #if defined(BORINGSSL_FIPS) - /* last_block contains the previous block from |CRYPTO_sysrand|. */ + // last_block contains the previous block from |CRYPTO_sysrand|. uint8_t last_block[CRNGT_BLOCK_SIZE]; - /* next and prev form a NULL-terminated, double-linked list of all states in - * a process. 
*/ + // next and prev form a NULL-terminated, double-linked list of all states in + // a process. struct rand_thread_state *next, *prev; #endif }; #if defined(BORINGSSL_FIPS) -/* thread_states_list is the head of a linked-list of all |rand_thread_state| - * objects in the process, one per thread. This is needed because FIPS requires - * that they be zeroed on process exit, but thread-local destructors aren't - * called when the whole process is exiting. */ +// thread_states_list is the head of a linked-list of all |rand_thread_state| +// objects in the process, one per thread. This is needed because FIPS requires +// that they be zeroed on process exit, but thread-local destructors aren't +// called when the whole process is exiting. DEFINE_BSS_GET(struct rand_thread_state *, thread_states_list); DEFINE_STATIC_MUTEX(thread_states_list_lock); @@ -88,13 +88,13 @@ static void rand_thread_state_clear_all(void) { cur != NULL; cur = cur->next) { CTR_DRBG_clear(&cur->drbg); } - /* |thread_states_list_lock is deliberately left locked so that any threads - * that are still running will hang if they try to call |RAND_bytes|. */ + // |thread_states_list_lock is deliberately left locked so that any threads + // that are still running will hang if they try to call |RAND_bytes|. } #endif -/* rand_thread_state_free frees a |rand_thread_state|. This is called when a - * thread exits. */ +// rand_thread_state_free frees a |rand_thread_state|. This is called when a +// thread exits. 
static void rand_thread_state_free(void *state_in) { struct rand_thread_state *state = state_in; @@ -126,7 +126,7 @@ static void rand_thread_state_free(void *state_in) { #if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) -/* These functions are defined in asm/rdrand-x86_64.pl */ +// These functions are defined in asm/rdrand-x86_64.pl extern int CRYPTO_rdrand(uint8_t out[8]); extern int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len); @@ -183,8 +183,8 @@ static void rand_get_seed(struct rand_thread_state *state, state->last_block_valid = 1; } - /* We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to - * whiten. */ + // We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to + // whiten. #define FIPS_OVERREAD 10 uint8_t entropy[CTR_DRBG_ENTROPY_LEN * FIPS_OVERREAD]; @@ -192,9 +192,9 @@ static void rand_get_seed(struct rand_thread_state *state, CRYPTO_sysrand(entropy, sizeof(entropy)); } - /* See FIPS 140-2, section 4.9.2. This is the “continuous random number - * generator test” which causes the program to randomly abort. Hopefully the - * rate of failure is small enough not to be a problem in practice. */ + // See FIPS 140-2, section 4.9.2. This is the “continuous random number + // generator test” which causes the program to randomly abort. Hopefully the + // rate of failure is small enough not to be a problem in practice. if (CRYPTO_memcmp(state->last_block, entropy, CRNGT_BLOCK_SIZE) == 0) { printf("CRNGT failed.\n"); BORINGSSL_FIPS_abort(); @@ -225,8 +225,8 @@ static void rand_get_seed(struct rand_thread_state *state, static void rand_get_seed(struct rand_thread_state *state, uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { - /* If not in FIPS mode, we don't overread from the system entropy source and - * we don't depend only on the hardware RDRAND. */ + // If not in FIPS mode, we don't overread from the system entropy source and + // we don't depend only on the hardware RDRAND. 
CRYPTO_sysrand(seed, CTR_DRBG_ENTROPY_LEN); } @@ -238,16 +238,16 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, return; } - /* Additional data is mixed into every CTR-DRBG call to protect, as best we - * can, against forks & VM clones. We do not over-read this information and - * don't reseed with it so, from the point of view of FIPS, this doesn't - * provide “prediction resistance”. But, in practice, it does. */ + // Additional data is mixed into every CTR-DRBG call to protect, as best we + // can, against forks & VM clones. We do not over-read this information and + // don't reseed with it so, from the point of view of FIPS, this doesn't + // provide “prediction resistance”. But, in practice, it does. uint8_t additional_data[32]; if (!hwrand(additional_data, sizeof(additional_data))) { - /* Without a hardware RNG to save us from address-space duplication, the OS - * entropy is used. This can be expensive (one read per |RAND_bytes| call) - * and so can be disabled by applications that we have ensured don't fork - * and aren't at risk of VM cloning. */ + // Without a hardware RNG to save us from address-space duplication, the OS + // entropy is used. This can be expensive (one read per |RAND_bytes| call) + // and so can be disabled by applications that we have ensured don't fork + // and aren't at risk of VM cloning. if (!rand_fork_unsafe_buffering_enabled()) { CRYPTO_sysrand(additional_data, sizeof(additional_data)); } else { @@ -268,8 +268,8 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, if (state == NULL || !CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_RAND, state, rand_thread_state_free)) { - /* If the system is out of memory, use an ephemeral state on the - * stack. */ + // If the system is out of memory, use an ephemeral state on the + // stack. 
state = &stack_state; } @@ -300,14 +300,14 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, uint8_t seed[CTR_DRBG_ENTROPY_LEN]; rand_get_seed(state, seed); #if defined(BORINGSSL_FIPS) - /* Take a read lock around accesses to |state->drbg|. This is needed to - * avoid returning bad entropy if we race with - * |rand_thread_state_clear_all|. - * - * This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a - * bug on ppc64le. glibc may implement pthread locks by wrapping user code - * in a hardware transaction, but, on some older versions of glibc and the - * kernel, syscalls made with |syscall| did not abort the transaction. */ + // Take a read lock around accesses to |state->drbg|. This is needed to + // avoid returning bad entropy if we race with + // |rand_thread_state_clear_all|. + // + // This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a + // bug on ppc64le. glibc may implement pthread locks by wrapping user code + // in a hardware transaction, but, on some older versions of glibc and the + // kernel, syscalls made with |syscall| did not abort the transaction. CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get()); #endif if (!CTR_DRBG_reseed(&state->drbg, seed, NULL, 0)) { diff --git a/crypto/fipsmodule/rand/urandom.c b/crypto/fipsmodule/rand/urandom.c index 8cbf7278..54309686 100644 --- a/crypto/fipsmodule/rand/urandom.c +++ b/crypto/fipsmodule/rand/urandom.c @@ -13,7 +13,7 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(_GNU_SOURCE) -#define _GNU_SOURCE /* needed for syscall() on Linux. */ +#define _GNU_SOURCE // needed for syscall() on Linux. 
#endif #include @@ -65,40 +65,40 @@ #error "system call number for getrandom is not the expected value" #endif -#else /* __NR_getrandom */ +#else // __NR_getrandom #define __NR_getrandom EXPECTED_NR_getrandom -#endif /* __NR_getrandom */ +#endif // __NR_getrandom -#endif /* EXPECTED_NR_getrandom */ +#endif // EXPECTED_NR_getrandom #if !defined(GRND_NONBLOCK) #define GRND_NONBLOCK 1 #endif -#endif /* OPENSSL_LINUX */ +#endif // OPENSSL_LINUX -/* rand_lock is used to protect the |*_requested| variables. */ +// rand_lock is used to protect the |*_requested| variables. DEFINE_STATIC_MUTEX(rand_lock); -/* The following constants are magic values of |urandom_fd|. */ +// The following constants are magic values of |urandom_fd|. static const int kUnset = 0; static const int kHaveGetrandom = -3; -/* urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by - * |rand_lock|. */ +// urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by +// |rand_lock|. DEFINE_BSS_GET(int, urandom_fd_requested); -/* urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. */ +// urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. DEFINE_BSS_GET(int, urandom_fd); DEFINE_STATIC_ONCE(rand_once); #if defined(USE_NR_getrandom) || defined(BORINGSSL_FIPS) -/* message writes |msg| to stderr. We use this because referencing |stderr| - * with |fprintf| generates relocations, which is a problem inside the FIPS - * module. */ +// message writes |msg| to stderr. We use this because referencing |stderr| +// with |fprintf| generates relocations, which is a problem inside the FIPS +// module. static void message(const char *msg) { ssize_t r; do { @@ -107,10 +107,10 @@ static void message(const char *msg) { } #endif -/* init_once initializes the state of this module to values previously - * requested. 
This is the only function that modifies |urandom_fd| and - * |urandom_buffering|, whose values may be read safely after calling the - * once. */ +// init_once initializes the state of this module to values previously +// requested. This is the only function that modifies |urandom_fd| and +// |urandom_buffering|, whose values may be read safely after calling the +// once. static void init_once(void) { CRYPTO_STATIC_MUTEX_lock_read(rand_lock_bss_get()); int fd = *urandom_fd_requested_bss_get(); @@ -140,7 +140,7 @@ static void init_once(void) { return; } } -#endif /* USE_NR_getrandom */ +#endif // USE_NR_getrandom if (fd == kUnset) { do { @@ -154,9 +154,9 @@ static void init_once(void) { assert(kUnset == 0); if (fd == kUnset) { - /* Because we want to keep |urandom_fd| in the BSS, we have to initialise - * it to zero. But zero is a valid file descriptor too. Thus if open - * returns zero for /dev/urandom, we dup it to get a non-zero number. */ + // Because we want to keep |urandom_fd| in the BSS, we have to initialise + // it to zero. But zero is a valid file descriptor too. Thus if open + // returns zero for /dev/urandom, we dup it to get a non-zero number. fd = dup(fd); close(kUnset); @@ -166,10 +166,10 @@ static void init_once(void) { } #if defined(BORINGSSL_FIPS) - /* In FIPS mode we ensure that the kernel has sufficient entropy before - * continuing. This is automatically handled by getrandom, which requires - * that the entropy pool has been initialised, but for urandom we have to - * poll. */ + // In FIPS mode we ensure that the kernel has sufficient entropy before + // continuing. This is automatically handled by getrandom, which requires + // that the entropy pool has been initialised, but for urandom we have to + // poll. for (;;) { int entropy_bits; if (ioctl(fd, RNDGETENTCNT, &entropy_bits)) { @@ -190,7 +190,7 @@ static void init_once(void) { int flags = fcntl(fd, F_GETFD); if (flags == -1) { - /* Native Client doesn't implement |fcntl|. 
*/ + // Native Client doesn't implement |fcntl|. if (errno != ENOSYS) { abort(); } @@ -211,9 +211,9 @@ void RAND_set_urandom_fd(int fd) { assert(kUnset == 0); if (fd == kUnset) { - /* Because we want to keep |urandom_fd| in the BSS, we have to initialise - * it to zero. But zero is a valid file descriptor too. Thus if dup - * returned zero we dup it again to get a non-zero number. */ + // Because we want to keep |urandom_fd| in the BSS, we have to initialise + // it to zero. But zero is a valid file descriptor too. Thus if dup + // returned zero we dup it again to get a non-zero number. fd = dup(fd); close(kUnset); @@ -238,8 +238,8 @@ void RAND_set_urandom_fd(int fd) { void __msan_unpoison(void *, size_t); #endif -/* fill_with_entropy writes |len| bytes of entropy into |out|. It returns one - * on success and zero on error. */ +// fill_with_entropy writes |len| bytes of entropy into |out|. It returns one +// on success and zero on error. static char fill_with_entropy(uint8_t *out, size_t len) { while (len > 0) { ssize_t r; @@ -252,13 +252,13 @@ static char fill_with_entropy(uint8_t *out, size_t len) { #if defined(OPENSSL_MSAN) if (r > 0) { - /* MSAN doesn't recognise |syscall| and thus doesn't notice that we - * have initialised the output buffer. */ + // MSAN doesn't recognise |syscall| and thus doesn't notice that we + // have initialised the output buffer. __msan_unpoison(out, r); } -#endif /* OPENSSL_MSAN */ +#endif // OPENSSL_MSAN -#else /* USE_NR_getrandom */ +#else // USE_NR_getrandom abort(); #endif } else { @@ -277,7 +277,7 @@ static char fill_with_entropy(uint8_t *out, size_t len) { return 1; } -/* CRYPTO_sysrand puts |requested| random bytes into |out|. */ +// CRYPTO_sysrand puts |requested| random bytes into |out|. 
void CRYPTO_sysrand(uint8_t *out, size_t requested) { if (requested == 0) { return; diff --git a/crypto/fipsmodule/rsa/blinding.c b/crypto/fipsmodule/rsa/blinding.c index 71feb3b0..d9560574 100644 --- a/crypto/fipsmodule/rsa/blinding.c +++ b/crypto/fipsmodule/rsa/blinding.c @@ -121,8 +121,8 @@ #define BN_BLINDING_COUNTER 32 struct bn_blinding_st { - BIGNUM *A; /* The base blinding factor, Montgomery-encoded. */ - BIGNUM *Ai; /* The inverse of the blinding factor, Montgomery-encoded. */ + BIGNUM *A; // The base blinding factor, Montgomery-encoded. + BIGNUM *Ai; // The inverse of the blinding factor, Montgomery-encoded. unsigned counter; }; @@ -147,7 +147,7 @@ BN_BLINDING *BN_BLINDING_new(void) { goto err; } - /* The blinding values need to be created before this blinding can be used. */ + // The blinding values need to be created before this blinding can be used. ret->counter = BN_BLINDING_COUNTER - 1; return ret; @@ -170,7 +170,7 @@ void BN_BLINDING_free(BN_BLINDING *r) { static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { if (++b->counter == BN_BLINDING_COUNTER) { - /* re-create blinding parameters */ + // re-create blinding parameters if (!bn_blinding_create_param(b, e, mont, ctx)) { goto err; } @@ -185,10 +185,10 @@ static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, return 1; err: - /* |A| and |Ai| may be in an inconsistent state so they both need to be - * replaced the next time this blinding is used. Note that this is only - * sufficient because support for |BN_BLINDING_NO_UPDATE| and - * |BN_BLINDING_NO_RECREATE| was previously dropped. */ + // |A| and |Ai| may be in an inconsistent state so they both need to be + // replaced the next time this blinding is used. Note that this is only + // sufficient because support for |BN_BLINDING_NO_UPDATE| and + // |BN_BLINDING_NO_RECREATE| was previously dropped. 
b->counter = BN_BLINDING_COUNTER - 1; return 0; @@ -196,9 +196,8 @@ err: int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { - /* |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| - * cancels one Montgomery factor, so the resulting value of |n| is unencoded. - */ + // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| + // cancels one Montgomery factor, so the resulting value of |n| is unencoded. if (!bn_blinding_update(b, e, mont, ctx) || !BN_mod_mul_montgomery(n, n, b->A, mont, ctx)) { return 0; @@ -209,9 +208,8 @@ int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont, BN_CTX *ctx) { - /* |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| - * cancels one Montgomery factor, so the resulting value of |n| is unencoded. - */ + // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| + // cancels one Montgomery factor, so the resulting value of |n| is unencoded. return BN_mod_mul_montgomery(n, n, b->Ai, mont, ctx); } @@ -225,8 +223,8 @@ static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, return 0; } - /* |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but - * more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|. */ + // |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but + // more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|. if (!BN_from_montgomery(b->Ai, b->A, mont, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); return 0; @@ -242,8 +240,8 @@ static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, return 0; } - /* For reasonably-sized RSA keys, it should almost never be the case that a - * random value doesn't have an inverse. */ + // For reasonably-sized RSA keys, it should almost never be the case that a + // random value doesn't have an inverse. 
if (retry_counter-- == 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS); return 0; diff --git a/crypto/fipsmodule/rsa/internal.h b/crypto/fipsmodule/rsa/internal.h index fb5ffffc..67f2cb9e 100644 --- a/crypto/fipsmodule/rsa/internal.h +++ b/crypto/fipsmodule/rsa/internal.h @@ -67,7 +67,7 @@ extern "C" { #endif -/* Default implementations of RSA operations. */ +// Default implementations of RSA operations. const RSA_METHOD *RSA_default_method(void); @@ -107,29 +107,29 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len); -/* RSA_private_transform calls either the method-specific |private_transform| - * function (if given) or the generic one. See the comment for - * |private_transform| in |rsa_meth_st|. */ +// RSA_private_transform calls either the method-specific |private_transform| +// function (if given) or the generic one. See the comment for +// |private_transform| in |rsa_meth_st|. int RSA_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); -/* The following utility functions are exported for test purposes. */ +// The following utility functions are exported for test purposes. extern const BN_ULONG kBoringSSLRSASqrtTwo[]; extern const size_t kBoringSSLRSASqrtTwoLen; -/* rsa_less_than_words returns one if |a| < |b| and zero otherwise, where |a| - * and |b| both are |len| words long. It runs in constant time. */ +// rsa_less_than_words returns one if |a| < |b| and zero otherwise, where |a| +// and |b| both are |len| words long. It runs in constant time. int rsa_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len); -/* rsa_greater_than_pow2 returns one if |b| is greater than 2^|n| and zero - * otherwise. */ +// rsa_greater_than_pow2 returns one if |b| is greater than 2^|n| and zero +// otherwise. 
int rsa_greater_than_pow2(const BIGNUM *b, int n); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_RSA_INTERNAL_H */ +#endif // OPENSSL_HEADER_RSA_INTERNAL_H diff --git a/crypto/fipsmodule/rsa/padding.c b/crypto/fipsmodule/rsa/padding.c index 9f002d24..9d88dba7 100644 --- a/crypto/fipsmodule/rsa/padding.c +++ b/crypto/fipsmodule/rsa/padding.c @@ -74,7 +74,7 @@ int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { - /* See RFC 8017, section 9.2. */ + // See RFC 8017, section 9.2. if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; @@ -96,20 +96,20 @@ int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len) { - /* See RFC 8017, section 9.2. This is part of signature verification and thus - * does not need to run in constant-time. */ + // See RFC 8017, section 9.2. This is part of signature verification and thus + // does not need to run in constant-time. if (from_len < 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } - /* Check the header. */ + // Check the header. if (from[0] != 0 || from[1] != 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_BLOCK_TYPE_IS_NOT_01); return 0; } - /* Scan over padded data, looking for the 00. */ + // Scan over padded data, looking for the 00. size_t pad; for (pad = 2 /* header */; pad < from_len; pad++) { if (from[pad] == 0x00) { @@ -132,7 +132,7 @@ int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, return 0; } - /* Skip over the 00. */ + // Skip over the 00. pad++; if (from_len - pad > max_out) { @@ -163,7 +163,7 @@ static int rand_nonzero(uint8_t *out, size_t len) { int RSA_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { - /* See RFC 8017, section 7.2.1. */ + // See RFC 8017, section 7.2.1. 
if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; @@ -195,11 +195,11 @@ int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, return 0; } - /* PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography - * Standard", section 7.2.2. */ + // PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography + // Standard", section 7.2.2. if (from_len < RSA_PKCS1_PADDING_SIZE) { - /* |from| is zero-padded to the size of the RSA modulus, a public value, so - * this can be rejected in non-constant time. */ + // |from| is zero-padded to the size of the RSA modulus, a public value, so + // this can be rejected in non-constant time. OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } @@ -215,24 +215,24 @@ int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, looking_for_index = constant_time_select_w(equals0, 0, looking_for_index); } - /* The input must begin with 00 02. */ + // The input must begin with 00 02. crypto_word_t valid_index = first_byte_is_zero; valid_index &= second_byte_is_two; - /* We must have found the end of PS. */ + // We must have found the end of PS. valid_index &= ~looking_for_index; - /* PS must be at least 8 bytes long, and it starts two bytes into |from|. */ + // PS must be at least 8 bytes long, and it starts two bytes into |from|. valid_index &= constant_time_ge_w(zero_index, 2 + 8); - /* Skip the zero byte. */ + // Skip the zero byte. zero_index++; - /* NOTE: Although this logic attempts to be constant time, the API contracts - * of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it - * impossible to completely avoid Bleichenbacher's attack. Consumers should - * use |RSA_PADDING_NONE| and perform the padding check in constant-time - * combined with a swap to a random session key or other mitigation. 
*/ + // NOTE: Although this logic attempts to be constant time, the API contracts + // of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it + // impossible to completely avoid Bleichenbacher's attack. Consumers should + // use |RSA_PADDING_NONE| and perform the padding check in constant-time + // combined with a swap to a random session key or other mitigation. if (!valid_index) { OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); return 0; @@ -240,8 +240,8 @@ int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, const size_t msg_len = from_len - zero_index; if (msg_len > max_out) { - /* This shouldn't happen because this function is always called with - * |max_out| as the key size and |from_len| is bounded by the key size. */ + // This shouldn't happen because this function is always called with + // |max_out| as the key size and |from_len| is bounded by the key size. OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); return 0; } @@ -397,12 +397,12 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, size_t mdlen = EVP_MD_size(md); - /* The encoded message is one byte smaller than the modulus to ensure that it - * doesn't end up greater than the modulus. Thus there's an extra "+1" here - * compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. */ + // The encoded message is one byte smaller than the modulus to ensure that it + // doesn't end up greater than the modulus. Thus there's an extra "+1" here + // compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. if (from_len < 1 + 2*mdlen + 1) { - /* 'from_len' is the length of the modulus, i.e. does not depend on the - * particular ciphertext. */ + // 'from_len' is the length of the modulus, i.e. does not depend on the + // particular ciphertext. 
goto decoding_err; } @@ -470,8 +470,8 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, return 1; decoding_err: - /* to avoid chosen ciphertext attacks, the error message should not reveal - * which kind of decoding error happened */ + // to avoid chosen ciphertext attacks, the error message should not reveal + // which kind of decoding error happened OPENSSL_PUT_ERROR(RSA, RSA_R_OAEP_DECODING_ERROR); err: OPENSSL_free(db); @@ -499,10 +499,10 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, hLen = EVP_MD_size(Hash); - /* Negative sLen has special meanings: - * -1 sLen == hLen - * -2 salt length is autorecovered from signature - * -N reserved */ + // Negative sLen has special meanings: + // -1 sLen == hLen + // -2 salt length is autorecovered from signature + // -N reserved if (sLen == -1) { sLen = hLen; } else if (sLen == -2) { @@ -523,7 +523,7 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, emLen--; } if (emLen < (int)hLen + 2 || emLen < ((int)hLen + sLen + 2)) { - /* sLen can be small negative */ + // sLen can be small negative OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -612,10 +612,10 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, goto err; } - /* Negative sLenRequested has special meanings: - * -1 sLen == hLen - * -2 salt length is maximized - * -N reserved */ + // Negative sLenRequested has special meanings: + // -1 sLen == hLen + // -2 salt length is maximized + // -N reserved size_t sLen; if (sLenRequested == -1) { sLen = hLen; @@ -658,16 +658,16 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, goto err; } - /* Generate dbMask in place then perform XOR on it */ + // Generate dbMask in place then perform XOR on it if (!PKCS1_MGF1(EM, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } p = EM; - /* Initial PS XORs with all zeroes which is a NOP so just update - * pointer. Note from a test above this value is guaranteed to - * be non-negative. 
*/ + // Initial PS XORs with all zeroes which is a NOP so just update + // pointer. Note from a test above this value is guaranteed to + // be non-negative. p += emLen - sLen - hLen - 2; *p++ ^= 0x1; if (sLen > 0) { @@ -679,7 +679,7 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, EM[0] &= 0xFF >> (8 - MSBits); } - /* H is already in place so just set final 0xbc */ + // H is already in place so just set final 0xbc EM[emLen - 1] = 0xbc; diff --git a/crypto/fipsmodule/rsa/rsa.c b/crypto/fipsmodule/rsa/rsa.c index a434cb1a..17348c1e 100644 --- a/crypto/fipsmodule/rsa/rsa.c +++ b/crypto/fipsmodule/rsa/rsa.c @@ -301,25 +301,25 @@ void *RSA_get_ex_data(const RSA *rsa, int idx) { return CRYPTO_get_ex_data(&rsa->ex_data, idx); } -/* SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's - * the length of an MD5 and SHA1 hash. */ +// SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's +// the length of an MD5 and SHA1 hash. static const unsigned SSL_SIG_LENGTH = 36; -/* pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is - * to be signed with PKCS#1. */ +// pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is +// to be signed with PKCS#1. struct pkcs1_sig_prefix { - /* nid identifies the hash function. */ + // nid identifies the hash function. int nid; - /* hash_len is the expected length of the hash function. */ + // hash_len is the expected length of the hash function. uint8_t hash_len; - /* len is the number of bytes of |bytes| which are valid. */ + // len is the number of bytes of |bytes| which are valid. uint8_t len; - /* bytes contains the DER bytes. */ + // bytes contains the DER bytes. uint8_t bytes[19]; }; -/* kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with - * different hash functions. */ +// kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with +// different hash functions. 
static const struct pkcs1_sig_prefix kPKCS1SigPrefixes[] = { { NID_md5, @@ -374,7 +374,7 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len, unsigned i; if (hash_nid == NID_md5_sha1) { - /* Special case: SSL signature, just check the length. */ + // Special case: SSL signature, just check the length. if (msg_len != SSL_SIG_LENGTH) { OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); return 0; @@ -516,8 +516,8 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, goto out; } - /* Check that no other information follows the hash value (FIPS 186-4 Section - * 5.5) and it matches the expected hash. */ + // Check that no other information follows the hash value (FIPS 186-4 Section + // 5.5) and it matches the expected hash. if (len != signed_msg_len || OPENSSL_memcmp(buf, signed_msg, len) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE); goto out; @@ -571,7 +571,7 @@ int RSA_check_key(const RSA *key) { int ok = 0, has_crt_values; if (RSA_is_opaque(key)) { - /* Opaque keys can't be checked. */ + // Opaque keys can't be checked. return 1; } @@ -586,8 +586,8 @@ int RSA_check_key(const RSA *key) { } if (!key->d || !key->p) { - /* For a public key, or without p and q, there's nothing that can be - * checked. */ + // For a public key, or without p and q, there's nothing that can be + // checked. return 1; } @@ -608,7 +608,7 @@ int RSA_check_key(const RSA *key) { BN_init(&iqmp_times_q); if (!BN_mul(&n, key->p, key->q, ctx) || - /* lcm = lcm(p, q) */ + // lcm = lcm(p, q) !BN_sub(&pm1, key->p, BN_value_one()) || !BN_sub(&qm1, key->q, BN_value_one()) || !BN_mul(&lcm, &pm1, &qm1, ctx) || @@ -619,7 +619,7 @@ int RSA_check_key(const RSA *key) { if (!BN_div(&lcm, NULL, &lcm, &gcd, ctx) || !BN_gcd(&gcd, &pm1, &qm1, ctx) || - /* de = d*e mod lcm(p, q). */ + // de = d*e mod lcm(p, q). 
!BN_mod_mul(&de, key->d, key->e, &lcm, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); goto out; @@ -643,11 +643,11 @@ int RSA_check_key(const RSA *key) { } if (has_crt_values) { - if (/* dmp1 = d mod (p-1) */ + if (// dmp1 = d mod (p-1) !BN_mod(&dmp1, key->d, &pm1, ctx) || - /* dmq1 = d mod (q-1) */ + // dmq1 = d mod (q-1) !BN_mod(&dmq1, key->d, &qm1, ctx) || - /* iqmp = q^-1 mod p */ + // iqmp = q^-1 mod p !BN_mod_mul(&iqmp_times_q, key->iqmp, key->q, key->p, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); goto out; @@ -680,7 +680,7 @@ out: } -/* This is the product of the 132 smallest odd primes, from 3 to 751. */ +// This is the product of the 132 smallest odd primes, from 3 to 751. static const BN_ULONG kSmallFactorsLimbs[] = { TOBN(0xc4309333, 0x3ef4e3e1), TOBN(0x71161eb6, 0xcd2d655f), TOBN(0x95e2238c, 0x0bf94862), TOBN(0x3eb233d3, 0x24f7912b), @@ -703,7 +703,7 @@ DEFINE_LOCAL_DATA(BIGNUM, g_small_factors) { int RSA_check_fips(RSA *key) { if (RSA_is_opaque(key)) { - /* Opaque keys can't be checked. */ + // Opaque keys can't be checked. OPENSSL_PUT_ERROR(RSA, RSA_R_PUBLIC_KEY_VALIDATION_FAILED); return 0; } @@ -723,7 +723,7 @@ int RSA_check_fips(RSA *key) { int ret = 1; - /* Perform partial public key validation of RSA keys (SP 800-89 5.3.3). */ + // Perform partial public key validation of RSA keys (SP 800-89 5.3.3). enum bn_primality_result_t primality_result; if (BN_num_bits(key->e) <= 16 || BN_num_bits(key->e) > 256 || @@ -742,15 +742,15 @@ int RSA_check_fips(RSA *key) { BN_CTX_free(ctx); if (!ret || key->d == NULL || key->p == NULL) { - /* On a failure or on only a public key, there's nothing else can be - * checked. */ + // On a failure or on only a public key, there's nothing else can be + // checked. return ret; } - /* FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG, - * section 9.9, it is not known whether |rsa| will be used for signing or - * encryption, so either pair-wise consistency self-test is acceptable. 
We - * perform a signing test. */ + // FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG, + // section 9.9, it is not known whether |rsa| will be used for signing or + // encryption, so either pair-wise consistency self-test is acceptable. We + // perform a signing test. uint8_t data[32] = {0}; unsigned sig_len = RSA_size(key); uint8_t *sig = OPENSSL_malloc(sig_len); diff --git a/crypto/fipsmodule/rsa/rsa_impl.c b/crypto/fipsmodule/rsa/rsa_impl.c index b126164d..f8cb9e36 100644 --- a/crypto/fipsmodule/rsa/rsa_impl.c +++ b/crypto/fipsmodule/rsa/rsa_impl.c @@ -80,15 +80,15 @@ static int check_modulus_and_exponent_sizes(const RSA *rsa) { return 0; } - /* Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as - * the limit based on the recommendations in [1] and [2]. Windows CryptoAPI - * doesn't support values larger than 32 bits [3], so it is unlikely that - * exponents larger than 32 bits are being used for anything Windows commonly - * does. - * - * [1] https://www.imperialviolet.org/2012/03/16/rsae.html - * [2] https://www.imperialviolet.org/2012/03/17/rsados.html - * [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx */ + // Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as + // the limit based on the recommendations in [1] and [2]. Windows CryptoAPI + // doesn't support values larger than 32 bits [3], so it is unlikely that + // exponents larger than 32 bits are being used for anything Windows commonly + // does. + // + // [1] https://www.imperialviolet.org/2012/03/16/rsae.html + // [2] https://www.imperialviolet.org/2012/03/17/rsados.html + // [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx static const unsigned kMaxExponentBits = 33; if (BN_num_bits(rsa->e) > kMaxExponentBits) { @@ -96,10 +96,10 @@ static int check_modulus_and_exponent_sizes(const RSA *rsa) { return 0; } - /* Verify |n > e|. 
Comparing |rsa_bits| to |kMaxExponentBits| is a small - * shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits| - * is much smaller than the minimum RSA key size that any application should - * accept. */ + // Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small + // shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits| + // is much smaller than the minimum RSA key size that any application should + // accept. if (rsa_bits <= kMaxExponentBits) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; @@ -154,7 +154,7 @@ int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, i = RSA_padding_add_PKCS1_type_2(buf, rsa_size, in, in_len); break; case RSA_PKCS1_OAEP_PADDING: - /* Use the default parameters: SHA-1 for both hashes and no label. */ + // Use the default parameters: SHA-1 for both hashes and no label. i = RSA_padding_add_PKCS1_OAEP_mgf1(buf, rsa_size, in, in_len, NULL, 0, NULL, NULL); break; @@ -175,7 +175,7 @@ int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, } if (BN_ucmp(f, rsa->n) >= 0) { - /* usually the padding functions would catch this */ + // usually the padding functions would catch this OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -185,8 +185,8 @@ int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, goto err; } - /* put in leading 0 bytes if the number is less than the length of the - * modulus */ + // put in leading 0 bytes if the number is less than the length of the + // modulus if (!BN_bn2bin_padded(out, rsa_size, result)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; @@ -208,18 +208,18 @@ err: return ret; } -/* MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per - * RSA*. Then this limit is exceeded, BN_BLINDING objects will be created and - * destroyed as needed. */ +// MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per +// RSA*. 
Then this limit is exceeded, BN_BLINDING objects will be created and +// destroyed as needed. #define MAX_BLINDINGS_PER_RSA 1024 -/* rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by - * allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If - * none are free, the cache will be extended by a extra element and the new - * BN_BLINDING is returned. - * - * On success, the index of the assigned BN_BLINDING is written to - * |*index_used| and must be passed to |rsa_blinding_release| when finished. */ +// rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by +// allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If +// none are free, the cache will be extended by a extra element and the new +// BN_BLINDING is returned. +// +// On success, the index of the assigned BN_BLINDING is written to +// |*index_used| and must be passed to |rsa_blinding_release| when finished. static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, BN_CTX *ctx) { assert(ctx != NULL); @@ -249,8 +249,8 @@ static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, overflow = rsa->num_blindings >= MAX_BLINDINGS_PER_RSA; - /* We didn't find a free BN_BLINDING to use so increase the length of - * the arrays by one and use the newly created element. */ + // We didn't find a free BN_BLINDING to use so increase the length of + // the arrays by one and use the newly created element. CRYPTO_MUTEX_unlock_write(&rsa->lock); ret = BN_BLINDING_new(); @@ -259,8 +259,8 @@ static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, } if (overflow) { - /* We cannot add any more cached BN_BLINDINGs so we use |ret| - * and mark it for destruction in |rsa_blinding_release|. */ + // We cannot add any more cached BN_BLINDINGs so we use |ret| + // and mark it for destruction in |rsa_blinding_release|. 
*index_used = MAX_BLINDINGS_PER_RSA; return ret; } @@ -302,12 +302,12 @@ err1: return NULL; } -/* rsa_blinding_release marks the cached BN_BLINDING at the given index as free - * for other threads to use. */ +// rsa_blinding_release marks the cached BN_BLINDING at the given index as free +// for other threads to use. static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding, unsigned blinding_index) { if (blinding_index == MAX_BLINDINGS_PER_RSA) { - /* This blinding wasn't cached. */ + // This blinding wasn't cached. BN_BLINDING_free(blinding); return; } @@ -317,7 +317,7 @@ static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding, CRYPTO_MUTEX_unlock_write(&rsa->lock); } -/* signing */ +// signing int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { @@ -382,7 +382,7 @@ int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, if (padding == RSA_NO_PADDING) { buf = out; } else { - /* Allocate a temporary buffer to hold the padded plaintext. */ + // Allocate a temporary buffer to hold the padded plaintext. buf = OPENSSL_malloc(rsa_size); if (buf == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); @@ -405,7 +405,7 @@ int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, RSA_padding_check_PKCS1_type_2(out, out_len, rsa_size, buf, rsa_size); break; case RSA_PKCS1_OAEP_PADDING: - /* Use the default parameters: SHA-1 for both hashes and no label. */ + // Use the default parameters: SHA-1 for both hashes and no label. ret = RSA_padding_check_PKCS1_OAEP_mgf1(out, out_len, rsa_size, buf, rsa_size, NULL, 0, NULL, NULL); break; @@ -476,7 +476,7 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, if (padding == RSA_NO_PADDING) { buf = out; } else { - /* Allocate a temporary buffer to hold the padded plaintext. */ + // Allocate a temporary buffer to hold the padded plaintext. 
buf = OPENSSL_malloc(rsa_size); if (buf == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); @@ -562,7 +562,7 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, } if (BN_ucmp(f, rsa->n) >= 0) { - /* Usually the padding functions would catch this. */ + // Usually the padding functions would catch this. OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -575,10 +575,10 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, const int do_blinding = (rsa->flags & RSA_FLAG_NO_BLINDING) == 0; if (rsa->e == NULL && do_blinding) { - /* We cannot do blinding or verification without |e|, and continuing without - * those countermeasures is dangerous. However, the Java/Android RSA API - * requires support for keys where only |d| and |n| (and not |e|) are known. - * The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|. */ + // We cannot do blinding or verification without |e|, and continuing without + // those countermeasures is dangerous. However, the Java/Android RSA API + // requires support for keys where only |d| and |n| (and not |e|) are known. + // The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|. OPENSSL_PUT_ERROR(RSA, RSA_R_NO_PUBLIC_EXPONENT); goto err; } @@ -604,15 +604,15 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, goto err; } - /* Verify the result to protect against fault attacks as described in the - * 1997 paper "On the Importance of Checking Cryptographic Protocols for - * Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some - * implementations do this only when the CRT is used, but we do it in all - * cases. Section 6 of the aforementioned paper describes an attack that - * works when the CRT isn't used. That attack is much less likely to succeed - * than the CRT attack, but there have likely been improvements since 1997. - * - * This check is cheap assuming |e| is small; it almost always is. 
*/ + // Verify the result to protect against fault attacks as described in the + // 1997 paper "On the Importance of Checking Cryptographic Protocols for + // Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some + // implementations do this only when the CRT is used, but we do it in all + // cases. Section 6 of the aforementioned paper describes an attack that + // works when the CRT isn't used. That attack is much less likely to succeed + // than the CRT attack, but there have likely been improvements since 1997. + // + // This check is cheap assuming |e| is small; it almost always is. if (rsa->e != NULL) { BIGNUM *vrfy = BN_CTX_get(ctx); if (vrfy == NULL || @@ -682,22 +682,22 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { goto err; } - /* compute I mod q */ + // compute I mod q if (!BN_mod(r1, I, rsa->q, ctx)) { goto err; } - /* compute r1^dmq1 mod q */ + // compute r1^dmq1 mod q if (!BN_mod_exp_mont_consttime(m1, r1, rsa->dmq1, rsa->q, ctx, rsa->mont_q)) { goto err; } - /* compute I mod p */ + // compute I mod p if (!BN_mod(r1, I, rsa->p, ctx)) { goto err; } - /* compute r1^dmp1 mod p */ + // compute r1^dmp1 mod p if (!BN_mod_exp_mont_consttime(r0, r1, rsa->dmp1, rsa->p, ctx, rsa->mont_p)) { goto err; } @@ -705,8 +705,8 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { if (!BN_sub(r0, r0, m1)) { goto err; } - /* This will help stop the size of r0 increasing, which does - * affect the multiply if it optimised for a power of 2 size */ + // This will help stop the size of r0 increasing, which does + // affect the multiply if it optimised for a power of 2 size if (BN_is_negative(r0)) { if (!BN_add(r0, r0, rsa->p)) { goto err; @@ -721,12 +721,12 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { goto err; } - /* If p < q it is occasionally possible for the correction of - * adding 'p' if r0 is negative above to leave the result still - * negative. 
This can break the private key operations: the following - * second correction should *always* correct this rare occurrence. - * This will *never* happen with OpenSSL generated keys because - * they ensure p > q [steve] */ + // If p < q it is occasionally possible for the correction of + // adding 'p' if r0 is negative above to leave the result still + // negative. This can break the private key operations: the following + // second correction should *always* correct this rare occurrence. + // This will *never* happen with OpenSSL generated keys because + // they ensure p > q [steve] if (BN_is_negative(r0)) { if (!BN_add(r0, r0, rsa->p)) { goto err; @@ -753,20 +753,19 @@ static int ensure_bignum(BIGNUM **out) { return *out != NULL; } -/* kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2¹⁵³⁵×√2⌋. This is - * chosen to give enough precision for 3072-bit RSA, the largest key size FIPS - * specifies. Key sizes beyond this will round up. - * - * To verify this number, check that n² < 2³⁰⁷¹ < (n+1)², where n is value - * represented here. Note the components are listed in little-endian order. Here - * is some sample Python code to check: - * - * >>> TOBN = lambda a, b: a << 32 | b - * >>> l = [ ] - * >>> n = sum(a * 2**(64*i) for i, a in enumerate(l)) - * >>> n**2 < 2**3071 < (n+1)**2 - * True - */ +// kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2¹⁵³⁵×√2⌋. This is +// chosen to give enough precision for 3072-bit RSA, the largest key size FIPS +// specifies. Key sizes beyond this will round up. +// +// To verify this number, check that n² < 2³⁰⁷¹ < (n+1)², where n is value +// represented here. Note the components are listed in little-endian order. 
Here +// is some sample Python code to check: +// +// >>> TOBN = lambda a, b: a << 32 | b +// >>> l = [ ] +// >>> n = sum(a * 2**(64*i) for i, a in enumerate(l)) +// >>> n**2 < 2**3071 < (n+1)**2 +// True const BN_ULONG kBoringSSLRSASqrtTwo[] = { TOBN(0xdea06241, 0xf7aa81c2), TOBN(0xf6a1be3f, 0xca221307), TOBN(0x332a5e9f, 0x7bda1ebf), TOBN(0x0104dc01, 0xfe32352f), @@ -787,7 +786,7 @@ int rsa_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len) { OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t), crypto_word_t_too_small); int ret = 0; - /* Process the words in little-endian order. */ + // Process the words in little-endian order. for (size_t i = 0; i < len; i++) { crypto_word_t eq = constant_time_eq_w(a[i], b[i]); crypto_word_t lt = constant_time_lt_w(a[i], b[i]); @@ -805,9 +804,9 @@ int rsa_greater_than_pow2(const BIGNUM *b, int n) { return b_bits > n + 1 || (b_bits == n + 1 && !BN_is_pow2(b)); } -/* generate_prime sets |out| to a prime with length |bits| such that |out|-1 is - * relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to - * |p|. */ +// generate_prime sets |out| to a prime with length |bits| such that |out|-1 is +// relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to +// |p|. static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, const BIGNUM *p, BN_CTX *ctx, BN_GENCB *cb) { if (bits < 128 || (bits % BN_BITS2) != 0) { @@ -815,7 +814,7 @@ static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, return 0; } - /* Ensure the bound on |tries| does not overflow. */ + // Ensure the bound on |tries| does not overflow. if (bits >= INT_MAX/5) { OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE); return 0; @@ -828,19 +827,19 @@ static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, goto err; } - /* See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is - * nlen/2. */ + // See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is + // nlen/2. 
for (;;) { - /* Generate a random number of length |bits| where the bottom bit is set - * (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the - * bound checked below in steps 4.4 and 5.5). */ + // Generate a random number of length |bits| where the bottom bit is set + // (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the + // bound checked below in steps 4.4 and 5.5). if (!BN_rand(out, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD) || !BN_GENCB_call(cb, BN_GENCB_GENERATED, rand_tries++)) { goto err; } if (p != NULL) { - /* If |p| and |out| are too close, try again (step 5.4). */ + // If |p| and |out| are too close, try again (step 5.4). if (!BN_sub(tmp, out, p)) { goto err; } @@ -850,21 +849,21 @@ static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, } } - /* If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5). - * - * We check the most significant words, so we retry if ⌊out/2^k⌋ <= ⌊b/2^k⌋, - * where b = 2^(bits-1)×√2 and k = max(0, bits - 1536). For key sizes up to - * 3072 (bits = 1536), k = 0, so we are testing that ⌊out⌋ <= ⌊b⌋. out is an - * integer and b is not, so this is equivalent to out < b. That is, the - * comparison is exact for FIPS key sizes. - * - * For larger keys, the comparison is approximate, leaning towards - * retrying. That is, we reject a negligible fraction of primes that are - * within the FIPS bound, but we will never accept a prime outside the - * bound, ensuring the resulting RSA key is the right size. Specifically, if - * the FIPS bound holds, we have ⌊out/2^k⌋ < out/2^k < b/2^k. This implies - * ⌊out/2^k⌋ <= ⌊b/2^k⌋. That is, the FIPS bound implies our bound and so we - * are slightly tighter. */ + // If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5). + // + // We check the most significant words, so we retry if ⌊out/2^k⌋ <= ⌊b/2^k⌋, + // where b = 2^(bits-1)×√2 and k = max(0, bits - 1536). For key sizes up to + // 3072 (bits = 1536), k = 0, so we are testing that ⌊out⌋ <= ⌊b⌋. 
out is an + // integer and b is not, so this is equivalent to out < b. That is, the + // comparison is exact for FIPS key sizes. + // + // For larger keys, the comparison is approximate, leaning towards + // retrying. That is, we reject a negligible fraction of primes that are + // within the FIPS bound, but we will never accept a prime outside the + // bound, ensuring the resulting RSA key is the right size. Specifically, if + // the FIPS bound holds, we have ⌊out/2^k⌋ < out/2^k < b/2^k. This implies + // ⌊out/2^k⌋ <= ⌊b/2^k⌋. That is, the FIPS bound implies our bound and so we + // are slightly tighter. size_t out_len = (size_t)out->top; assert(out_len == (size_t)bits / BN_BITS2); size_t to_check = kBoringSSLRSASqrtTwoLen; @@ -877,13 +876,13 @@ static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, continue; } - /* Check gcd(out-1, e) is one (steps 4.5 and 5.6). */ + // Check gcd(out-1, e) is one (steps 4.5 and 5.6). if (!BN_sub(tmp, out, BN_value_one()) || !BN_gcd(tmp, tmp, e, ctx)) { goto err; } if (BN_is_one(tmp)) { - /* Test |out| for primality (steps 4.5.1 and 5.6.1). */ + // Test |out| for primality (steps 4.5.1 and 5.6.1). int is_probable_prime; if (!BN_primality_test(&is_probable_prime, out, BN_prime_checks, ctx, 1, cb)) { @@ -895,8 +894,8 @@ static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, } } - /* If we've tried too many times to find a prime, abort (steps 4.7 and - * 5.8). */ + // If we've tried too many times to find a prime, abort (steps 4.7 and + // 5.8). tries++; if (tries >= bits * 5) { OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS); @@ -913,15 +912,15 @@ err: } int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { - /* See FIPS 186-4 appendix B.3. This function implements a generalized version - * of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks - * for FIPS-compliant key generation. */ + // See FIPS 186-4 appendix B.3. 
This function implements a generalized version + // of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks + // for FIPS-compliant key generation. - /* Always generate RSA keys which are a multiple of 128 bits. Round |bits| - * down as needed. */ + // Always generate RSA keys which are a multiple of 128 bits. Round |bits| + // down as needed. bits &= ~127; - /* Reject excessively small keys. */ + // Reject excessively small keys. if (bits < 256) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; @@ -941,7 +940,7 @@ int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { goto bn_err; } - /* We need the RSA components non-NULL. */ + // We need the RSA components non-NULL. if (!ensure_bignum(&rsa->n) || !ensure_bignum(&rsa->d) || !ensure_bignum(&rsa->e) || @@ -959,8 +958,8 @@ int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { int prime_bits = bits / 2; do { - /* Generate p and q, each of size |prime_bits|, using the steps outlined in - * appendix FIPS 186-4 appendix B.3.3. */ + // Generate p and q, each of size |prime_bits|, using the steps outlined in + // appendix FIPS 186-4 appendix B.3.3. if (!generate_prime(rsa->p, prime_bits, rsa->e, NULL, ctx, cb) || !BN_GENCB_call(cb, 3, 0) || !generate_prime(rsa->q, prime_bits, rsa->e, rsa->p, ctx, cb) || @@ -974,13 +973,13 @@ int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { rsa->q = tmp; } - /* Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs - * from typical RSA implementations which use (p-1)*(q-1). - * - * Note this means the size of d might reveal information about p-1 and - * q-1. However, we do operations with Chinese Remainder Theorem, so we only - * use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient - * does not affect those two values. */ + // Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs + // from typical RSA implementations which use (p-1)*(q-1). 
+ // + // Note this means the size of d might reveal information about p-1 and + // q-1. However, we do operations with Chinese Remainder Theorem, so we only + // use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient + // does not affect those two values. if (!BN_sub(pm1, rsa->p, BN_value_one()) || !BN_sub(qm1, rsa->q, BN_value_one()) || !BN_mul(totient, pm1, qm1, ctx) || @@ -990,39 +989,39 @@ int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { goto bn_err; } - /* Check that |rsa->d| > 2^|prime_bits| and try again if it fails. See - * appendix B.3.1's guidance on values for d. */ + // Check that |rsa->d| > 2^|prime_bits| and try again if it fails. See + // appendix B.3.1's guidance on values for d. } while (!rsa_greater_than_pow2(rsa->d, prime_bits)); - if (/* Calculate n. */ + if (// Calculate n. !BN_mul(rsa->n, rsa->p, rsa->q, ctx) || - /* Calculate d mod (p-1). */ + // Calculate d mod (p-1). !BN_mod(rsa->dmp1, rsa->d, pm1, ctx) || - /* Calculate d mod (q-1) */ + // Calculate d mod (q-1) !BN_mod(rsa->dmq1, rsa->d, qm1, ctx)) { goto bn_err; } - /* Sanity-check that |rsa->n| has the specified size. This is implied by - * |generate_prime|'s bounds. */ + // Sanity-check that |rsa->n| has the specified size. This is implied by + // |generate_prime|'s bounds. if (BN_num_bits(rsa->n) != (unsigned)bits) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } - /* Calculate inverse of q mod p. Note that although RSA key generation is far - * from constant-time, |bn_mod_inverse_secret_prime| uses the same modular - * exponentation logic as in RSA private key operations and, if the RSAZ-1024 - * code is enabled, will be optimized for common RSA prime sizes. */ + // Calculate inverse of q mod p. 
Note that although RSA key generation is far + // from constant-time, |bn_mod_inverse_secret_prime| uses the same modular + // exponentation logic as in RSA private key operations and, if the RSAZ-1024 + // code is enabled, will be optimized for common RSA prime sizes. if (!BN_MONT_CTX_set_locked(&rsa->mont_p, &rsa->lock, rsa->p, ctx) || !bn_mod_inverse_secret_prime(rsa->iqmp, rsa->q, rsa->p, ctx, rsa->mont_p)) { goto bn_err; } - /* The key generation process is complex and thus error-prone. It could be - * disastrous to generate and then use a bad key so double-check that the key - * makes sense. */ + // The key generation process is complex and thus error-prone. It could be + // disastrous to generate and then use a bad key so double-check that the key + // makes sense. if (!RSA_check_key(rsa)) { OPENSSL_PUT_ERROR(RSA, RSA_R_INTERNAL_ERROR); goto err; @@ -1043,8 +1042,8 @@ err: } int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb) { - /* FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit - * primes, respectively) with the prime generation method we use. */ + // FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit + // primes, respectively) with the prime generation method we use. if (bits != 2048 && bits != 3072) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS); return 0; @@ -1060,9 +1059,9 @@ int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb) { } DEFINE_METHOD_FUNCTION(RSA_METHOD, RSA_default_method) { - /* All of the methods are NULL to make it easier for the compiler/linker to - * drop unused functions. The wrapper functions will select the appropriate - * |rsa_default_*| implementation. */ + // All of the methods are NULL to make it easier for the compiler/linker to + // drop unused functions. The wrapper functions will select the appropriate + // |rsa_default_*| implementation. 
OPENSSL_memset(out, 0, sizeof(RSA_METHOD)); out->common.is_static = 1; out->flags = RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE; diff --git a/crypto/fipsmodule/sha/sha1-altivec.c b/crypto/fipsmodule/sha/sha1-altivec.c index 14e2bae9..3152827a 100644 --- a/crypto/fipsmodule/sha/sha1-altivec.c +++ b/crypto/fipsmodule/sha/sha1-altivec.c @@ -54,14 +54,14 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -/* Altivec-optimized SHA1 in C. This is tested on ppc64le only. - * - * References: - * https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 - * http://arctic.org/~dean/crypto/sha1.html - * - * This code used the generic SHA-1 from OpenSSL as a basis and AltiVec - * optimisations were added on top. */ +// Altivec-optimized SHA1 in C. This is tested on ppc64le only. +// +// References: +// https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 +// http://arctic.org/~dean/crypto/sha1.html +// +// This code used the generic SHA-1 from OpenSSL as a basis and AltiVec +// optimisations were added on top. #include @@ -76,11 +76,11 @@ static uint32_t rotate(uint32_t a, int n) { return (a << n) | (a >> (32 - n)); } typedef vector unsigned int vec_uint32_t; typedef vector unsigned char vec_uint8_t; -/* Vector constants */ +// Vector constants static const vec_uint8_t k_swap_endianness = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}; -/* Shift amounts for byte and bit shifts and rotations */ +// Shift amounts for byte and bit shifts and rotations static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}; static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96, @@ -91,18 +91,18 @@ static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96, #define K_40_59 0x8f1bbcdcUL #define K_60_79 0xca62c1d6UL -/* Vector versions of the above. */ +// Vector versions of the above. 
static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19}; static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39}; static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59}; static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79}; -/* vector message scheduling: compute message schedule for round i..i+3 where i - * is divisible by 4. We return the schedule w[i..i+3] as a vector. In - * addition, we also precompute sum w[i..+3] and an additive constant K. This - * is done to offload some computation of f() in the integer execution units. - * - * Byte shifting code below may not be correct for big-endian systems. */ +// vector message scheduling: compute message schedule for round i..i+3 where i +// is divisible by 4. We return the schedule w[i..i+3] as a vector. In +// addition, we also precompute sum w[i..+3] and an additive constant K. This +// is done to offload some computation of f() in the integer execution units. +// +// Byte shifting code below may not be correct for big-endian systems. 
static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data, vec_uint32_t k) { const vector unsigned char unaligned_data = @@ -113,17 +113,17 @@ static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data, return w; } -/* Compute w[i..i+3] using these steps for i in [16, 20, 24, 28] - * - * w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 - * w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1 - * w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1 - * w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1 - * - * w[ i] = w'[ i] - * w[i+1] = w'[i+1] - * w[i+2] = w'[i+2] - * w[i+3] = w'[i+3] ^ (w'[i] <<< 1) */ +// Compute w[i..i+3] using these steps for i in [16, 20, 24, 28] +// +// w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 +// w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1 +// w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1 +// w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1 +// +// w[ i] = w'[ i] +// w[i+1] = w'[i+1] +// w[i+2] = w'[i+2] +// w[i+3] = w'[i+3] ^ (w'[i] <<< 1) static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4, vec_uint32_t minus_8, vec_uint32_t minus_12, vec_uint32_t minus_16, vec_uint32_t k) { @@ -138,8 +138,8 @@ static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4, return w; } -/* Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 76] - * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 */ +// Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 76] +// w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4, vec_uint32_t minus_8, vec_uint32_t minus_16, vec_uint32_t minus_28, vec_uint32_t minus_32, @@ -152,17 +152,17 @@ static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4, return w; } -/* As pointed out by Wei Dai , F() below can be simplified - * to the code in F_00_19. 
Wei attributes these optimisations to Peter - * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define - * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another - * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */ +// As pointed out by Wei Dai , F() below can be simplified +// to the code in F_00_19. Wei attributes these optimisations to Peter +// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define +// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another +// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a #define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) #define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) #define F_60_79(b, c, d) F_20_39(b, c, d) -/* We pre-added the K constants during message scheduling. */ +// We pre-added the K constants during message scheduling. #define BODY_00_19(i, a, b, c, d, e, f) \ do { \ (f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \ @@ -318,7 +318,7 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { BODY_60_79(74, E, T, A, B, C, D); BODY_60_79(75, D, E, T, A, B, C); - /* We don't use the last value */ + // We don't use the last value (void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k); BODY_60_79(76, C, D, E, T, A, B); BODY_60_79(77, B, C, D, E, T, A); @@ -345,7 +345,7 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { } } -#endif /* OPENSSL_PPC64LE */ +#endif // OPENSSL_PPC64LE #undef K_00_19 #undef K_20_39 diff --git a/crypto/fipsmodule/sha/sha1.c b/crypto/fipsmodule/sha/sha1.c index 7b445632..7ce01938 100644 --- a/crypto/fipsmodule/sha/sha1.c +++ b/crypto/fipsmodule/sha/sha1.c @@ -131,11 +131,11 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #define K_40_59 0x8f1bbcdcUL #define K_60_79 0xca62c1d6UL -/* As pointed out by Wei Dai , F() below can be 
simplified - * to the code in F_00_19. Wei attributes these optimisations to Peter - * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define - * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another - * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */ +// As pointed out by Wei Dai , F() below can be simplified +// to the code in F_00_19. Wei attributes these optimisations to Peter +// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define +// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another +// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a #define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) #define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) diff --git a/crypto/fipsmodule/sha/sha256.c b/crypto/fipsmodule/sha/sha256.c index cd6becba..6d709a67 100644 --- a/crypto/fipsmodule/sha/sha256.c +++ b/crypto/fipsmodule/sha/sha256.c @@ -128,15 +128,15 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) { #define HASH_CTX SHA256_CTX #define HASH_CBLOCK 64 -/* Note that FIPS180-2 discusses "Truncation of the Hash Function Output." - * default: case below covers for it. It's not clear however if it's permitted - * to truncate to amount of bytes not divisible by 4. I bet not, but if it is, - * then default: case shall be extended. For reference. Idea behind separate - * cases for pre-defined lenghts is to let the compiler decide if it's - * appropriate to unroll small loops. - * - * TODO(davidben): The small |md_len| case is one of the few places a low-level - * hash 'final' function can fail. This should never happen. */ +// Note that FIPS180-2 discusses "Truncation of the Hash Function Output." +// default: case below covers for it. It's not clear however if it's permitted +// to truncate to amount of bytes not divisible by 4. I bet not, but if it is, +// then default: case shall be extended. 
For reference. Idea behind separate +// cases for pre-defined lenghts is to let the compiler decide if it's +// appropriate to unroll small loops. +// +// TODO(davidben): The small |md_len| case is one of the few places a low-level +// hash 'final' function can fail. This should never happen. #define HASH_MAKE_STRING(c, s) \ do { \ uint32_t ll; \ @@ -196,9 +196,9 @@ static const uint32_t K256[64] = { #define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n)))) -/* FIPS specification refers to right rotations, while our ROTATE macro - * is left one. This is why you might notice that rotation coefficients - * differ from those observed in FIPS document by 32-N... */ +// FIPS specification refers to right rotations, while our ROTATE macro +// is left one. This is why you might notice that rotation coefficients +// differ from those observed in FIPS document by 32-N... #define Sigma0(x) (ROTATE((x), 30) ^ ROTATE((x), 19) ^ ROTATE((x), 10)) #define Sigma1(x) (ROTATE((x), 26) ^ ROTATE((x), 21) ^ ROTATE((x), 7)) #define sigma0(x) (ROTATE((x), 25) ^ ROTATE((x), 14) ^ ((x) >> 3)) @@ -314,7 +314,7 @@ static void sha256_block_data_order(uint32_t *state, const uint8_t *data, } } -#endif /* !SHA256_ASM */ +#endif // !SHA256_ASM #undef DATA_ORDER_IS_BIG_ENDIAN #undef HASH_CTX diff --git a/crypto/fipsmodule/sha/sha512.c b/crypto/fipsmodule/sha/sha512.c index 6e1f79ba..3902f50e 100644 --- a/crypto/fipsmodule/sha/sha512.c +++ b/crypto/fipsmodule/sha/sha512.c @@ -63,17 +63,17 @@ #include "../../internal.h" -/* IMPLEMENTATION NOTES. - * - * The 32-bit hash algorithms share a common byte-order neutral collector and - * padding function implementations that operate on unaligned data, - * ../md32_common.h. This SHA-512 implementation does not. 
Reasons - * [in reverse order] are: - * - * - It's the only 64-bit hash algorithm for the moment of this writing, - * there is no need for common collector/padding implementation [yet]; - * - By supporting only a transform function that operates on *aligned* data - * the collector/padding function is simpler and easier to optimize. */ +// IMPLEMENTATION NOTES. +// +// The 32-bit hash algorithms share a common byte-order neutral collector and +// padding function implementations that operate on unaligned data, +// ../md32_common.h. This SHA-512 implementation does not. Reasons +// [in reverse order] are: +// +// - It's the only 64-bit hash algorithm for the moment of this writing, +// there is no need for common collector/padding implementation [yet]; +// - By supporting only a transform function that operates on *aligned* data +// the collector/padding function is simpler and easier to optimize. #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ @@ -227,7 +227,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { uint8_t *p = (uint8_t *)sha->u.p; size_t n = sha->num; - p[n] = 0x80; /* There always is a room for one */ + p[n] = 0x80; // There always is a room for one n++; if (n > (sizeof(sha->u) - 16)) { OPENSSL_memset(p + n, 0, sizeof(sha->u) - n); @@ -256,13 +256,13 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { sha512_block_data_order(sha->h, (uint64_t *)p, 1); if (md == NULL) { - /* TODO(davidben): This NULL check is absent in other low-level hash 'final' - * functions and is one of the few places one can fail. */ + // TODO(davidben): This NULL check is absent in other low-level hash 'final' + // functions and is one of the few places one can fail. return 0; } switch (sha->md_len) { - /* Let compiler decide if it's appropriate to unroll... */ + // Let compiler decide if it's appropriate to unroll... 
case SHA384_DIGEST_LENGTH: for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) { uint64_t t = sha->h[n]; @@ -291,10 +291,10 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { *(md++) = (uint8_t)(t); } break; - /* ... as well as make sure md_len is not abused. */ + // ... as well as make sure md_len is not abused. default: - /* TODO(davidben): This bad |md_len| case is one of the few places a - * low-level hash 'final' function can fail. This should never happen. */ + // TODO(davidben): This bad |md_len| case is one of the few places a + // low-level hash 'final' function can fail. This should never happen. return 0; } @@ -392,7 +392,7 @@ static const uint64_t K512[80] = { #endif #endif #elif defined(_MSC_VER) -#if defined(_WIN64) /* applies to both IA-64 and AMD64 */ +#if defined(_WIN64) // applies to both IA-64 and AMD64 #pragma intrinsic(_rotr64) #define ROTR(a, n) _rotr64((a), n) #endif @@ -432,10 +432,8 @@ static uint64_t __fastcall __pull64be(const void *x) { #if defined(__i386) || defined(__i386__) || defined(_M_IX86) -/* - * This code should give better results on 32-bit CPU with less than - * ~24 registers, both size and performance wise... - */ +// This code should give better results on 32-bit CPU with less than +// ~24 registers, both size and performance wise... 
static void sha512_block_data_order(uint64_t *state, const uint64_t *W, size_t num) { uint64_t A, E, T; @@ -593,7 +591,7 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W, #endif -#endif /* !SHA512_ASM */ +#endif // !SHA512_ASM #undef ROTR #undef PULL64 diff --git a/crypto/hkdf/hkdf.c b/crypto/hkdf/hkdf.c index ae43b69f..23b60afe 100644 --- a/crypto/hkdf/hkdf.c +++ b/crypto/hkdf/hkdf.c @@ -26,7 +26,7 @@ int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len, const uint8_t *info, size_t info_len) { - /* https://tools.ietf.org/html/rfc5869#section-2 */ + // https://tools.ietf.org/html/rfc5869#section-2 uint8_t prk[EVP_MAX_MD_SIZE]; size_t prk_len; @@ -42,10 +42,10 @@ int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len) { - /* https://tools.ietf.org/html/rfc5869#section-2.2 */ + // https://tools.ietf.org/html/rfc5869#section-2.2 - /* If salt is not given, HashLength zeros are used. However, HMAC does that - * internally already so we can ignore it.*/ + // If salt is not given, HashLength zeros are used. However, HMAC does that + // internally already so we can ignore it. 
unsigned len; if (HMAC(digest, salt, salt_len, secret, secret_len, out_key, &len) == NULL) { OPENSSL_PUT_ERROR(HKDF, ERR_R_HMAC_LIB); @@ -59,7 +59,7 @@ int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *prk, size_t prk_len, const uint8_t *info, size_t info_len) { - /* https://tools.ietf.org/html/rfc5869#section-2.3 */ + // https://tools.ietf.org/html/rfc5869#section-2.3 const size_t digest_len = EVP_MD_size(digest); uint8_t previous[EVP_MAX_MD_SIZE]; size_t n, done = 0; @@ -67,7 +67,7 @@ int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, int ret = 0; HMAC_CTX hmac; - /* Expand key material to desired length. */ + // Expand key material to desired length. n = (out_len + digest_len - 1) / digest_len; if (out_len + digest_len < out_len || n > 255) { OPENSSL_PUT_ERROR(HKDF, HKDF_R_OUTPUT_TOO_LARGE); diff --git a/crypto/hkdf/hkdf_test.cc b/crypto/hkdf/hkdf_test.cc index b763b9b4..53526859 100644 --- a/crypto/hkdf/hkdf_test.cc +++ b/crypto/hkdf/hkdf_test.cc @@ -35,7 +35,7 @@ struct HKDFTestVector { const uint8_t out[82]; }; -/* These test vectors are from RFC 5869. */ +// These test vectors are from RFC 5869. static const HKDFTestVector kTests[] = { { EVP_sha256, diff --git a/crypto/internal.h b/crypto/internal.h index 83da68e7..87b69dab 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -146,7 +146,7 @@ extern "C" { #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) || \ defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE) -/* OPENSSL_cpuid_setup initializes the platform-specific feature cache. */ +// OPENSSL_cpuid_setup initializes the platform-specific feature cache. void OPENSSL_cpuid_setup(void); #endif @@ -158,42 +158,42 @@ typedef __uint128_t uint128_t; #define OPENSSL_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) -/* buffers_alias returns one if |a| and |b| alias and zero otherwise. 
*/ +// buffers_alias returns one if |a| and |b| alias and zero otherwise. static inline int buffers_alias(const uint8_t *a, size_t a_len, const uint8_t *b, size_t b_len) { - /* Cast |a| and |b| to integers. In C, pointer comparisons between unrelated - * objects are undefined whereas pointer to integer conversions are merely - * implementation-defined. We assume the implementation defined it in a sane - * way. */ + // Cast |a| and |b| to integers. In C, pointer comparisons between unrelated + // objects are undefined whereas pointer to integer conversions are merely + // implementation-defined. We assume the implementation defined it in a sane + // way. uintptr_t a_u = (uintptr_t)a; uintptr_t b_u = (uintptr_t)b; return a_u + a_len > b_u && b_u + b_len > a_u; } -/* Constant-time utility functions. - * - * The following methods return a bitmask of all ones (0xff...f) for true and 0 - * for false. This is useful for choosing a value based on the result of a - * conditional in constant time. For example, - * - * if (a < b) { - * c = a; - * } else { - * c = b; - * } - * - * can be written as - * - * crypto_word_t lt = constant_time_lt_w(a, b); - * c = constant_time_select_w(lt, a, b); */ - -/* crypto_word_t is the type that most constant-time functions use. Ideally we - * would like it to be |size_t|, but NaCl builds in 64-bit mode with 32-bit - * pointers, which means that |size_t| can be 32 bits when |BN_ULONG| is 64 - * bits. Since we want to be able to do constant-time operations on a - * |BN_ULONG|, |crypto_word_t| is defined as an unsigned value with the native - * word length. */ +// Constant-time utility functions. +// +// The following methods return a bitmask of all ones (0xff...f) for true and 0 +// for false. This is useful for choosing a value based on the result of a +// conditional in constant time. 
For example, +// +// if (a < b) { +// c = a; +// } else { +// c = b; +// } +// +// can be written as +// +// crypto_word_t lt = constant_time_lt_w(a, b); +// c = constant_time_select_w(lt, a, b); + +// crypto_word_t is the type that most constant-time functions use. Ideally we +// would like it to be |size_t|, but NaCl builds in 64-bit mode with 32-bit +// pointers, which means that |size_t| can be 32 bits when |BN_ULONG| is 64 +// bits. Since we want to be able to do constant-time operations on a +// |BN_ULONG|, |crypto_word_t| is defined as an unsigned value with the native +// word length. #if defined(OPENSSL_64_BIT) typedef uint64_t crypto_word_t; #elif defined(OPENSSL_32_BIT) @@ -211,139 +211,137 @@ typedef uint32_t crypto_word_t; #define CONSTTIME_TRUE_8 ((uint8_t)0xff) #define CONSTTIME_FALSE_8 ((uint8_t)0) -/* constant_time_msb_w returns the given value with the MSB copied to all the - * other bits. */ +// constant_time_msb_w returns the given value with the MSB copied to all the +// other bits. static inline crypto_word_t constant_time_msb_w(crypto_word_t a) { return 0u - (a >> (sizeof(a) * 8 - 1)); } -/* constant_time_lt_w returns 0xff..f if a < b and 0 otherwise. */ +// constant_time_lt_w returns 0xff..f if a < b and 0 otherwise. static inline crypto_word_t constant_time_lt_w(crypto_word_t a, crypto_word_t b) { - /* Consider the two cases of the problem: - * msb(a) == msb(b): a < b iff the MSB of a - b is set. - * msb(a) != msb(b): a < b iff the MSB of b is set. - * - * If msb(a) == msb(b) then the following evaluates as: - * msb(a^((a^b)|((a-b)^a))) == - * msb(a^((a-b) ^ a)) == (because msb(a^b) == 0) - * msb(a^a^(a-b)) == (rearranging) - * msb(a-b) (because ∀x. x^x == 0) - * - * Else, if msb(a) != msb(b) then the following evaluates as: - * msb(a^((a^b)|((a-b)^a))) == - * msb(a^(𝟙 | ((a-b)^a))) == (because msb(a^b) == 1 and 𝟙 - * represents a value s.t. 
msb(𝟙) = 1) - * msb(a^𝟙) == (because ORing with 1 results in 1) - * msb(b) - * - * - * Here is an SMT-LIB verification of this formula: - * - * (define-fun lt ((a (_ BitVec 32)) (b (_ BitVec 32))) (_ BitVec 32) - * (bvxor a (bvor (bvxor a b) (bvxor (bvsub a b) a))) - * ) - * - * (declare-fun a () (_ BitVec 32)) - * (declare-fun b () (_ BitVec 32)) - * - * (assert (not (= (= #x00000001 (bvlshr (lt a b) #x0000001f)) (bvult a b)))) - * (check-sat) - * (get-model) - */ + // Consider the two cases of the problem: + // msb(a) == msb(b): a < b iff the MSB of a - b is set. + // msb(a) != msb(b): a < b iff the MSB of b is set. + // + // If msb(a) == msb(b) then the following evaluates as: + // msb(a^((a^b)|((a-b)^a))) == + // msb(a^((a-b) ^ a)) == (because msb(a^b) == 0) + // msb(a^a^(a-b)) == (rearranging) + // msb(a-b) (because ∀x. x^x == 0) + // + // Else, if msb(a) != msb(b) then the following evaluates as: + // msb(a^((a^b)|((a-b)^a))) == + // msb(a^(𝟙 | ((a-b)^a))) == (because msb(a^b) == 1 and 𝟙 + // represents a value s.t. msb(𝟙) = 1) + // msb(a^𝟙) == (because ORing with 1 results in 1) + // msb(b) + // + // + // Here is an SMT-LIB verification of this formula: + // + // (define-fun lt ((a (_ BitVec 32)) (b (_ BitVec 32))) (_ BitVec 32) + // (bvxor a (bvor (bvxor a b) (bvxor (bvsub a b) a))) + // ) + // + // (declare-fun a () (_ BitVec 32)) + // (declare-fun b () (_ BitVec 32)) + // + // (assert (not (= (= #x00000001 (bvlshr (lt a b) #x0000001f)) (bvult a b)))) + // (check-sat) + // (get-model) return constant_time_msb_w(a^((a^b)|((a-b)^a))); } -/* constant_time_lt_8 acts like |constant_time_lt_w| but returns an 8-bit - * mask. */ +// constant_time_lt_8 acts like |constant_time_lt_w| but returns an 8-bit +// mask. static inline uint8_t constant_time_lt_8(crypto_word_t a, crypto_word_t b) { return (uint8_t)(constant_time_lt_w(a, b)); } -/* constant_time_ge_w returns 0xff..f if a >= b and 0 otherwise. 
*/ +// constant_time_ge_w returns 0xff..f if a >= b and 0 otherwise. static inline crypto_word_t constant_time_ge_w(crypto_word_t a, crypto_word_t b) { return ~constant_time_lt_w(a, b); } -/* constant_time_ge_8 acts like |constant_time_ge_w| but returns an 8-bit - * mask. */ +// constant_time_ge_8 acts like |constant_time_ge_w| but returns an 8-bit +// mask. static inline uint8_t constant_time_ge_8(crypto_word_t a, crypto_word_t b) { return (uint8_t)(constant_time_ge_w(a, b)); } -/* constant_time_is_zero returns 0xff..f if a == 0 and 0 otherwise. */ +// constant_time_is_zero returns 0xff..f if a == 0 and 0 otherwise. static inline crypto_word_t constant_time_is_zero_w(crypto_word_t a) { - /* Here is an SMT-LIB verification of this formula: - * - * (define-fun is_zero ((a (_ BitVec 32))) (_ BitVec 32) - * (bvand (bvnot a) (bvsub a #x00000001)) - * ) - * - * (declare-fun a () (_ BitVec 32)) - * - * (assert (not (= (= #x00000001 (bvlshr (is_zero a) #x0000001f)) (= a #x00000000)))) - * (check-sat) - * (get-model) - */ + // Here is an SMT-LIB verification of this formula: + // + // (define-fun is_zero ((a (_ BitVec 32))) (_ BitVec 32) + // (bvand (bvnot a) (bvsub a #x00000001)) + // ) + // + // (declare-fun a () (_ BitVec 32)) + // + // (assert (not (= (= #x00000001 (bvlshr (is_zero a) #x0000001f)) (= a #x00000000)))) + // (check-sat) + // (get-model) return constant_time_msb_w(~a & (a - 1)); } -/* constant_time_is_zero_8 acts like |constant_time_is_zero_w| but returns an - * 8-bit mask. */ +// constant_time_is_zero_8 acts like |constant_time_is_zero_w| but returns an +// 8-bit mask. static inline uint8_t constant_time_is_zero_8(crypto_word_t a) { return (uint8_t)(constant_time_is_zero_w(a)); } -/* constant_time_eq_w returns 0xff..f if a == b and 0 otherwise. */ +// constant_time_eq_w returns 0xff..f if a == b and 0 otherwise. 
static inline crypto_word_t constant_time_eq_w(crypto_word_t a, crypto_word_t b) { return constant_time_is_zero_w(a ^ b); } -/* constant_time_eq_8 acts like |constant_time_eq_w| but returns an 8-bit - * mask. */ +// constant_time_eq_8 acts like |constant_time_eq_w| but returns an 8-bit +// mask. static inline uint8_t constant_time_eq_8(crypto_word_t a, crypto_word_t b) { return (uint8_t)(constant_time_eq_w(a, b)); } -/* constant_time_eq_int acts like |constant_time_eq_w| but works on int - * values. */ +// constant_time_eq_int acts like |constant_time_eq_w| but works on int +// values. static inline crypto_word_t constant_time_eq_int(int a, int b) { return constant_time_eq_w((crypto_word_t)(a), (crypto_word_t)(b)); } -/* constant_time_eq_int_8 acts like |constant_time_eq_int| but returns an 8-bit - * mask. */ +// constant_time_eq_int_8 acts like |constant_time_eq_int| but returns an 8-bit +// mask. static inline uint8_t constant_time_eq_int_8(int a, int b) { return constant_time_eq_8((crypto_word_t)(a), (crypto_word_t)(b)); } -/* constant_time_select_w returns (mask & a) | (~mask & b). When |mask| is all - * 1s or all 0s (as returned by the methods above), the select methods return - * either |a| (if |mask| is nonzero) or |b| (if |mask| is zero). */ +// constant_time_select_w returns (mask & a) | (~mask & b). When |mask| is all +// 1s or all 0s (as returned by the methods above), the select methods return +// either |a| (if |mask| is nonzero) or |b| (if |mask| is zero). static inline crypto_word_t constant_time_select_w(crypto_word_t mask, crypto_word_t a, crypto_word_t b) { return (mask & a) | (~mask & b); } -/* constant_time_select_8 acts like |constant_time_select| but operates on - * 8-bit values. */ +// constant_time_select_8 acts like |constant_time_select| but operates on +// 8-bit values. 
static inline uint8_t constant_time_select_8(uint8_t mask, uint8_t a, uint8_t b) { return (uint8_t)(constant_time_select_w(mask, a, b)); } -/* constant_time_select_int acts like |constant_time_select| but operates on - * ints. */ +// constant_time_select_int acts like |constant_time_select| but operates on +// ints. static inline int constant_time_select_int(crypto_word_t mask, int a, int b) { return (int)(constant_time_select_w(mask, (crypto_word_t)(a), (crypto_word_t)(b))); } -/* Thread-safe initialisation. */ +// Thread-safe initialisation. #if defined(OPENSSL_NO_THREADS) typedef uint32_t CRYPTO_once_t; @@ -358,52 +356,52 @@ typedef pthread_once_t CRYPTO_once_t; #error "Unknown threading library" #endif -/* CRYPTO_once calls |init| exactly once per process. This is thread-safe: if - * concurrent threads call |CRYPTO_once| with the same |CRYPTO_once_t| argument - * then they will block until |init| completes, but |init| will have only been - * called once. - * - * The |once| argument must be a |CRYPTO_once_t| that has been initialised with - * the value |CRYPTO_ONCE_INIT|. */ +// CRYPTO_once calls |init| exactly once per process. This is thread-safe: if +// concurrent threads call |CRYPTO_once| with the same |CRYPTO_once_t| argument +// then they will block until |init| completes, but |init| will have only been +// called once. +// +// The |once| argument must be a |CRYPTO_once_t| that has been initialised with +// the value |CRYPTO_ONCE_INIT|. OPENSSL_EXPORT void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)); -/* Reference counting. */ +// Reference counting. -/* CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. */ +// CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. #define CRYPTO_REFCOUNT_MAX 0xffffffff -/* CRYPTO_refcount_inc atomically increments the value at |*count| unless the - * value would overflow. 
It's safe for multiple threads to concurrently call - * this or |CRYPTO_refcount_dec_and_test_zero| on the same - * |CRYPTO_refcount_t|. */ +// CRYPTO_refcount_inc atomically increments the value at |*count| unless the +// value would overflow. It's safe for multiple threads to concurrently call +// this or |CRYPTO_refcount_dec_and_test_zero| on the same +// |CRYPTO_refcount_t|. OPENSSL_EXPORT void CRYPTO_refcount_inc(CRYPTO_refcount_t *count); -/* CRYPTO_refcount_dec_and_test_zero tests the value at |*count|: - * if it's zero, it crashes the address space. - * if it's the maximum value, it returns zero. - * otherwise, it atomically decrements it and returns one iff the resulting - * value is zero. - * - * It's safe for multiple threads to concurrently call this or - * |CRYPTO_refcount_inc| on the same |CRYPTO_refcount_t|. */ +// CRYPTO_refcount_dec_and_test_zero tests the value at |*count|: +// if it's zero, it crashes the address space. +// if it's the maximum value, it returns zero. +// otherwise, it atomically decrements it and returns one iff the resulting +// value is zero. +// +// It's safe for multiple threads to concurrently call this or +// |CRYPTO_refcount_inc| on the same |CRYPTO_refcount_t|. OPENSSL_EXPORT int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *count); -/* Locks. - * - * Two types of locks are defined: |CRYPTO_MUTEX|, which can be used in - * structures as normal, and |struct CRYPTO_STATIC_MUTEX|, which can be used as - * a global lock. A global lock must be initialised to the value - * |CRYPTO_STATIC_MUTEX_INIT|. - * - * |CRYPTO_MUTEX| can appear in public structures and so is defined in - * thread.h as a structure large enough to fit the real type. The global lock is - * a different type so it may be initialized with platform initializer macros.*/ +// Locks. +// +// Two types of locks are defined: |CRYPTO_MUTEX|, which can be used in +// structures as normal, and |struct CRYPTO_STATIC_MUTEX|, which can be used as +// a global lock. 
A global lock must be initialised to the value +// |CRYPTO_STATIC_MUTEX_INIT|. +// +// |CRYPTO_MUTEX| can appear in public structures and so is defined in +// thread.h as a structure large enough to fit the real type. The global lock is +// a different type so it may be initialized with platform initializer macros. #if defined(OPENSSL_NO_THREADS) struct CRYPTO_STATIC_MUTEX { - char padding; /* Empty structs have different sizes in C and C++. */ + char padding; // Empty structs have different sizes in C and C++. }; #define CRYPTO_STATIC_MUTEX_INIT { 0 } #elif defined(OPENSSL_WINDOWS_THREADS) @@ -420,46 +418,46 @@ struct CRYPTO_STATIC_MUTEX { #error "Unknown threading library" #endif -/* CRYPTO_MUTEX_init initialises |lock|. If |lock| is a static variable, use a - * |CRYPTO_STATIC_MUTEX|. */ +// CRYPTO_MUTEX_init initialises |lock|. If |lock| is a static variable, use a +// |CRYPTO_STATIC_MUTEX|. OPENSSL_EXPORT void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_lock_read locks |lock| such that other threads may also have a - * read lock, but none may have a write lock. */ +// CRYPTO_MUTEX_lock_read locks |lock| such that other threads may also have a +// read lock, but none may have a write lock. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_lock_write locks |lock| such that no other thread has any type - * of lock on it. */ +// CRYPTO_MUTEX_lock_write locks |lock| such that no other thread has any type +// of lock on it. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_unlock_read unlocks |lock| for reading. */ +// CRYPTO_MUTEX_unlock_read unlocks |lock| for reading. OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_unlock_write unlocks |lock| for writing. */ +// CRYPTO_MUTEX_unlock_write unlocks |lock| for writing. OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_cleanup releases all resources held by |lock|. 
*/ +// CRYPTO_MUTEX_cleanup releases all resources held by |lock|. OPENSSL_EXPORT void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_lock_read locks |lock| such that other threads may also - * have a read lock, but none may have a write lock. The |lock| variable does - * not need to be initialised by any function, but must have been statically - * initialised with |CRYPTO_STATIC_MUTEX_INIT|. */ +// CRYPTO_STATIC_MUTEX_lock_read locks |lock| such that other threads may also +// have a read lock, but none may have a write lock. The |lock| variable does +// not need to be initialised by any function, but must have been statically +// initialised with |CRYPTO_STATIC_MUTEX_INIT|. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_lock_read( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_lock_write locks |lock| such that no other thread has - * any type of lock on it. The |lock| variable does not need to be initialised - * by any function, but must have been statically initialised with - * |CRYPTO_STATIC_MUTEX_INIT|. */ +// CRYPTO_STATIC_MUTEX_lock_write locks |lock| such that no other thread has +// any type of lock on it. The |lock| variable does not need to be initialised +// by any function, but must have been statically initialised with +// |CRYPTO_STATIC_MUTEX_INIT|. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_lock_write( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_unlock_read unlocks |lock| for reading. */ +// CRYPTO_STATIC_MUTEX_unlock_read unlocks |lock| for reading. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_unlock_read( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_unlock_write unlocks |lock| for writing. */ +// CRYPTO_STATIC_MUTEX_unlock_write unlocks |lock| for writing. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_unlock_write( struct CRYPTO_STATIC_MUTEX *lock); @@ -470,7 +468,7 @@ namespace bssl { namespace internal { -/* MutexLockBase is a RAII helper for CRYPTO_MUTEX locking. 
*/ +// MutexLockBase is a RAII helper for CRYPTO_MUTEX locking. template class MutexLockBase { public: @@ -500,10 +498,10 @@ using MutexReadLock = #endif // defined(__cplusplus) -/* Thread local storage. */ +// Thread local storage. -/* thread_local_data_t enumerates the types of thread-local data that can be - * stored. */ +// thread_local_data_t enumerates the types of thread-local data that can be +// stored. typedef enum { OPENSSL_THREAD_LOCAL_ERR = 0, OPENSSL_THREAD_LOCAL_RAND, @@ -511,47 +509,47 @@ typedef enum { NUM_OPENSSL_THREAD_LOCALS, } thread_local_data_t; -/* thread_local_destructor_t is the type of a destructor function that will be - * called when a thread exits and its thread-local storage needs to be freed. */ +// thread_local_destructor_t is the type of a destructor function that will be +// called when a thread exits and its thread-local storage needs to be freed. typedef void (*thread_local_destructor_t)(void *); -/* CRYPTO_get_thread_local gets the pointer value that is stored for the - * current thread for the given index, or NULL if none has been set. */ +// CRYPTO_get_thread_local gets the pointer value that is stored for the +// current thread for the given index, or NULL if none has been set. OPENSSL_EXPORT void *CRYPTO_get_thread_local(thread_local_data_t value); -/* CRYPTO_set_thread_local sets a pointer value for the current thread at the - * given index. This function should only be called once per thread for a given - * |index|: rather than update the pointer value itself, update the data that - * is pointed to. - * - * The destructor function will be called when a thread exits to free this - * thread-local data. All calls to |CRYPTO_set_thread_local| with the same - * |index| should have the same |destructor| argument. The destructor may be - * called with a NULL argument if a thread that never set a thread-local - * pointer for |index|, exits. The destructor may be called concurrently with - * different arguments. 
- * - * This function returns one on success or zero on error. If it returns zero - * then |destructor| has been called with |value| already. */ +// CRYPTO_set_thread_local sets a pointer value for the current thread at the +// given index. This function should only be called once per thread for a given +// |index|: rather than update the pointer value itself, update the data that +// is pointed to. +// +// The destructor function will be called when a thread exits to free this +// thread-local data. All calls to |CRYPTO_set_thread_local| with the same +// |index| should have the same |destructor| argument. The destructor may be +// called with a NULL argument if a thread that never set a thread-local +// pointer for |index|, exits. The destructor may be called concurrently with +// different arguments. +// +// This function returns one on success or zero on error. If it returns zero +// then |destructor| has been called with |value| already. OPENSSL_EXPORT int CRYPTO_set_thread_local( thread_local_data_t index, void *value, thread_local_destructor_t destructor); -/* ex_data */ +// ex_data typedef struct crypto_ex_data_func_st CRYPTO_EX_DATA_FUNCS; DECLARE_STACK_OF(CRYPTO_EX_DATA_FUNCS) -/* CRYPTO_EX_DATA_CLASS tracks the ex_indices registered for a type which - * supports ex_data. It should defined as a static global within the module - * which defines that type. */ +// CRYPTO_EX_DATA_CLASS tracks the ex_indices registered for a type which +// supports ex_data. It should be defined as a static global within the module +// which defines that type. typedef struct { struct CRYPTO_STATIC_MUTEX lock; STACK_OF(CRYPTO_EX_DATA_FUNCS) *meth; - /* num_reserved is one if the ex_data index zero is reserved for legacy - * |TYPE_get_app_data| functions. */ + // num_reserved is one if the ex_data index zero is reserved for legacy + // |TYPE_get_app_data| functions. 
uint8_t num_reserved; } CRYPTO_EX_DATA_CLASS; @@ -559,47 +557,47 @@ typedef struct { #define CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA \ {CRYPTO_STATIC_MUTEX_INIT, NULL, 1} -/* CRYPTO_get_ex_new_index allocates a new index for |ex_data_class| and writes - * it to |*out_index|. Each class of object should provide a wrapper function - * that uses the correct |CRYPTO_EX_DATA_CLASS|. It returns one on success and - * zero otherwise. */ +// CRYPTO_get_ex_new_index allocates a new index for |ex_data_class| and writes +// it to |*out_index|. Each class of object should provide a wrapper function +// that uses the correct |CRYPTO_EX_DATA_CLASS|. It returns one on success and +// zero otherwise. OPENSSL_EXPORT int CRYPTO_get_ex_new_index(CRYPTO_EX_DATA_CLASS *ex_data_class, int *out_index, long argl, void *argp, CRYPTO_EX_free *free_func); -/* CRYPTO_set_ex_data sets an extra data pointer on a given object. Each class - * of object should provide a wrapper function. */ +// CRYPTO_set_ex_data sets an extra data pointer on a given object. Each class +// of object should provide a wrapper function. OPENSSL_EXPORT int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val); -/* CRYPTO_get_ex_data returns an extra data pointer for a given object, or NULL - * if no such index exists. Each class of object should provide a wrapper - * function. */ +// CRYPTO_get_ex_data returns an extra data pointer for a given object, or NULL +// if no such index exists. Each class of object should provide a wrapper +// function. OPENSSL_EXPORT void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int index); -/* CRYPTO_new_ex_data initialises a newly allocated |CRYPTO_EX_DATA|. */ +// CRYPTO_new_ex_data initialises a newly allocated |CRYPTO_EX_DATA|. OPENSSL_EXPORT void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad); -/* CRYPTO_free_ex_data frees |ad|, which is embedded inside |obj|, which is an - * object of the given class. 
*/ +// CRYPTO_free_ex_data frees |ad|, which is embedded inside |obj|, which is an +// object of the given class. OPENSSL_EXPORT void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad); -/* Language bug workarounds. - * - * Most C standard library functions are undefined if passed NULL, even when the - * corresponding length is zero. This gives them (and, in turn, all functions - * which call them) surprising behavior on empty arrays. Some compilers will - * miscompile code due to this rule. See also - * https://www.imperialviolet.org/2016/06/26/nonnull.html - * - * These wrapper functions behave the same as the corresponding C standard - * functions, but behave as expected when passed NULL if the length is zero. - * - * Note |OPENSSL_memcmp| is a different function from |CRYPTO_memcmp|. */ - -/* C++ defines |memchr| as a const-correct overload. */ +// Language bug workarounds. +// +// Most C standard library functions are undefined if passed NULL, even when the +// corresponding length is zero. This gives them (and, in turn, all functions +// which call them) surprising behavior on empty arrays. Some compilers will +// miscompile code due to this rule. See also +// https://www.imperialviolet.org/2016/06/26/nonnull.html +// +// These wrapper functions behave the same as the corresponding C standard +// functions, but behave as expected when passed NULL if the length is zero. +// +// Note |OPENSSL_memcmp| is a different function from |CRYPTO_memcmp|. + +// C++ defines |memchr| as a const-correct overload. 
#if defined(__cplusplus) extern "C++" { @@ -619,8 +617,8 @@ static inline void *OPENSSL_memchr(void *s, int c, size_t n) { return memchr(s, c, n); } -} /* extern "C++" */ -#else /* __cplusplus */ +} // extern "C++" +#else // __cplusplus static inline void *OPENSSL_memchr(const void *s, int c, size_t n) { if (n == 0) { @@ -630,7 +628,7 @@ static inline void *OPENSSL_memchr(const void *s, int c, size_t n) { return memchr(s, c, n); } -#endif /* __cplusplus */ +#endif // __cplusplus static inline int OPENSSL_memcmp(const void *s1, const void *s2, size_t n) { if (n == 0) { @@ -665,14 +663,14 @@ static inline void *OPENSSL_memset(void *dst, int c, size_t n) { } #if defined(BORINGSSL_FIPS) -/* BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test - * fails. It prevents any further cryptographic operations by the current - * process. */ +// BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test +// fails. It prevents any further cryptographic operations by the current +// process. void BORINGSSL_FIPS_abort(void) __attribute__((noreturn)); #endif #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_INTERNAL_H */ +#endif // OPENSSL_HEADER_CRYPTO_INTERNAL_H diff --git a/crypto/lhash/lhash.c b/crypto/lhash/lhash.c index 27960d98..3b60538a 100644 --- a/crypto/lhash/lhash.c +++ b/crypto/lhash/lhash.c @@ -65,11 +65,11 @@ #include "../internal.h" -/* kMinNumBuckets is the minimum size of the buckets array in an |_LHASH|. */ +// kMinNumBuckets is the minimum size of the buckets array in an |_LHASH|. static const size_t kMinNumBuckets = 16; -/* kMaxAverageChainLength contains the maximum, average chain length. When the - * average chain length exceeds this value, the hash table will be resized. */ +// kMaxAverageChainLength contains the maximum, average chain length. When the +// average chain length exceeds this value, the hash table will be resized. 
static const size_t kMaxAverageChainLength = 2; static const size_t kMinAverageChainLength = 1; @@ -112,13 +112,13 @@ void lh_free(_LHASH *lh) { size_t lh_num_items(const _LHASH *lh) { return lh->num_items; } -/* get_next_ptr_and_hash returns a pointer to the pointer that points to the - * item equal to |data|. In other words, it searches for an item equal to |data| - * and, if it's at the start of a chain, then it returns a pointer to an - * element of |lh->buckets|, otherwise it returns a pointer to the |next| - * element of the previous item in the chain. If an element equal to |data| is - * not found, it returns a pointer that points to a NULL pointer. If |out_hash| - * is not NULL, then it also puts the hash value of |data| in |*out_hash|. */ +// get_next_ptr_and_hash returns a pointer to the pointer that points to the +// item equal to |data|. In other words, it searches for an item equal to |data| +// and, if it's at the start of a chain, then it returns a pointer to an +// element of |lh->buckets|, otherwise it returns a pointer to the |next| +// element of the previous item in the chain. If an element equal to |data| is +// not found, it returns a pointer that points to a NULL pointer. If |out_hash| +// is not NULL, then it also puts the hash value of |data| in |*out_hash|. static LHASH_ITEM **get_next_ptr_and_hash(const _LHASH *lh, uint32_t *out_hash, const void *data) { const uint32_t hash = lh->hash(data); @@ -151,9 +151,9 @@ void *lh_retrieve(const _LHASH *lh, const void *data) { return (*next_ptr)->data; } -/* lh_rebucket allocates a new array of |new_num_buckets| pointers and - * redistributes the existing items into it before making it |lh->buckets| and - * freeing the old array. */ +// lh_rebucket allocates a new array of |new_num_buckets| pointers and +// redistributes the existing items into it before making it |lh->buckets| and +// freeing the old array. 
static void lh_rebucket(_LHASH *lh, const size_t new_num_buckets) { LHASH_ITEM **new_buckets, *cur, *next; size_t i, alloc_size; @@ -184,12 +184,12 @@ static void lh_rebucket(_LHASH *lh, const size_t new_num_buckets) { lh->buckets = new_buckets; } -/* lh_maybe_resize resizes the |buckets| array if needed. */ +// lh_maybe_resize resizes the |buckets| array if needed. static void lh_maybe_resize(_LHASH *lh) { size_t avg_chain_length; if (lh->callback_depth > 0) { - /* Don't resize the hash if we are currently iterating over it. */ + // Don't resize the hash if we are currently iterating over it. return; } @@ -223,14 +223,14 @@ int lh_insert(_LHASH *lh, void **old_data, void *data) { if (*next_ptr != NULL) { - /* An element equal to |data| already exists in the hash table. It will be - * replaced. */ + // An element equal to |data| already exists in the hash table. It will be + // replaced. *old_data = (*next_ptr)->data; (*next_ptr)->data = data; return 1; } - /* An element equal to |data| doesn't exist in the hash table yet. */ + // An element equal to |data| doesn't exist in the hash table yet. item = OPENSSL_malloc(sizeof(LHASH_ITEM)); if (item == NULL) { return 0; @@ -252,7 +252,7 @@ void *lh_delete(_LHASH *lh, const void *data) { next_ptr = get_next_ptr_and_hash(lh, NULL, data); if (*next_ptr == NULL) { - /* No such element. */ + // No such element. return NULL; } @@ -274,7 +274,7 @@ static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *), } if (lh->callback_depth < UINT_MAX) { - /* |callback_depth| is a saturating counter. */ + // |callback_depth| is a saturating counter. lh->callback_depth++; } @@ -294,9 +294,9 @@ static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *), lh->callback_depth--; } - /* The callback may have added or removed elements and the non-zero value of - * |callback_depth| will have suppressed any resizing. Thus any needed - * resizing is done here. 
*/ + // The callback may have added or removed elements and the non-zero value of + // |callback_depth| will have suppressed any resizing. Thus any needed + // resizing is done here. lh_maybe_resize(lh); } @@ -309,9 +309,9 @@ void lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) { } uint32_t lh_strhash(const char *c) { - /* The following hash seems to work very well on normal text strings - * no collisions on /usr/dict/words and it distributes on %2^n quite - * well, not as good as MD5, but still good. */ + // The following hash seems to work very well on normal text strings + // no collisions on /usr/dict/words and it distributes on %2^n quite + // well, not as good as MD5, but still good. unsigned long ret = 0; long n; unsigned long v; diff --git a/crypto/mem.c b/crypto/mem.c index f451a12d..576ab7f4 100644 --- a/crypto/mem.c +++ b/crypto/mem.c @@ -55,7 +55,7 @@ * [including the GNU Public Licence.] */ #if !defined(_POSIX_C_SOURCE) -#define _POSIX_C_SOURCE 201410L /* needed for strdup, snprintf, vprintf etc */ +#define _POSIX_C_SOURCE 201410L // needed for strdup, snprintf, vprintf etc #endif #include @@ -85,8 +85,8 @@ void *OPENSSL_realloc_clean(void *ptr, size_t old_size, size_t new_size) { return NULL; } - /* We don't support shrinking the buffer. Note the memcpy that copies - * |old_size| bytes to the new buffer, below. */ + // We don't support shrinking the buffer. Note the memcpy that copies + // |old_size| bytes to the new buffer, below. if (new_size < old_size) { return NULL; } @@ -114,7 +114,7 @@ void OPENSSL_cleanse(void *ptr, size_t len) { detect memset_s, it would be better to use that. 
*/ __asm__ __volatile__("" : : "r"(ptr) : "memory"); #endif -#endif /* !OPENSSL_NO_ASM */ +#endif // !OPENSSL_NO_ASM } int CRYPTO_memcmp(const void *in_a, const void *in_b, size_t len) { @@ -130,7 +130,7 @@ int CRYPTO_memcmp(const void *in_a, const void *in_b, size_t len) { } uint32_t OPENSSL_hash32(const void *ptr, size_t len) { - /* These are the FNV-1a parameters for 32 bits. */ + // These are the FNV-1a parameters for 32 bits. static const uint32_t kPrime = 16777619u; static const uint32_t kOffsetBasis = 2166136261u; diff --git a/crypto/obj/obj.c b/crypto/obj/obj.c index 173257fa..39685152 100644 --- a/crypto/obj/obj.c +++ b/crypto/obj/obj.c @@ -77,7 +77,7 @@ static struct CRYPTO_STATIC_MUTEX global_added_lock = CRYPTO_STATIC_MUTEX_INIT; -/* These globals are protected by |global_added_lock|. */ +// These globals are protected by |global_added_lock|. static LHASH_OF(ASN1_OBJECT) *global_added_by_data = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_nid = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_short_name = NULL; @@ -107,7 +107,7 @@ ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o) { } if (!(o->flags & ASN1_OBJECT_FLAG_DYNAMIC)) { - /* TODO(fork): this is a little dangerous. */ + // TODO(fork): this is a little dangerous. return (ASN1_OBJECT *)o; } @@ -126,7 +126,7 @@ ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o) { OPENSSL_memcpy(data, o->data, o->length); } - /* once data is attached to an object, it remains const */ + // once data is attached to an object, it remains const r->data = data; r->length = o->length; r->nid = o->nid; @@ -172,9 +172,9 @@ int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return OPENSSL_memcmp(a->data, b->data, a->length); } -/* obj_cmp is called to search the kNIDsInOIDOrder array. The |key| argument is - * an |ASN1_OBJECT|* that we're looking for and |element| is a pointer to an - * unsigned int in the array. */ +// obj_cmp is called to search the kNIDsInOIDOrder array. 
The |key| argument is +// an |ASN1_OBJECT|* that we're looking for and |element| is a pointer to an +// unsigned int in the array. static int obj_cmp(const void *key, const void *element) { unsigned nid = *((const unsigned*) element); const ASN1_OBJECT *a = key; @@ -233,9 +233,9 @@ int OBJ_cbs2nid(const CBS *cbs) { return OBJ_obj2nid(&obj); } -/* short_name_cmp is called to search the kNIDsInShortNameOrder array. The - * |key| argument is name that we're looking for and |element| is a pointer to - * an unsigned int in the array. */ +// short_name_cmp is called to search the kNIDsInShortNameOrder array. The +// |key| argument is name that we're looking for and |element| is a pointer to +// an unsigned int in the array. static int short_name_cmp(const void *key, const void *element) { const char *name = (const char *) key; unsigned nid = *((unsigned*) element); @@ -269,9 +269,9 @@ int OBJ_sn2nid(const char *short_name) { return kObjects[*nid_ptr].nid; } -/* long_name_cmp is called to search the kNIDsInLongNameOrder array. The - * |key| argument is name that we're looking for and |element| is a pointer to - * an unsigned int in the array. */ +// long_name_cmp is called to search the kNIDsInLongNameOrder array. The +// |key| argument is name that we're looking for and |element| is a pointer to +// an unsigned int in the array. 
static int long_name_cmp(const void *key, const void *element) { const char *name = (const char *) key; unsigned nid = *((unsigned*) element); @@ -392,12 +392,12 @@ ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { } } - /* Work out size of content octets */ + // Work out size of content octets contents_len = a2d_ASN1_OBJECT(NULL, 0, s, -1); if (contents_len <= 0) { return NULL; } - /* Work out total size */ + // Work out total size total_len = ASN1_object_size(0, contents_len, V_ASN1_OBJECT); buf = OPENSSL_malloc(total_len); @@ -407,9 +407,9 @@ ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { } p = buf; - /* Write out tag+length */ + // Write out tag+length ASN1_put_object(&p, 0, contents_len, V_ASN1_OBJECT, V_ASN1_UNIVERSAL); - /* Write out contents */ + // Write out contents a2d_ASN1_OBJECT(p, contents_len, s, -1); bufp = buf; @@ -436,16 +436,16 @@ static int parse_oid_component(CBS *cbs, uint64_t *out) { return 0; } if ((v >> (64 - 7)) != 0) { - /* The component is too large. */ + // The component is too large. return 0; } if (v == 0 && b == 0x80) { - /* The component must be minimally encoded. */ + // The component must be minimally encoded. return 0; } v = (v << 7) | (b & 0x7f); - /* Components end at an octet with the high bit cleared. */ + // Components end at an octet with the high bit cleared. } while (b & 0x80); *out = v; @@ -460,8 +460,8 @@ static int add_decimal(CBB *out, uint64_t v) { int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, int always_return_oid) { - /* Python depends on the empty OID successfully encoding as the empty - * string. */ + // Python depends on the empty OID successfully encoding as the empty + // string. 
if (obj == NULL || obj->length == 0) { return strlcpy_int(out, "", out_len); } @@ -487,7 +487,7 @@ int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, CBS cbs; CBS_init(&cbs, obj->data, obj->length); - /* The first component is 40 * value1 + value2, where value1 is 0, 1, or 2. */ + // The first component is 40 * value1 + value2, where value1 is 0, 1, or 2. uint64_t v; if (!parse_oid_component(&cbs, &v)) { goto err; @@ -567,8 +567,8 @@ static int cmp_long_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return strcmp(a->ln, b->ln); } -/* obj_add_object inserts |obj| into the various global hashes for run-time - * added objects. It returns one on success or zero otherwise. */ +// obj_add_object inserts |obj| into the various global hashes for run-time +// added objects. It returns one on success or zero otherwise. static int obj_add_object(ASN1_OBJECT *obj) { int ok; ASN1_OBJECT *old_object; @@ -584,10 +584,10 @@ static int obj_add_object(ASN1_OBJECT *obj) { global_added_by_long_name = lh_ASN1_OBJECT_new(hash_long_name, cmp_long_name); } - /* We don't pay attention to |old_object| (which contains any previous object - * that was evicted from the hashes) because we don't have a reference count - * on ASN1_OBJECT values. Also, we should never have duplicates nids and so - * should always have objects in |global_added_by_nid|. */ + // We don't pay attention to |old_object| (which contains any previous object + // that was evicted from the hashes) because we don't have a reference count + // on ASN1_OBJECT values. Also, we should never have duplicates nids and so + // should always have objects in |global_added_by_nid|. 
ok = lh_ASN1_OBJECT_insert(global_added_by_nid, &old_object, obj); if (obj->length != 0 && obj->data != NULL) { diff --git a/crypto/obj/obj_xref.c b/crypto/obj/obj_xref.c index 6136e995..21bde279 100644 --- a/crypto/obj/obj_xref.c +++ b/crypto/obj/obj_xref.c @@ -66,7 +66,7 @@ typedef struct { } nid_triple; static const nid_triple kTriples[] = { - /* RSA PKCS#1. */ + // RSA PKCS#1. {NID_md4WithRSAEncryption, NID_md4, NID_rsaEncryption}, {NID_md5WithRSAEncryption, NID_md5, NID_rsaEncryption}, {NID_sha1WithRSAEncryption, NID_sha1, NID_rsaEncryption}, @@ -74,19 +74,19 @@ static const nid_triple kTriples[] = { {NID_sha256WithRSAEncryption, NID_sha256, NID_rsaEncryption}, {NID_sha384WithRSAEncryption, NID_sha384, NID_rsaEncryption}, {NID_sha512WithRSAEncryption, NID_sha512, NID_rsaEncryption}, - /* DSA. */ + // DSA. {NID_dsaWithSHA1, NID_sha1, NID_dsa}, {NID_dsaWithSHA1_2, NID_sha1, NID_dsa_2}, {NID_dsa_with_SHA224, NID_sha224, NID_dsa}, {NID_dsa_with_SHA256, NID_sha256, NID_dsa}, - /* ECDSA. */ + // ECDSA. {NID_ecdsa_with_SHA1, NID_sha1, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA224, NID_sha224, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA256, NID_sha256, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA384, NID_sha384, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA512, NID_sha512, NID_X9_62_id_ecPublicKey}, - /* The following algorithms use more complex (or simpler) parameters. The - * digest "undef" indicates the caller should handle this explicitly. */ + // The following algorithms use more complex (or simpler) parameters. The + // digest "undef" indicates the caller should handle this explicitly. 
{NID_rsassaPss, NID_undef, NID_rsaEncryption}, {NID_ED25519, NID_undef, NID_ED25519}, }; diff --git a/crypto/pkcs7/internal.h b/crypto/pkcs7/internal.h index 9ca2a292..9541bea8 100644 --- a/crypto/pkcs7/internal.h +++ b/crypto/pkcs7/internal.h @@ -22,28 +22,28 @@ extern "C" { #endif -/* pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 - * SignedData blob from |cbs| and sets |*out| to point to the rest of the - * input. If the input is in BER format, then |*der_bytes| will be set to a - * pointer that needs to be freed by the caller once they have finished - * processing |*out| (which will be pointing into |*der_bytes|). - * - * It returns one on success or zero on error. On error, |*der_bytes| is - * NULL. */ +// pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 +// SignedData blob from |cbs| and sets |*out| to point to the rest of the +// input. If the input is in BER format, then |*der_bytes| will be set to a +// pointer that needs to be freed by the caller once they have finished +// processing |*out| (which will be pointing into |*der_bytes|). +// +// It returns one on success or zero on error. On error, |*der_bytes| is +// NULL. int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs); -/* pkcs7_bundle writes a PKCS#7, SignedData structure to |out| and then calls - * |cb| with a CBB to which certificate or CRL data can be written, and the - * opaque context pointer, |arg|. The callback can return zero to indicate an - * error. - * - * pkcs7_bundle returns one on success or zero on error. */ +// pkcs7_bundle writes a PKCS#7, SignedData structure to |out| and then calls +// |cb| with a CBB to which certificate or CRL data can be written, and the +// opaque context pointer, |arg|. The callback can return zero to indicate an +// error. +// +// pkcs7_bundle returns one on success or zero on error. 
int pkcs7_bundle(CBB *out, int (*cb)(CBB *out, const void *arg), const void *arg); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_PKCS7_INTERNAL_H */ +#endif // OPENSSL_HEADER_PKCS7_INTERNAL_H diff --git a/crypto/pkcs7/pkcs7.c b/crypto/pkcs7/pkcs7.c index 1b7e1cbf..fc175a94 100644 --- a/crypto/pkcs7/pkcs7.c +++ b/crypto/pkcs7/pkcs7.c @@ -24,28 +24,28 @@ #include "../bytestring/internal.h" -/* 1.2.840.113549.1.7.1 */ +// 1.2.840.113549.1.7.1 static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01}; -/* 1.2.840.113549.1.7.2 */ +// 1.2.840.113549.1.7.2 static const uint8_t kPKCS7SignedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02}; -/* pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 - * SignedData blob from |cbs| and sets |*out| to point to the rest of the - * input. If the input is in BER format, then |*der_bytes| will be set to a - * pointer that needs to be freed by the caller once they have finished - * processing |*out| (which will be pointing into |*der_bytes|). - * - * It returns one on success or zero on error. On error, |*der_bytes| is - * NULL. */ +// pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 +// SignedData blob from |cbs| and sets |*out| to point to the rest of the +// input. If the input is in BER format, then |*der_bytes| will be set to a +// pointer that needs to be freed by the caller once they have finished +// processing |*out| (which will be pointing into |*der_bytes|). +// +// It returns one on success or zero on error. On error, |*der_bytes| is +// NULL. int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { size_t der_len; CBS in, content_info, content_type, wrapped_signed_data, signed_data; uint64_t version; - /* The input may be in BER format. */ + // The input may be in BER format. 
*der_bytes = NULL; if (!CBS_asn1_ber_to_der(cbs, der_bytes, &der_len)) { return 0; @@ -56,7 +56,7 @@ int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { CBS_init(&in, CBS_data(cbs), CBS_len(cbs)); } - /* See https://tools.ietf.org/html/rfc2315#section-7 */ + // See https://tools.ietf.org/html/rfc2315#section-7 if (!CBS_get_asn1(&in, &content_info, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&content_info, &content_type, CBS_ASN1_OBJECT)) { goto err; @@ -68,7 +68,7 @@ int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { goto err; } - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBS_get_asn1(&content_info, &wrapped_signed_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || !CBS_get_asn1(&wrapped_signed_data, &signed_data, CBS_ASN1_SEQUENCE) || @@ -103,7 +103,7 @@ int PKCS7_get_raw_certificates(STACK_OF(CRYPTO_BUFFER) *out_certs, CBS *cbs, return 0; } - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBS_get_asn1(&signed_data, &certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_NO_CERTIFICATES_INCLUDED); @@ -144,13 +144,13 @@ int pkcs7_bundle(CBB *out, int (*cb)(CBB *out, const void *arg), CBB outer_seq, oid, wrapped_seq, seq, version_bytes, digest_algos_set, content_info; - /* See https://tools.ietf.org/html/rfc2315#section-7 */ + // See https://tools.ietf.org/html/rfc2315#section-7 if (!CBB_add_asn1(out, &outer_seq, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&outer_seq, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, kPKCS7SignedData, sizeof(kPKCS7SignedData)) || !CBB_add_asn1(&outer_seq, &wrapped_seq, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 !CBB_add_asn1(&wrapped_seq, &seq, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&seq, 
&version_bytes, CBS_ASN1_INTEGER) || !CBB_add_u8(&version_bytes, 1) || diff --git a/crypto/pkcs7/pkcs7_test.cc b/crypto/pkcs7/pkcs7_test.cc index 544dffcc..54f7e8aa 100644 --- a/crypto/pkcs7/pkcs7_test.cc +++ b/crypto/pkcs7/pkcs7_test.cc @@ -25,8 +25,8 @@ #include "../test/test_util.h" -/* kPKCS7NSS contains the certificate chain of mail.google.com, as saved by NSS - * using the Chrome UI. */ +// kPKCS7NSS contains the certificate chain of mail.google.com, as saved by NSS +// using the Chrome UI. static const uint8_t kPKCS7NSS[] = { 0x30, 0x80, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, 0xa0, 0x80, 0x30, 0x80, 0x02, 0x01, 0x01, 0x31, 0x00, 0x30, 0x80, @@ -272,7 +272,7 @@ static const uint8_t kPKCS7NSS[] = { 0x00, 0x00, 0x00, }; -/* kPKCS7Windows is the Equifax root certificate, as exported by Windows 7. */ +// kPKCS7Windows is the Equifax root certificate, as exported by Windows 7. static const uint8_t kPKCS7Windows[] = { 0x30, 0x82, 0x02, 0xb1, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, 0xa0, 0x82, 0x02, 0xa2, 0x30, 0x82, 0x02, 0x9e, 0x02, @@ -334,8 +334,8 @@ static const uint8_t kPKCS7Windows[] = { 0xcd, 0x5a, 0x2a, 0x82, 0xb2, 0x37, 0x79, 0x31, 0x00, }; -/* kOpenSSLCRL is the Equifax CRL, converted to PKCS#7 form by: - * openssl crl2pkcs7 -inform DER -in secureca.crl */ +// kOpenSSLCRL is the Equifax CRL, converted to PKCS#7 form by: +// openssl crl2pkcs7 -inform DER -in secureca.crl static const uint8_t kOpenSSLCRL[] = { 0x30, 0x82, 0x03, 0x85, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, 0xa0, 0x82, 0x03, 0x76, 0x30, 0x82, 0x03, 0x72, 0x02, @@ -415,9 +415,9 @@ static const uint8_t kOpenSSLCRL[] = { 0xf0, 0x00, 0x54, 0x31, 0x00, }; -/* kPEMCert is the result of exporting the mail.google.com certificate from - * Chrome and then running it through: - * openssl pkcs7 -inform DER -in mail.google.com -outform PEM */ +// kPEMCert is the result of exporting the mail.google.com certificate from +// 
Chrome and then running it through: +// openssl pkcs7 -inform DER -in mail.google.com -outform PEM static const char kPEMCert[] = "-----BEGIN PKCS7-----\n" "MIID+wYJKoZIhvcNAQcCoIID7DCCA+gCAQExADALBgkqhkiG9w0BBwGgggPQMIID\n" diff --git a/crypto/pkcs7/pkcs7_x509.c b/crypto/pkcs7/pkcs7_x509.c index f2a49a7c..7bc39d27 100644 --- a/crypto/pkcs7/pkcs7_x509.c +++ b/crypto/pkcs7/pkcs7_x509.c @@ -71,10 +71,10 @@ int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs) { return 0; } - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 - /* Even if only CRLs are included, there may be an empty certificates block. - * OpenSSL does this, for example. */ + // Even if only CRLs are included, there may be an empty certificates block. + // OpenSSL does this, for example. if (CBS_peek_asn1_tag(&signed_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) && !CBS_get_asn1(&signed_data, NULL /* certificates */, @@ -133,9 +133,9 @@ int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, BIO *pem_bio) { long len; int ret; - /* Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM - * internally will actually allow several other values too, including - * "CERTIFICATE". */ + // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM + // internally will actually allow several other values too, including + // "CERTIFICATE". if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, @@ -155,9 +155,9 @@ int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, BIO *pem_bio) { long len; int ret; - /* Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM - * internally will actually allow several other values too, including - * "CERTIFICATE". */ + // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM + // internally will actually allow several other values too, including + // "CERTIFICATE". 
if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, @@ -177,7 +177,7 @@ static int pkcs7_bundle_certificates_cb(CBB *out, const void *arg) { size_t i; CBB certificates; - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { return 0; @@ -207,7 +207,7 @@ static int pkcs7_bundle_crls_cb(CBB *out, const void *arg) { size_t i; CBB crl_data; - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &crl_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 1)) { return 0; diff --git a/crypto/pkcs8/internal.h b/crypto/pkcs8/internal.h index 583997d6..93994898 100644 --- a/crypto/pkcs8/internal.h +++ b/crypto/pkcs8/internal.h @@ -63,10 +63,10 @@ extern "C" { #endif -/* pkcs8_pbe_decrypt decrypts |in| using the PBE scheme described by - * |algorithm|, which should be a serialized AlgorithmIdentifier structure. On - * success, it sets |*out| to a newly-allocated buffer containing the decrypted - * result and returns one. Otherwise, it returns zero. */ +// pkcs8_pbe_decrypt decrypts |in| using the PBE scheme described by +// |algorithm|, which should be a serialized AlgorithmIdentifier structure. On +// success, it sets |*out| to a newly-allocated buffer containing the decrypted +// result and returns one. Otherwise, it returns zero. int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, const char *pass, size_t pass_len, const uint8_t *in, size_t in_len); @@ -75,10 +75,10 @@ int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, #define PKCS12_IV_ID 2 #define PKCS12_MAC_ID 3 -/* pkcs12_key_gen runs the PKCS#12 key derivation function as specified in - * RFC 7292, appendix B. 
On success, it writes the resulting |out_len| bytes of - * key material to |out| and returns one. Otherwise, it returns zero. |id| - * should be one of the |PKCS12_*_ID| values. */ +// pkcs12_key_gen runs the PKCS#12 key derivation function as specified in +// RFC 7292, appendix B. On success, it writes the resulting |out_len| bytes of +// key material to |out| and returns one. Otherwise, it returns zero. |id| +// should be one of the |PKCS12_*_ID| values. int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len, uint8_t id, unsigned iterations, size_t out_len, uint8_t *out, const EVP_MD *md); @@ -89,11 +89,11 @@ struct pbe_suite { uint8_t oid_len; const EVP_CIPHER *(*cipher_func)(void); const EVP_MD *(*md_func)(void); - /* decrypt_init initialize |ctx| for decrypting. The password is specified by - * |pass| and |pass_len|. |param| contains the serialized parameters field of - * the AlgorithmIdentifier. - * - * It returns one on success and zero on error. */ + // decrypt_init initializes |ctx| for decrypting. The password is specified by + // |pass| and |pass_len|. |param| contains the serialized parameters field of + // the AlgorithmIdentifier. + // + // It returns one on success and zero on error. int (*decrypt_init)(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, const char *pass, size_t pass_len, CBS *param); }; @@ -104,9 +104,9 @@ struct pbe_suite { int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, const char *pass, size_t pass_len, CBS *param); -/* PKCS5_pbe2_encrypt_init configures |ctx| for encrypting with PKCS #5 PBES2, - * as defined in RFC 2998, with the specified parameters. It writes the - * corresponding AlgorithmIdentifier to |out|. */ +// PKCS5_pbe2_encrypt_init configures |ctx| for encrypting with PKCS #5 PBES2, +// as defined in RFC 2898, with the specified parameters. It writes the +// corresponding AlgorithmIdentifier to |out|. 
int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, unsigned iterations, const char *pass, size_t pass_len, @@ -114,7 +114,7 @@ int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_PKCS8_INTERNAL_H */ +#endif // OPENSSL_HEADER_PKCS8_INTERNAL_H diff --git a/crypto/pkcs8/p5_pbev2.c b/crypto/pkcs8/p5_pbev2.c index 29d89297..6686cf37 100644 --- a/crypto/pkcs8/p5_pbev2.c +++ b/crypto/pkcs8/p5_pbev2.c @@ -69,15 +69,15 @@ #include "../internal.h" -/* 1.2.840.113549.1.5.12 */ +// 1.2.840.113549.1.5.12 static const uint8_t kPBKDF2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0c}; -/* 1.2.840.113549.1.5.13 */ +// 1.2.840.113549.1.5.13 static const uint8_t kPBES2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d}; -/* 1.2.840.113549.2.7 */ +// 1.2.840.113549.2.7 static const uint8_t kHMACWithSHA1[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x07}; @@ -87,27 +87,27 @@ static const struct { int nid; const EVP_CIPHER *(*cipher_func)(void); } kCipherOIDs[] = { - /* 1.2.840.113549.3.2 */ + // 1.2.840.113549.3.2 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02}, 8, NID_rc2_cbc, &EVP_rc2_cbc}, - /* 1.2.840.113549.3.7 */ + // 1.2.840.113549.3.7 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07}, 8, NID_des_ede3_cbc, &EVP_des_ede3_cbc}, - /* 2.16.840.1.101.3.4.1.2 */ + // 2.16.840.1.101.3.4.1.2 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02}, 9, NID_aes_128_cbc, &EVP_aes_128_cbc}, - /* 2.16.840.1.101.3.4.1.22 */ + // 2.16.840.1.101.3.4.1.22 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16}, 9, NID_aes_192_cbc, &EVP_aes_192_cbc}, - /* 2.16.840.1.101.3.4.1.42 */ + // 2.16.840.1.101.3.4.1.42 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a}, 9, NID_aes_256_cbc, @@ -167,13 +167,13 @@ int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, return 0; } - /* Generate a random IV. */ + // Generate a random IV. 
uint8_t iv[EVP_MAX_IV_LENGTH]; if (!RAND_bytes(iv, EVP_CIPHER_iv_length(cipher))) { return 0; } - /* See RFC 2898, appendix A. */ + // See RFC 2898, appendix A. CBB algorithm, oid, param, kdf, kdf_oid, kdf_param, salt_cbb, cipher_cbb, iv_cbb; if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) || @@ -187,14 +187,14 @@ int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, !CBB_add_asn1(&kdf_param, &salt_cbb, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&salt_cbb, salt, salt_len) || !CBB_add_asn1_uint64(&kdf_param, iterations) || - /* Specify a key length for RC2. */ + // Specify a key length for RC2. (cipher_nid == NID_rc2_cbc && !CBB_add_asn1_uint64(&kdf_param, EVP_CIPHER_key_length(cipher))) || - /* Omit the PRF. We use the default hmacWithSHA1. */ + // Omit the PRF. We use the default hmacWithSHA1. !CBB_add_asn1(¶m, &cipher_cbb, CBS_ASN1_SEQUENCE) || !add_cipher_oid(&cipher_cbb, cipher_nid) || - /* RFC 2898 says RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and - * IV, but OpenSSL always uses an OCTET STRING IV, so we do the same. */ + // RFC 2898 says RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and + // IV, but OpenSSL always uses an OCTET STRING IV, so we do the same. !CBB_add_asn1(&cipher_cbb, &iv_cbb, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&iv_cbb, iv, EVP_CIPHER_iv_length(cipher)) || !CBB_flush(out)) { @@ -220,20 +220,20 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - /* Only PBKDF2 is supported. */ + // Only PBKDF2 is supported. if (!CBS_mem_equal(&kdf_obj, kPBKDF2, sizeof(kPBKDF2))) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION); return 0; } - /* See if we recognise the encryption algorithm. */ + // See if we recognise the encryption algorithm. const EVP_CIPHER *cipher = cbs_to_cipher(&enc_obj); if (cipher == NULL) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_CIPHER); return 0; } - /* Parse the KDF parameters. See RFC 8018, appendix A.2. 
*/ + // Parse the KDF parameters. See RFC 8018, appendix A.2. CBS pbkdf2_params, salt; uint64_t iterations; if (!CBS_get_asn1(&kdf, &pbkdf2_params, CBS_ASN1_SEQUENCE) || @@ -249,8 +249,8 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - /* The optional keyLength parameter, if present, must match the key length of - * the cipher. */ + // The optional keyLength parameter, if present, must match the key length of + // the cipher. if (CBS_peek_asn1_tag(&pbkdf2_params, CBS_ASN1_INTEGER)) { uint64_t key_len; if (!CBS_get_asn1_uint64(&pbkdf2_params, &key_len)) { @@ -273,14 +273,14 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - /* We only support hmacWithSHA1. It is the DEFAULT, so DER requires it be - * omitted, but we match OpenSSL in tolerating it being present. */ + // We only support hmacWithSHA1. It is the DEFAULT, so DER requires it be + // omitted, but we match OpenSSL in tolerating it being present. if (!CBS_mem_equal(&prf, kHMACWithSHA1, sizeof(kHMACWithSHA1))) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_PRF); return 0; } - /* hmacWithSHA1 has a NULL parameter. */ + // hmacWithSHA1 has a NULL parameter. CBS null; if (!CBS_get_asn1(&alg_id, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 || @@ -290,10 +290,10 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, } } - /* Parse the encryption scheme parameters. Note OpenSSL does not match the - * specification. Per RFC 2898, this should depend on the encryption scheme. - * In particular, RC2-CBC uses a SEQUENCE with version and IV. We align with - * OpenSSL. */ + // Parse the encryption scheme parameters. Note OpenSSL does not match the + // specification. Per RFC 2898, this should depend on the encryption scheme. + // In particular, RC2-CBC uses a SEQUENCE with version and IV. We align with + // OpenSSL. 
CBS iv; if (!CBS_get_asn1(&enc_scheme, &iv, CBS_ASN1_OCTETSTRING) || CBS_len(&enc_scheme) != 0) { diff --git a/crypto/pkcs8/pkcs12_test.cc b/crypto/pkcs8/pkcs12_test.cc index c5a7e077..66d03970 100644 --- a/crypto/pkcs8/pkcs12_test.cc +++ b/crypto/pkcs8/pkcs12_test.cc @@ -24,8 +24,8 @@ #include -/* kPKCS12DER contains sample PKCS#12 data generated by OpenSSL with: - * openssl pkcs12 -export -inkey key.pem -in cacert.pem */ +// kPKCS12DER contains sample PKCS#12 data generated by OpenSSL with: +// openssl pkcs12 -export -inkey key.pem -in cacert.pem static const uint8_t kOpenSSL[] = { 0x30, 0x82, 0x09, 0xa1, 0x02, 0x01, 0x03, 0x30, 0x82, 0x09, 0x67, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01, 0xa0, 0x82, @@ -235,8 +235,8 @@ static const uint8_t kOpenSSL[] = { 0xfd, 0x82, 0x14, 0xd8, 0x5c, 0x02, 0x02, 0x08, 0x00, }; -/* kNSS is the result of importing the OpenSSL example PKCS#12 into Chrome and - * then exporting it again. */ +// kNSS is the result of importing the OpenSSL example PKCS#12 into Chrome and +// then exporting it again. static const uint8_t kNSS[] = { 0x30, 0x80, 0x02, 0x01, 0x03, 0x30, 0x80, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01, 0xa0, 0x80, 0x24, 0x80, 0x04, 0x82, @@ -460,8 +460,8 @@ static const uint8_t kNSS[] = { 0xbc, 0x8d, 0x02, 0x02, 0x07, 0xd0, 0x00, 0x00, }; -/* kWindows is a dummy key and certificate exported from the certificate - * manager on Windows 7. */ +// kWindows is a dummy key and certificate exported from the certificate +// manager on Windows 7. 
static const uint8_t kWindows[] = { 0x30, 0x82, 0x0a, 0x02, 0x02, 0x01, 0x03, 0x30, 0x82, 0x09, 0xbe, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01, 0xa0, 0x82, @@ -679,9 +679,8 @@ static const uint8_t kWindows[] = { 0xfe, 0x3a, 0x66, 0x47, 0x40, 0x49, 0x02, 0x02, 0x07, 0xd0, }; -/* kPBES2 is a PKCS#12 file using PBES2 created with: - * openssl pkcs12 -export -inkey key.pem -in cert.pem -keypbe AES-128-CBC \ - * -certpbe AES-128-CBC */ +// kPBES2 is a PKCS#12 file using PBES2 created with: +// openssl pkcs12 -export -inkey key.pem -in cert.pem -keypbe AES-128-CBC -certpbe AES-128-CBC static const uint8_t kPBES2[] = { 0x30, 0x82, 0x0a, 0x03, 0x02, 0x01, 0x03, 0x30, 0x82, 0x09, 0xc9, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01, 0xa0, 0x82, diff --git a/crypto/pkcs8/pkcs8.c b/crypto/pkcs8/pkcs8.c index 08cc5a35..388d65e5 100644 --- a/crypto/pkcs8/pkcs8.c +++ b/crypto/pkcs8/pkcs8.c @@ -88,7 +88,7 @@ static int ascii_to_ucs2(const char *ascii, size_t ascii_len, unitmp[i + 1] = ascii[i >> 1]; } - /* Terminate the result with a UCS-2 NUL. */ + // Terminate the result with a UCS-2 NUL. unitmp[ulen - 2] = 0; unitmp[ulen - 1] = 0; *out_len = ulen; @@ -99,8 +99,8 @@ static int ascii_to_ucs2(const char *ascii, size_t ascii_len, int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len, uint8_t id, unsigned iterations, size_t out_len, uint8_t *out, const EVP_MD *md) { - /* See https://tools.ietf.org/html/rfc7292#appendix-B. Quoted parts of the - * specification have errata applied and other typos fixed. */ + // See https://tools.ietf.org/html/rfc7292#appendix-B. Quoted parts of the + // specification have errata applied and other typos fixed. 
if (iterations < 1) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_ITERATION_COUNT); @@ -112,31 +112,31 @@ int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, EVP_MD_CTX_init(&ctx); uint8_t *pass_raw = NULL, *I = NULL; size_t pass_raw_len = 0, I_len = 0; - /* If |pass| is NULL, we use the empty string rather than {0, 0} as the raw - * password. */ + // If |pass| is NULL, we use the empty string rather than {0, 0} as the raw + // password. if (pass != NULL && !ascii_to_ucs2(pass, pass_len, &pass_raw, &pass_raw_len)) { goto err; } - /* In the spec, |block_size| is called "v", but measured in bits. */ + // In the spec, |block_size| is called "v", but measured in bits. size_t block_size = EVP_MD_block_size(md); - /* 1. Construct a string, D (the "diversifier"), by concatenating v/8 copies - * of ID. */ + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 copies + // of ID. uint8_t D[EVP_MAX_MD_BLOCK_SIZE]; OPENSSL_memset(D, id, block_size); - /* 2. Concatenate copies of the salt together to create a string S of length - * v(ceiling(s/v)) bits (the final copy of the salt may be truncated to - * create S). Note that if the salt is the empty string, then so is S. - * - * 3. Concatenate copies of the password together to create a string P of - * length v(ceiling(p/v)) bits (the final copy of the password may be - * truncated to create P). Note that if the password is the empty string, - * then so is P. - * - * 4. Set I=S||P to be the concatenation of S and P. */ + // 2. Concatenate copies of the salt together to create a string S of length + // v(ceiling(s/v)) bits (the final copy of the salt may be truncated to + // create S). Note that if the salt is the empty string, then so is S. + // + // 3. Concatenate copies of the password together to create a string P of + // length v(ceiling(p/v)) bits (the final copy of the password may be + // truncated to create P). Note that if the password is the empty string, + // then so is P. + // + // 4. 
Set I=S||P to be the concatenation of S and P. if (salt_len + block_size - 1 < salt_len || pass_raw_len + block_size - 1 < pass_raw_len) { OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW); @@ -164,8 +164,8 @@ int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, } while (out_len != 0) { - /* A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I, - * H(H(H(... H(D||I)))) */ + // A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I, + // H(H(H(... H(D||I)))) uint8_t A[EVP_MAX_MD_SIZE]; unsigned A_len; if (!EVP_DigestInit_ex(&ctx, md, NULL) || @@ -190,16 +190,16 @@ int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, break; } - /* B. Concatenate copies of A_i to create a string B of length v bits (the - * final copy of A_i may be truncated to create B). */ + // B. Concatenate copies of A_i to create a string B of length v bits (the + // final copy of A_i may be truncated to create B). uint8_t B[EVP_MAX_MD_BLOCK_SIZE]; for (size_t i = 0; i < block_size; i++) { B[i] = A[i % A_len]; } - /* C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit blocks, - * where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod - * 2^v for each j. */ + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit blocks, + // where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod + // 2^v for each j. 
assert(I_len % block_size == 0); for (size_t i = 0; i < I_len; i += block_size) { unsigned carry = 1; @@ -277,7 +277,7 @@ static int pkcs12_pbe_decrypt_init(const struct pbe_suite *suite, static const struct pbe_suite kBuiltinPBE[] = { { NID_pbe_WithSHA1And40BitRC2_CBC, - /* 1.2.840.113549.1.12.1.6 */ + // 1.2.840.113549.1.12.1.6 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x06}, 10, EVP_rc2_40_cbc, @@ -286,7 +286,7 @@ static const struct pbe_suite kBuiltinPBE[] = { }, { NID_pbe_WithSHA1And128BitRC4, - /* 1.2.840.113549.1.12.1.1 */ + // 1.2.840.113549.1.12.1.1 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x01}, 10, EVP_rc4, @@ -295,7 +295,7 @@ static const struct pbe_suite kBuiltinPBE[] = { }, { NID_pbe_WithSHA1And3_Key_TripleDES_CBC, - /* 1.2.840.113549.1.12.1.3 */ + // 1.2.840.113549.1.12.1.3 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03}, 10, EVP_des_ede3_cbc, @@ -304,7 +304,7 @@ static const struct pbe_suite kBuiltinPBE[] = { }, { NID_pbes2, - /* 1.2.840.113549.1.5.13 */ + // 1.2.840.113549.1.5.13 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d}, 9, NULL, @@ -333,7 +333,7 @@ static int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg, return 0; } - /* See RFC 2898, appendix A.3. */ + // See RFC 2898, appendix A.3. CBB algorithm, oid, param, salt_cbb; if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || @@ -411,7 +411,7 @@ err: EVP_PKEY *PKCS8_parse_encrypted_private_key(CBS *cbs, const char *pass, size_t pass_len) { - /* See RFC 5208, section 6. */ + // See RFC 5208, section 6. CBS epki, algorithm, ciphertext; if (!CBS_get_asn1(cbs, &epki, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&epki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -447,7 +447,7 @@ int PKCS8_marshal_encrypted_private_key(CBB *out, int pbe_nid, EVP_CIPHER_CTX ctx; EVP_CIPHER_CTX_init(&ctx); - /* Generate a random salt if necessary. */ + // Generate a random salt if necessary. 
if (salt == NULL) { if (salt_len == 0) { salt_len = PKCS5_SALT_LEN; @@ -466,7 +466,7 @@ int PKCS8_marshal_encrypted_private_key(CBB *out, int pbe_nid, iterations = PKCS5_DEFAULT_ITERATIONS; } - /* Serialize the input key. */ + // Serialize the input key. CBB plaintext_cbb; if (!CBB_init(&plaintext_cbb, 128) || !EVP_marshal_private_key(&plaintext_cbb, pkey) || diff --git a/crypto/pkcs8/pkcs8_test.cc b/crypto/pkcs8/pkcs8_test.cc index 020c9d94..44388bb8 100644 --- a/crypto/pkcs8/pkcs8_test.cc +++ b/crypto/pkcs8/pkcs8_test.cc @@ -22,14 +22,13 @@ #include "../internal.h" -/* kDER is a PKCS#8 encrypted private key. It was generated with: - * - * openssl genrsa 512 > test.key - * openssl pkcs8 -topk8 -in test.key -out test.key.encrypted -v2 des3 -outform der - * hexdump -Cv test.key.encrypted - * - * The password is "testing". - */ +// kDER is a PKCS#8 encrypted private key. It was generated with: +// +// openssl genrsa 512 > test.key +// openssl pkcs8 -topk8 -in test.key -out test.key.encrypted -v2 des3 -outform der +// hexdump -Cv test.key.encrypted +// +// The password is "testing". static const uint8_t kDER[] = { 0x30, 0x82, 0x01, 0x9e, 0x30, 0x40, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d, 0x30, 0x33, 0x30, 0x1b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0c, @@ -60,7 +59,7 @@ static const uint8_t kDER[] = { 0xd6, 0x2d, }; -/* kNullPassword is a PKCS#8 encrypted private key using the null password. */ +// kNullPassword is a PKCS#8 encrypted private key using the null password. static const uint8_t kNullPassword[] = { 0x30, 0x81, 0xb0, 0x30, 0x1b, 0x06, 0x0a, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03, 0x30, 0x0d, 0x04, 0x08, 0xb2, 0xfe, 0x68, @@ -79,8 +78,8 @@ static const uint8_t kNullPassword[] = { 0x49, 0xf6, 0x7e, 0xd0, 0x42, 0xaa, 0x14, 0x3c, 0x24, 0x77, 0xb4, }; -/* kNullPasswordNSS is a PKCS#8 encrypted private key using the null password - * and generated by NSS. 
*/ +// kNullPasswordNSS is a PKCS#8 encrypted private key using the null password +// and generated by NSS. static const uint8_t kNullPasswordNSS[] = { 0x30, 0x81, 0xb8, 0x30, 0x23, 0x06, 0x0a, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03, 0x30, 0x15, 0x04, 0x10, 0x3f, 0xac, 0xe9, @@ -100,8 +99,8 @@ static const uint8_t kNullPasswordNSS[] = { 0x0a, 0xb2, 0x1d, 0xca, 0x15, 0xb2, 0xca, }; -/* kEmptyPasswordOpenSSL is a PKCS#8 encrypted private key using the empty - * password and generated by OpenSSL. */ +// kEmptyPasswordOpenSSL is a PKCS#8 encrypted private key using the empty +// password and generated by OpenSSL. static const uint8_t kEmptyPasswordOpenSSL[] = { 0x30, 0x82, 0x01, 0xa1, 0x30, 0x1b, 0x06, 0x0a, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03, 0x30, 0x0d, 0x04, 0x08, 0x86, 0xaa, diff --git a/crypto/pkcs8/pkcs8_x509.c b/crypto/pkcs8/pkcs8_x509.c index 875b4ca6..ace5f33b 100644 --- a/crypto/pkcs8/pkcs8_x509.c +++ b/crypto/pkcs8/pkcs8_x509.c @@ -75,10 +75,10 @@ #include "../internal.h" -/* Minor tweak to operation: zero private key data */ +// Minor tweak to operation: zero private key data static int pkey_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, void *exarg) { - /* Since the structure must still be valid use ASN1_OP_FREE_PRE */ + // Since the structure must still be valid use ASN1_OP_FREE_PRE if (operation == ASN1_OP_FREE_PRE) { PKCS8_PRIV_KEY_INFO *key = (PKCS8_PRIV_KEY_INFO *)*pval; if (key->pkey && key->pkey->type == V_ASN1_OCTET_STRING && @@ -162,7 +162,7 @@ PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass, EVP_PKEY *pkey = NULL; uint8_t *in = NULL; - /* Convert the legacy ASN.1 object to a byte string. */ + // Convert the legacy ASN.1 object to a byte string. 
int in_len = i2d_X509_SIG(pkcs8, &in); if (in_len < 0) { goto err; @@ -193,7 +193,7 @@ X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, pass_len = (size_t)pass_len_in; } - /* Parse out the private key. */ + // Parse out the private key. EVP_PKEY *pkey = EVP_PKCS82PKEY(p8inf); if (pkey == NULL) { return NULL; @@ -212,7 +212,7 @@ X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, goto err; } - /* Convert back to legacy ASN.1 objects. */ + // Convert back to legacy ASN.1 objects. const uint8_t *ptr = der; ret = d2i_X509_SIG(NULL, &ptr, der_len); if (ret == NULL || ptr != der + der_len) { @@ -234,8 +234,8 @@ struct pkcs12_context { size_t password_len; }; -/* PKCS12_handle_sequence parses a BER-encoded SEQUENCE of elements in a PKCS#12 - * structure. */ +// PKCS12_handle_sequence parses a BER-encoded SEQUENCE of elements in a PKCS#12 +// structure. static int PKCS12_handle_sequence( CBS *sequence, struct pkcs12_context *ctx, int (*handle_element)(CBS *cbs, struct pkcs12_context *ctx)) { @@ -244,10 +244,10 @@ static int PKCS12_handle_sequence( CBS in; int ret = 0; - /* Although a BER->DER conversion is done at the beginning of |PKCS12_parse|, - * the ASN.1 data gets wrapped in OCTETSTRINGs and/or encrypted and the - * conversion cannot see through those wrappings. So each time we step - * through one we need to convert to DER again. */ + // Although a BER->DER conversion is done at the beginning of |PKCS12_parse|, + // the ASN.1 data gets wrapped in OCTETSTRINGs and/or encrypted and the + // conversion cannot see through those wrappings. So each time we step + // through one we need to convert to DER again. 
if (!CBS_asn1_ber_to_der(sequence, &der_bytes, &der_len)) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); return 0; @@ -285,20 +285,20 @@ err: return ret; } -/* 1.2.840.113549.1.12.10.1.2 */ +// 1.2.840.113549.1.12.10.1.2 static const uint8_t kPKCS8ShroudedKeyBag[] = { 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x02}; -/* 1.2.840.113549.1.12.10.1.3 */ +// 1.2.840.113549.1.12.10.1.3 static const uint8_t kCertBag[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x03}; -/* 1.2.840.113549.1.9.22.1 */ +// 1.2.840.113549.1.9.22.1 static const uint8_t kX509Certificate[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x16, 0x01}; -/* PKCS12_handle_safe_bag parses a single SafeBag element in a PKCS#12 - * structure. */ +// PKCS12_handle_safe_bag parses a single SafeBag element in a PKCS#12 +// structure. static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { CBS bag_id, wrapped_value; if (!CBS_get_asn1(safe_bag, &bag_id, CBS_ASN1_OBJECT) || @@ -311,7 +311,7 @@ static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { if (CBS_mem_equal(&bag_id, kPKCS8ShroudedKeyBag, sizeof(kPKCS8ShroudedKeyBag))) { - /* See RFC 7292, section 4.2.2. */ + // See RFC 7292, section 4.2.2. if (*ctx->out_key) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MULTIPLE_PRIVATE_KEYS_IN_PKCS12); return 0; @@ -334,7 +334,7 @@ static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { } if (CBS_mem_equal(&bag_id, kCertBag, sizeof(kCertBag))) { - /* See RFC 7292, section 4.2.3. */ + // See RFC 7292, section 4.2.3. CBS cert_bag, cert_type, wrapped_cert, cert; if (!CBS_get_asn1(&wrapped_value, &cert_bag, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&cert_bag, &cert_type, CBS_ASN1_OBJECT) || @@ -345,7 +345,7 @@ static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { return 0; } - /* Skip unknown certificate types. */ + // Skip unknown certificate types. 
if (!CBS_mem_equal(&cert_type, kX509Certificate, sizeof(kX509Certificate))) { return 1; @@ -377,20 +377,20 @@ static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { return 1; } - /* Unknown element type - ignore it. */ + // Unknown element type - ignore it. return 1; } -/* 1.2.840.113549.1.7.1 */ +// 1.2.840.113549.1.7.1 static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01}; -/* 1.2.840.113549.1.7.6 */ +// 1.2.840.113549.1.7.6 static const uint8_t kPKCS7EncryptedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x06}; -/* PKCS12_handle_content_info parses a single PKCS#7 ContentInfo element in a - * PKCS#12 structure. */ +// PKCS12_handle_content_info parses a single PKCS#7 ContentInfo element in a +// PKCS#12 structure. static int PKCS12_handle_content_info(CBS *content_info, struct pkcs12_context *ctx) { CBS content_type, wrapped_contents, contents; @@ -407,23 +407,23 @@ static int PKCS12_handle_content_info(CBS *content_info, if (CBS_mem_equal(&content_type, kPKCS7EncryptedData, sizeof(kPKCS7EncryptedData))) { - /* See https://tools.ietf.org/html/rfc2315#section-13. - * - * PKCS#7 encrypted data inside a PKCS#12 structure is generally an - * encrypted certificate bag and it's generally encrypted with 40-bit - * RC2-CBC. */ + // See https://tools.ietf.org/html/rfc2315#section-13. + // + // PKCS#7 encrypted data inside a PKCS#12 structure is generally an + // encrypted certificate bag and it's generally encrypted with 40-bit + // RC2-CBC. 
CBS version_bytes, eci, contents_type, ai, encrypted_contents; uint8_t *out; size_t out_len; if (!CBS_get_asn1(&wrapped_contents, &contents, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&contents, &version_bytes, CBS_ASN1_INTEGER) || - /* EncryptedContentInfo, see - * https://tools.ietf.org/html/rfc2315#section-10.1 */ + // EncryptedContentInfo, see + // https://tools.ietf.org/html/rfc2315#section-10.1 !CBS_get_asn1(&contents, &eci, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&eci, &contents_type, CBS_ASN1_OBJECT) || - /* AlgorithmIdentifier, see - * https://tools.ietf.org/html/rfc5280#section-4.1.1.2 */ + // AlgorithmIdentifier, see + // https://tools.ietf.org/html/rfc5280#section-4.1.1.2 !CBS_get_asn1(&eci, &ai, CBS_ASN1_SEQUENCE) || !CBS_get_asn1_implicit_string( &eci, &encrypted_contents, &storage, @@ -459,7 +459,7 @@ static int PKCS12_handle_content_info(CBS *content_info, ret = PKCS12_handle_sequence(&octet_string_contents, ctx, PKCS12_handle_safe_bag); } else { - /* Unknown element type - ignore it. */ + // Unknown element type - ignore it. ret = 1; } @@ -478,7 +478,7 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, struct pkcs12_context ctx; const size_t original_out_certs_len = sk_X509_num(out_certs); - /* The input may be in BER format. */ + // The input may be in BER format. if (!CBS_asn1_ber_to_der(ber_in, &der_bytes, &der_len)) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); return 0; @@ -492,8 +492,8 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, *out_key = NULL; OPENSSL_memset(&ctx, 0, sizeof(ctx)); - /* See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12v1.pdf, section - * four. */ + // See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12v1.pdf, section + // four. 
if (!CBS_get_asn1(&in, &pfx, CBS_ASN1_SEQUENCE) || CBS_len(&in) != 0 || !CBS_get_asn1_uint64(&pfx, &version)) { @@ -521,8 +521,8 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, goto err; } - /* authsafe is a PKCS#7 ContentInfo. See - * https://tools.ietf.org/html/rfc2315#section-7. */ + // authsafe is a PKCS#7 ContentInfo. See + // https://tools.ietf.org/html/rfc2315#section-7. if (!CBS_get_asn1(&authsafe, &content_type, CBS_ASN1_OBJECT) || !CBS_get_asn1(&authsafe, &wrapped_authsafes, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { @@ -530,8 +530,8 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, goto err; } - /* The content type can either be data or signedData. The latter indicates - * that it's signed by a public key, which isn't supported. */ + // The content type can either be data or signedData. The latter indicates + // that it's signed by a public key, which isn't supported. if (!CBS_mem_equal(&content_type, kPKCS7Data, sizeof(kPKCS7Data))) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED); goto err; @@ -547,7 +547,7 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, ctx.password = password; ctx.password_len = password != NULL ? strlen(password) : 0; - /* Verify the MAC. */ + // Verify the MAC. { CBS mac, salt, expected_mac; if (!CBS_get_asn1(&mac_data, &mac, CBS_ASN1_SEQUENCE)) { @@ -566,7 +566,7 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, goto err; } - /* The iteration count is optional and the default is one. */ + // The iteration count is optional and the default is one. uint64_t iterations = 1; if (CBS_len(&mac_data) > 0) { if (!CBS_get_asn1_uint64(&mac_data, &iterations) || @@ -596,7 +596,7 @@ int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, } } - /* authsafes contains a series of PKCS#7 ContentInfos. */ + // authsafes contains a series of PKCS#7 ContentInfos. 
if (!PKCS12_handle_sequence(&authsafes, &ctx, PKCS12_handle_content_info)) { goto err; } @@ -673,8 +673,8 @@ PKCS12* d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12) { if (used == 0) { goto out; } - /* Workaround a bug in node.js. It uses a memory BIO for this in the wrong - * mode. */ + // Workaround a bug in node.js. It uses a memory BIO for this in the wrong + // mode. n = 0; } diff --git a/crypto/poly1305/internal.h b/crypto/poly1305/internal.h index 079f51ea..251b1f4f 100644 --- a/crypto/poly1305/internal.h +++ b/crypto/poly1305/internal.h @@ -35,7 +35,7 @@ void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_POLY1305_INTERNAL_H */ +#endif // OPENSSL_HEADER_POLY1305_INTERNAL_H diff --git a/crypto/poly1305/poly1305.c b/crypto/poly1305/poly1305.c index 5c310e9f..c3e92721 100644 --- a/crypto/poly1305/poly1305.c +++ b/crypto/poly1305/poly1305.c @@ -12,9 +12,9 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation of poly1305 is by Andrew Moon - * (https://github.com/floodyberry/poly1305-donna) and released as public - * domain. */ +// This implementation of poly1305 is by Andrew Moon +// (https://github.com/floodyberry/poly1305-donna) and released as public +// domain. #include @@ -28,7 +28,7 @@ #if defined(OPENSSL_WINDOWS) || !defined(OPENSSL_X86_64) -/* We can assume little-endian. */ +// We can assume little-endian. static uint32_t U8TO32_LE(const uint8_t *m) { uint32_t r; OPENSSL_memcpy(&r, m, sizeof(r)); @@ -55,9 +55,9 @@ static inline struct poly1305_state_st *poly1305_aligned_state( return (struct poly1305_state_st *)(((uintptr_t)state + 63) & ~63); } -/* poly1305_blocks updates |state| given some amount of input data. This - * function may only be called with a |len| that is not a multiple of 16 at the - * end of the data. 
Otherwise the input must be buffered into 16 byte blocks. */ +// poly1305_blocks updates |state| given some amount of input data. This +// function may only be called with a |len| that is not a multiple of 16 at the +// end of the data. Otherwise the input must be buffered into 16 byte blocks. static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, size_t len) { uint32_t t0, t1, t2, t3; @@ -123,7 +123,7 @@ poly1305_donna_mul: goto poly1305_donna_16bytes; } -/* final bytes */ +// final bytes poly1305_donna_atmost15bytes: if (!len) { return; @@ -168,7 +168,7 @@ void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { t2 = U8TO32_LE(key + 8); t3 = U8TO32_LE(key + 12); - /* precompute multipliers */ + // precompute multipliers state->r0 = t0 & 0x3ffffff; t0 >>= 26; t0 |= t1 << 6; @@ -187,7 +187,7 @@ void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { state->s3 = state->r3 * 5; state->s4 = state->r4 * 5; - /* init state */ + // init state state->h0 = 0; state->h1 = 0; state->h2 = 0; @@ -315,4 +315,4 @@ void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { U32TO8_LE(&mac[12], f3); } -#endif /* OPENSSL_WINDOWS || !OPENSSL_X86_64 */ +#endif // OPENSSL_WINDOWS || !OPENSSL_X86_64 diff --git a/crypto/poly1305/poly1305_arm.c b/crypto/poly1305/poly1305_arm.c index 6280e2c7..4aff713f 100644 --- a/crypto/poly1305/poly1305_arm.c +++ b/crypto/poly1305/poly1305_arm.c @@ -12,8 +12,8 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation was taken from the public domain, neon2 version in - * SUPERCOP by D. J. Bernstein and Peter Schwabe. */ +// This implementation was taken from the public domain, neon2 version in +// SUPERCOP by D. J. Bernstein and Peter Schwabe. 
#include @@ -26,7 +26,7 @@ #if defined(OPENSSL_POLY1305_NEON) typedef struct { - uint32_t v[12]; /* for alignment; only using 10 */ + uint32_t v[12]; // for alignment; only using 10 } fe1305x2; #define addmulmod openssl_poly1305_neon2_addmulmod @@ -125,8 +125,8 @@ static void fe1305x2_tobytearray(uint8_t *r, fe1305x2 *x) { *(uint32_t *)(r + 12) = (x3 >> 18) + (x4 << 8); } -/* load32 exists to avoid breaking strict aliasing rules in - * fe1305x2_frombytearray. */ +// load32 exists to avoid breaking strict aliasing rules in +// fe1305x2_frombytearray. static uint32_t load32(uint8_t *t) { uint32_t tmp; OPENSSL_memcpy(&tmp, t, sizeof(tmp)); @@ -197,11 +197,11 @@ void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]) { r->v[9] = r->v[8] = 0x00fffff & ((*(uint32_t *)(key + 12)) >> 8); for (j = 0; j < 10; j++) { - h->v[j] = 0; /* XXX: should fast-forward a bit */ + h->v[j] = 0; // XXX: should fast-forward a bit } - addmulmod(precomp, r, r, &zero); /* precompute r^2 */ - addmulmod(precomp + 1, precomp, precomp, &zero); /* precompute r^4 */ + addmulmod(precomp, r, r, &zero); // precompute r^2 + addmulmod(precomp + 1, precomp, precomp, &zero); // precompute r^4 OPENSSL_memcpy(st->key, key + 16, 16); st->buf_used = 0; @@ -301,4 +301,4 @@ void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]) { fe1305x2_tobytearray(mac, h); } -#endif /* OPENSSL_POLY1305_NEON */ +#endif // OPENSSL_POLY1305_NEON diff --git a/crypto/poly1305/poly1305_vec.c b/crypto/poly1305/poly1305_vec.c index 3045a2f1..80eaa36d 100644 --- a/crypto/poly1305/poly1305_vec.c +++ b/crypto/poly1305/poly1305_vec.c @@ -12,11 +12,11 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation of poly1305 is by Andrew Moon - * (https://github.com/floodyberry/poly1305-donna) and released as public - * domain. 
It implements SIMD vectorization based on the algorithm described in - * http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte - * block size */ +// This implementation of poly1305 is by Andrew Moon +// (https://github.com/floodyberry/poly1305-donna) and released as public +// domain. It implements SIMD vectorization based on the algorithm described in +// http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte +// block size #include @@ -69,14 +69,14 @@ typedef struct poly1305_state_internal_t { poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 bytes of free storage */ union { - xmmi H[5]; /* 80 bytes */ + xmmi H[5]; // 80 bytes uint64_t HH[10]; }; - /* uint64_t r0,r1,r2; [24 bytes] */ - /* uint64_t pad0,pad1; [16 bytes] */ - uint64_t started; /* 8 bytes */ - uint64_t leftover; /* 8 bytes */ - uint8_t buffer[64]; /* 64 bytes */ + // uint64_t r0,r1,r2; [24 bytes] + // uint64_t pad0,pad1; [16 bytes] + uint64_t started; // 8 bytes + uint64_t leftover; // 8 bytes + uint8_t buffer[64]; // 64 bytes } poly1305_state_internal; /* 448 bytes total + 63 bytes for alignment = 511 bytes raw */ @@ -85,7 +85,7 @@ static inline poly1305_state_internal *poly1305_aligned_state( return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63); } -/* copy 0-63 bytes */ +// copy 0-63 bytes static inline void poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) { size_t offset = src - dst; @@ -117,7 +117,7 @@ poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) { } } -/* zero 0-15 bytes */ +// zero 0-15 bytes static inline void poly1305_block_zero(uint8_t *dst, size_t bytes) { if (bytes & 8) { *(uint64_t *)dst = 0; @@ -146,7 +146,7 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { uint64_t r0, r1, r2; uint64_t t0, t1; - /* clamp key */ + // clamp key t0 = U8TO64_LE(key + 0); t1 = U8TO64_LE(key + 8); r0 = t0 & 0xffc0fffffff; @@ -156,7 +156,7 @@ void 
CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { t1 >>= 24; r2 = t1 & 0x00ffffffc0f; - /* store r in un-used space of st->P[1] */ + // store r in un-used space of st->P[1] p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); @@ -165,13 +165,13 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { p->R22.d[1] = (uint32_t)(r2); p->R22.d[3] = (uint32_t)(r2 >> 32); - /* store pad */ + // store pad p->R23.d[1] = U8TO32_LE(key + 16); p->R23.d[3] = U8TO32_LE(key + 20); p->R24.d[1] = U8TO32_LE(key + 24); p->R24.d[3] = U8TO32_LE(key + 28); - /* H = 0 */ + // H = 0 st->H[0] = _mm_setzero_si128(); st->H[1] = _mm_setzero_si128(); st->H[2] = _mm_setzero_si128(); @@ -196,7 +196,7 @@ static void poly1305_first_block(poly1305_state_internal *st, uint64_t c; uint64_t i; - /* pull out stored info */ + // pull out stored info p = &st->P[1]; r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; @@ -205,7 +205,7 @@ static void poly1305_first_block(poly1305_state_internal *st, pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; - /* compute powers r^2,r^4 */ + // compute powers r^2,r^4 r20 = r0; r21 = r1; r22 = r2; @@ -249,7 +249,7 @@ static void poly1305_first_block(poly1305_state_internal *st, p--; } - /* put saved info back */ + // put saved info back p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); @@ -262,7 +262,7 @@ static void poly1305_first_block(poly1305_state_internal *st, p->R24.d[1] = (uint32_t)(pad1); p->R24.d[3] = (uint32_t)(pad1 >> 32); - /* H = [Mx,My] */ + // H = [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -294,7 +294,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, H4 = st->H[4]; while (bytes >= 64) { - /* H *= [r^4,r^4] */ + 
// H *= [r^4,r^4] p = &st->P[0]; T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); @@ -342,7 +342,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My]*[r^2,r^2] */ + // H += [Mx,My]*[r^2,r^2] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -406,7 +406,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(M4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My] */ + // H += [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)), _mm_loadl_epi64((const xmmi *)(m + 48))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)), @@ -424,7 +424,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_add_epi64(T3, M3); T4 = _mm_add_epi64(T4, M4); - /* reduce */ + // reduce C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); @@ -447,7 +447,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */ + // H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) H0 = T0; H1 = T1; H2 = T2; @@ -488,11 +488,11 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, H3 = st->H[3]; H4 = st->H[4]; - /* p = [r^2,r^2] */ + // p = [r^2,r^2] p = &st->P[1]; if (bytes >= 32) { - /* H *= [r^2,r^2] */ + // H *= [r^2,r^2] T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); T2 = _mm_mul_epu32(H0, p->R22.v); @@ -539,7 +539,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My] */ + // H += [Mx,My] T5 = 
_mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -557,7 +557,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_add_epi64(T3, M3); T4 = _mm_add_epi64(T4, M4); - /* reduce */ + // reduce C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); @@ -580,7 +580,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H = (H*[r^2,r^2] + [Mx,My]) */ + // H = (H*[r^2,r^2] + [Mx,My]) H0 = T0; H1 = T1; H2 = T2; @@ -590,7 +590,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, consumed = 32; } - /* finalize, H *= [r^2,r] */ + // finalize, H *= [r^2,r] r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; @@ -605,7 +605,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, p->S23.d[2] = p->R23.d[2] * 5; p->S24.d[2] = p->R24.d[2] * 5; - /* H *= [r^2,r] */ + // H *= [r^2,r] T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); T2 = _mm_mul_epu32(H0, p->R22.v); @@ -674,7 +674,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H = H[0]+H[1] */ + // H = H[0]+H[1] H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8)); H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8)); H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8)); @@ -713,7 +713,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, poly1305_state_internal *st = poly1305_aligned_state(state); size_t want; - /* need at least 32 initial bytes to start the accelerated branch */ + // need at least 32 initial bytes to start the accelerated branch if 
(!st->started) { if ((st->leftover == 0) && (bytes > 32)) { poly1305_first_block(st, m); @@ -734,7 +734,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, st->started = 1; } - /* handle leftover */ + // handle leftover if (st->leftover) { want = poly1305_min(64 - st->leftover, bytes); poly1305_block_copy(st->buffer + st->leftover, m, want); @@ -748,7 +748,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, st->leftover = 0; } - /* process 64 byte blocks */ + // process 64 byte blocks if (bytes >= 64) { want = (bytes & ~63); poly1305_blocks(st, m, want); @@ -779,7 +779,7 @@ void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { m += consumed; } - /* st->HH will either be 0 or have the combined result */ + // st->HH will either be 0 or have the combined result h0 = st->HH[0]; h1 = st->HH[1]; h2 = st->HH[2]; @@ -826,7 +826,7 @@ poly1305_donna_mul: goto poly1305_donna_atleast16bytes; } -/* final bytes */ +// final bytes poly1305_donna_atmost15bytes: if (!leftover) { goto poly1305_donna_finish; @@ -870,7 +870,7 @@ poly1305_donna_finish: h1 = (h1 & nc) | (g1 & c); h2 = (h2 & nc) | (g2 & c); - /* pad */ + // pad t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; h0 += (t0 & 0xfffffffffff); @@ -887,4 +887,4 @@ poly1305_donna_finish: U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24))); } -#endif /* !OPENSSL_WINDOWS && OPENSSL_X86_64 */ +#endif // !OPENSSL_WINDOWS && OPENSSL_X86_64 diff --git a/crypto/pool/internal.h b/crypto/pool/internal.h index 3ec2ec2e..5b288ebb 100644 --- a/crypto/pool/internal.h +++ b/crypto/pool/internal.h @@ -39,7 +39,7 @@ struct crypto_buffer_pool_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_POOL_INTERNAL_H */ +#endif // OPENSSL_HEADER_POOL_INTERNAL_H diff --git a/crypto/pool/pool.c b/crypto/pool/pool.c index 44d10af6..9cfbf1ee 100644 --- a/crypto/pool/pool.c +++ 
b/crypto/pool/pool.c @@ -125,8 +125,8 @@ CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len, CRYPTO_MUTEX_unlock_write(&pool->lock); if (!inserted) { - /* We raced to insert |buf| into the pool and lost, or else there was an - * error inserting. */ + // We raced to insert |buf| into the pool and lost, or else there was an + // error inserting. OPENSSL_free(buf->data); OPENSSL_free(buf); return duplicate; @@ -147,9 +147,9 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { CRYPTO_BUFFER_POOL *const pool = buf->pool; if (pool == NULL) { if (CRYPTO_refcount_dec_and_test_zero(&buf->references)) { - /* If a reference count of zero is observed, there cannot be a reference - * from any pool to this buffer and thus we are able to free this - * buffer. */ + // If a reference count of zero is observed, there cannot be a reference + // from any pool to this buffer and thus we are able to free this + // buffer. OPENSSL_free(buf->data); OPENSSL_free(buf); } @@ -163,10 +163,10 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { return; } - /* We have an exclusive lock on the pool, therefore no concurrent lookups can - * find this buffer and increment the reference count. Thus, if the count is - * zero there are and can never be any more references and thus we can free - * this buffer. */ + // We have an exclusive lock on the pool, therefore no concurrent lookups can + // find this buffer and increment the reference count. Thus, if the count is + // zero there are and can never be any more references and thus we can free + // this buffer. void *found = lh_CRYPTO_BUFFER_delete(pool->bufs, buf); assert(found != NULL); assert(found == buf); @@ -177,12 +177,12 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { } int CRYPTO_BUFFER_up_ref(CRYPTO_BUFFER *buf) { - /* This is safe in the case that |buf->pool| is NULL because it's just - * standard reference counting in that case. 
- * - * This is also safe if |buf->pool| is non-NULL because, if it were racing - * with |CRYPTO_BUFFER_free| then the two callers must have independent - * references already and so the reference count will never hit zero. */ + // This is safe in the case that |buf->pool| is NULL because it's just + // standard reference counting in that case. + // + // This is also safe if |buf->pool| is non-NULL because, if it were racing + // with |CRYPTO_BUFFER_free| then the two callers must have independent + // references already and so the reference count will never hit zero. CRYPTO_refcount_inc(&buf->references); return 1; } diff --git a/crypto/rand_extra/deterministic.c b/crypto/rand_extra/deterministic.c index 5d3a9ce4..17fa71e6 100644 --- a/crypto/rand_extra/deterministic.c +++ b/crypto/rand_extra/deterministic.c @@ -24,11 +24,11 @@ #include "../fipsmodule/rand/internal.h" -/* g_num_calls is the number of calls to |CRYPTO_sysrand| that have occurred. - * - * This is intentionally not thread-safe. If the fuzzer mode is ever used in a - * multi-threaded program, replace this with a thread-local. (A mutex would not - * be deterministic.) */ +// g_num_calls is the number of calls to |CRYPTO_sysrand| that have occurred. +// +// This is intentionally not thread-safe. If the fuzzer mode is ever used in a +// multi-threaded program, replace this with a thread-local. (A mutex would not +// be deterministic.) 
static uint64_t g_num_calls = 0; void RAND_reset_for_fuzzing(void) { g_num_calls = 0; } @@ -45,4 +45,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { g_num_calls++; } -#endif /* BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/crypto/rand_extra/forkunsafe.c b/crypto/rand_extra/forkunsafe.c index 58e241fb..0f1ececc 100644 --- a/crypto/rand_extra/forkunsafe.c +++ b/crypto/rand_extra/forkunsafe.c @@ -19,15 +19,15 @@ #include "../fipsmodule/rand/internal.h" -/* g_buffering_enabled is true if fork-unsafe buffering has been enabled. */ +// g_buffering_enabled is true if fork-unsafe buffering has been enabled. static int g_buffering_enabled = 0; -/* g_lock protects |g_buffering_enabled|. */ +// g_lock protects |g_buffering_enabled|. static struct CRYPTO_STATIC_MUTEX g_lock = CRYPTO_STATIC_MUTEX_INIT; #if !defined(OPENSSL_WINDOWS) void RAND_enable_fork_unsafe_buffering(int fd) { - /* We no longer support setting the file-descriptor with this function. */ + // We no longer support setting the file-descriptor with this function. if (fd != -1) { abort(); } diff --git a/crypto/rand_extra/fuchsia.c b/crypto/rand_extra/fuchsia.c index 9711c1db..9355d8c9 100644 --- a/crypto/rand_extra/fuchsia.c +++ b/crypto/rand_extra/fuchsia.c @@ -40,4 +40,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { } } -#endif /* OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/crypto/rand_extra/rand_extra.c b/crypto/rand_extra/rand_extra.c index 3b37e297..bed9e1ef 100644 --- a/crypto/rand_extra/rand_extra.c +++ b/crypto/rand_extra/rand_extra.c @@ -18,14 +18,14 @@ void RAND_seed(const void *buf, int num) { - /* OpenSSH calls |RAND_seed| before jailing on the assumption that any needed - * file descriptors etc will be opened. */ + // OpenSSH calls |RAND_seed| before jailing on the assumption that any needed + // file descriptors etc will be opened. 
uint8_t unused; RAND_bytes(&unused, sizeof(unused)); } int RAND_load_file(const char *path, long num) { - if (num < 0) { /* read the "whole file" */ + if (num < 0) { // read the "whole file" return 1; } else if (num <= INT_MAX) { return (int) num; diff --git a/crypto/rand_extra/windows.c b/crypto/rand_extra/windows.c index fb948472..c9555874 100644 --- a/crypto/rand_extra/windows.c +++ b/crypto/rand_extra/windows.c @@ -23,9 +23,9 @@ OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include -/* #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the - * "Community Additions" comment on MSDN here: - * http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx */ +// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the +// "Community Additions" comment on MSDN here: +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx #define SystemFunction036 NTAPI SystemFunction036 #include #undef SystemFunction036 @@ -50,4 +50,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { return; } -#endif /* OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/crypto/refcount_c11.c b/crypto/refcount_c11.c index fbc0343d..0a331a45 100644 --- a/crypto/refcount_c11.c +++ b/crypto/refcount_c11.c @@ -25,7 +25,7 @@ #include -/* See comment above the typedef of CRYPTO_refcount_t about these tests. */ +// See comment above the typedef of CRYPTO_refcount_t about these tests. 
static_assert(alignof(CRYPTO_refcount_t) == alignof(_Atomic CRYPTO_refcount_t), "_Atomic alters the needed alignment of a reference count"); static_assert(sizeof(CRYPTO_refcount_t) == sizeof(_Atomic CRYPTO_refcount_t), @@ -64,4 +64,4 @@ int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *in_count) { } } -#endif /* OPENSSL_C11_ATOMIC */ +#endif // OPENSSL_C11_ATOMIC diff --git a/crypto/refcount_lock.c b/crypto/refcount_lock.c index ea6a06d3..8b855d62 100644 --- a/crypto/refcount_lock.c +++ b/crypto/refcount_lock.c @@ -50,4 +50,4 @@ int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *count) { return ret; } -#endif /* OPENSSL_C11_ATOMIC */ +#endif // OPENSSL_C11_ATOMIC diff --git a/crypto/rsa_extra/rsa_asn1.c b/crypto/rsa_extra/rsa_asn1.c index 785044e4..23c91bd9 100644 --- a/crypto/rsa_extra/rsa_asn1.c +++ b/crypto/rsa_extra/rsa_asn1.c @@ -87,7 +87,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) { static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* An RSA object may be missing some components. */ + // An RSA object may be missing some components. OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } @@ -124,10 +124,10 @@ RSA *RSA_parse_public_key(CBS *cbs) { } RSA *RSA_parse_public_key_buggy(CBS *cbs) { - /* Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Remove this code and callers in March 2016. */ + // Estonian IDs issued between September 2014 to September 2015 are + // broken. See https://crbug.com/532048 and https://crbug.com/534766. + // + // TODO(davidben): Remove this code and callers in March 2016. return parse_public_key(cbs, 1 /* buggy */); } @@ -169,8 +169,8 @@ int RSA_public_key_to_bytes(uint8_t **out_bytes, size_t *out_len, return 1; } -/* kVersionTwoPrime is the value of the version field for a two-prime - * RSAPrivateKey structure (RFC 3447). 
*/ +// kVersionTwoPrime is the value of the version field for a two-prime +// RSAPrivateKey structure (RFC 3447). static const uint64_t kVersionTwoPrime = 0; RSA *RSA_parse_private_key(CBS *cbs) { diff --git a/crypto/stack/stack.c b/crypto/stack/stack.c index f78209d5..f6b44123 100644 --- a/crypto/stack/stack.c +++ b/crypto/stack/stack.c @@ -63,8 +63,8 @@ #include "../internal.h" -/* kMinSize is the number of pointers that will be initially allocated in a new - * stack. */ +// kMinSize is the number of pointers that will be initially allocated in a new +// stack. static const size_t kMinSize = 4; _STACK *sk_new(stack_cmp_func comp) { @@ -152,18 +152,18 @@ size_t sk_insert(_STACK *sk, void *p, size_t where) { } if (sk->num_alloc <= sk->num + 1) { - /* Attempt to double the size of the array. */ + // Attempt to double the size of the array. size_t new_alloc = sk->num_alloc << 1; size_t alloc_size = new_alloc * sizeof(void *); void **data; - /* If the doubling overflowed, try to increment. */ + // If the doubling overflowed, try to increment. if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) { new_alloc = sk->num_alloc + 1; alloc_size = new_alloc * sizeof(void *); } - /* If the increment also overflowed, fail. */ + // If the increment also overflowed, fail. if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) { return 0; } @@ -229,7 +229,7 @@ int sk_find(_STACK *sk, size_t *out_index, void *p) { } if (sk->comp == NULL) { - /* Use pointer equality when no comparison function has been set. */ + // Use pointer equality when no comparison function has been set. for (size_t i = 0; i < sk->num; i++) { if (sk->data[i] == p) { if (out_index) { @@ -247,18 +247,18 @@ int sk_find(_STACK *sk, size_t *out_index, void *p) { sk_sort(sk); - /* sk->comp is a function that takes pointers to pointers to elements, but - * qsort and bsearch take a comparison function that just takes pointers to - * elements. 
However, since we're passing an array of pointers to - * qsort/bsearch, we can just cast the comparison function and everything - * works. */ + // sk->comp is a function that takes pointers to pointers to elements, but + // qsort and bsearch take a comparison function that just takes pointers to + // elements. However, since we're passing an array of pointers to + // qsort/bsearch, we can just cast the comparison function and everything + // works. const void *const *r = bsearch(&p, sk->data, sk->num, sizeof(void *), (int (*)(const void *, const void *))sk->comp); if (r == NULL) { return 0; } size_t idx = ((void **)r) - sk->data; - /* This function always returns the first result. */ + // This function always returns the first result. while (idx > 0 && sk->comp((const void **)&p, (const void **)&sk->data[idx - 1]) == 0) { idx--; @@ -329,7 +329,7 @@ void sk_sort(_STACK *sk) { return; } - /* See the comment in sk_find about this cast. */ + // See the comment in sk_find about this cast. comp_func = (int (*)(const void *, const void *))(sk->comp); qsort(sk->data, sk->num, sizeof(void *), comp_func); sk->sorted = 1; diff --git a/crypto/test/file_test.h b/crypto/test/file_test.h index fd1dcd7c..204ef9cb 100644 --- a/crypto/test/file_test.h +++ b/crypto/test/file_test.h @@ -246,4 +246,4 @@ int FileTestMain(const FileTest::Options &opts); // name of a test file embedded in the test binary. 
void FileTestGTest(const char *path, std::function run_test); -#endif /* OPENSSL_HEADER_CRYPTO_TEST_FILE_TEST_H */ +#endif // OPENSSL_HEADER_CRYPTO_TEST_FILE_TEST_H diff --git a/crypto/test/gtest_main.h b/crypto/test/gtest_main.h index 395b2817..759aaf7e 100644 --- a/crypto/test/gtest_main.h +++ b/crypto/test/gtest_main.h @@ -75,4 +75,4 @@ inline void SetupGoogleTest() { } // namespace bssl -#endif /* OPENSSL_HEADER_CRYPTO_TEST_GTEST_MAIN_H */ +#endif // OPENSSL_HEADER_CRYPTO_TEST_GTEST_MAIN_H diff --git a/crypto/test/malloc.cc b/crypto/test/malloc.cc index bcd7974a..5f0bc6e2 100644 --- a/crypto/test/malloc.cc +++ b/crypto/test/malloc.cc @@ -140,4 +140,4 @@ void *realloc(void *ptr, size_t size) { } // extern "C" -#endif /* defined(linux) && GLIBC && !ARM && !AARCH64 && !ASAN */ +#endif // defined(linux) && GLIBC && !ARM && !AARCH64 && !ASAN diff --git a/crypto/test/test_util.h b/crypto/test/test_util.h index 3bf41abb..9c9ef58f 100644 --- a/crypto/test/test_util.h +++ b/crypto/test/test_util.h @@ -62,4 +62,4 @@ inline bool operator!=(const Bytes &a, const Bytes &b) { return !(a == b); } std::ostream &operator<<(std::ostream &os, const Bytes &in); -#endif /* OPENSSL_HEADER_CRYPTO_TEST_TEST_UTIL_H */ +#endif // OPENSSL_HEADER_CRYPTO_TEST_TEST_UTIL_H diff --git a/crypto/thread_none.c b/crypto/thread_none.c index 85768b4b..718d9601 100644 --- a/crypto/thread_none.c +++ b/crypto/thread_none.c @@ -56,4 +56,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_NO_THREADS */ +#endif // OPENSSL_NO_THREADS diff --git a/crypto/thread_pthread.c b/crypto/thread_pthread.c index d9e87f2d..90b3d605 100644 --- a/crypto/thread_pthread.c +++ b/crypto/thread_pthread.c @@ -173,4 +173,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_PTHREADS */ +#endif // OPENSSL_PTHREADS diff --git a/crypto/thread_test.cc b/crypto/thread_test.cc index f1b04f35..2a6f60be 100644 --- 
a/crypto/thread_test.cc +++ b/crypto/thread_test.cc @@ -34,7 +34,7 @@ typedef HANDLE thread_t; static DWORD WINAPI thread_run(LPVOID arg) { void (*thread_func)(void); - /* VC really doesn't like casting between data and function pointers. */ + // VC really doesn't like casting between data and function pointers. OPENSSL_memcpy(&thread_func, &arg, sizeof(thread_func)); thread_func(); return 0; @@ -42,7 +42,7 @@ static DWORD WINAPI thread_run(LPVOID arg) { static int run_thread(thread_t *out_thread, void (*thread_func)(void)) { void *arg; - /* VC really doesn't like casting between data and function pointers. */ + // VC really doesn't like casting between data and function pointers. OPENSSL_memcpy(&arg, &thread_func, sizeof(arg)); *out_thread = CreateThread(NULL /* security attributes */, @@ -78,15 +78,15 @@ static int wait_for_thread(thread_t thread) { return pthread_join(thread, NULL) == 0; } -#endif /* OPENSSL_WINDOWS */ +#endif // OPENSSL_WINDOWS static unsigned g_once_init_called = 0; static void once_init(void) { g_once_init_called++; - /* Sleep briefly so one |call_once_thread| instance will call |CRYPTO_once| - * while the other is running this function. */ + // Sleep briefly so one |call_once_thread| instance will call |CRYPTO_once| + // while the other is running this function. #if defined(OPENSSL_WINDOWS) Sleep(1 /* milliseconds */); #else @@ -129,9 +129,9 @@ TEST(ThreadTest, Once) { TEST(ThreadTest, InitZeros) { if (FIPS_mode()) { - /* Our FIPS tooling currently requires that |CRYPTO_ONCE_INIT|, - * |CRYPTO_STATIC_MUTEX_INIT| and |CRYPTO_EX_DATA_CLASS| are all zeros and - * so can be placed in the BSS section. */ + // Our FIPS tooling currently requires that |CRYPTO_ONCE_INIT|, + // |CRYPTO_STATIC_MUTEX_INIT| and |CRYPTO_EX_DATA_CLASS| are all zeros and + // so can be placed in the BSS section. 
EXPECT_EQ(Bytes((uint8_t *)&once_bss, sizeof(once_bss)), Bytes((uint8_t *)&once_init_value, sizeof(once_init_value))); EXPECT_EQ(Bytes((uint8_t *)&mutex_bss, sizeof(mutex_bss)), @@ -183,9 +183,9 @@ TEST(ThreadTest, ThreadLocal) { } TEST(ThreadTest, RandState) { - /* In FIPS mode, rand.c maintains a linked-list of thread-local data because - * we're required to clear it on process exit. This test exercises removing a - * value from that list. */ + // In FIPS mode, rand.c maintains a linked-list of thread-local data because + // we're required to clear it on process exit. This test exercises removing a + // value from that list. uint8_t buf[1]; RAND_bytes(buf, sizeof(buf)); @@ -197,4 +197,4 @@ TEST(ThreadTest, RandState) { ASSERT_TRUE(wait_for_thread(thread)); } -#endif /* !OPENSSL_NO_THREADS */ +#endif // !OPENSSL_NO_THREADS diff --git a/crypto/thread_win.c b/crypto/thread_win.c index 62119b4e..d6fa5484 100644 --- a/crypto/thread_win.c +++ b/crypto/thread_win.c @@ -63,7 +63,7 @@ void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) { } void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) { - /* SRWLOCKs require no cleanup. */ + // SRWLOCKs require no cleanup. } void CRYPTO_STATIC_MUTEX_lock_read(struct CRYPTO_STATIC_MUTEX *lock) { @@ -100,11 +100,11 @@ static void thread_local_init(void) { static void NTAPI thread_local_destructor(PVOID module, DWORD reason, PVOID reserved) { - /* Only free memory on |DLL_THREAD_DETACH|, not |DLL_PROCESS_DETACH|. In - * VS2015's debug runtime, the C runtime has been unloaded by the time - * |DLL_PROCESS_DETACH| runs. See https://crbug.com/575795. This is consistent - * with |pthread_key_create| which does not call destructors on process exit, - * only thread exit. */ + // Only free memory on |DLL_THREAD_DETACH|, not |DLL_PROCESS_DETACH|. In + // VS2015's debug runtime, the C runtime has been unloaded by the time + // |DLL_PROCESS_DETACH| runs. See https://crbug.com/575795. 
This is consistent + // with |pthread_key_create| which does not call destructors on process exit, + // only thread exit. if (reason != DLL_THREAD_DETACH) { return; } @@ -135,17 +135,17 @@ static void NTAPI thread_local_destructor(PVOID module, DWORD reason, OPENSSL_free(pointers); } -/* Thread Termination Callbacks. - * - * Windows doesn't support a per-thread destructor with its TLS primitives. - * So, we build it manually by inserting a function to be called on each - * thread's exit. This magic is from http://www.codeproject.com/threads/tls.asp - * and it works for VC++ 7.0 and later. - * - * Force a reference to _tls_used to make the linker create the TLS directory - * if it's not already there. (E.g. if __declspec(thread) is not used). Force - * a reference to p_thread_callback_boringssl to prevent whole program - * optimization from discarding the variable. */ +// Thread Termination Callbacks. +// +// Windows doesn't support a per-thread destructor with its TLS primitives. +// So, we build it manually by inserting a function to be called on each +// thread's exit. This magic is from http://www.codeproject.com/threads/tls.asp +// and it works for VC++ 7.0 and later. +// +// Force a reference to _tls_used to make the linker create the TLS directory +// if it's not already there. (E.g. if __declspec(thread) is not used). Force +// a reference to p_thread_callback_boringssl to prevent whole program +// optimization from discarding the variable. #ifdef _WIN64 #pragma comment(linker, "/INCLUDE:_tls_used") #pragma comment(linker, "/INCLUDE:p_thread_callback_boringssl") @@ -154,41 +154,41 @@ static void NTAPI thread_local_destructor(PVOID module, DWORD reason, #pragma comment(linker, "/INCLUDE:_p_thread_callback_boringssl") #endif -/* .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are - * called automatically by the OS loader code (not the CRT) when the module is - * loaded and on thread creation. 
They are NOT called if the module has been - * loaded by a LoadLibrary() call. It must have implicitly been loaded at - * process startup. - * - * By implicitly loaded, I mean that it is directly referenced by the main EXE - * or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being - * implicitly loaded. - * - * See VC\crt\src\tlssup.c for reference. */ - -/* The linker must not discard p_thread_callback_boringssl. (We force a reference - * to this variable with a linker /INCLUDE:symbol pragma to ensure that.) If - * this variable is discarded, the OnThreadExit function will never be - * called. */ +// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are +// called automatically by the OS loader code (not the CRT) when the module is +// loaded and on thread creation. They are NOT called if the module has been +// loaded by a LoadLibrary() call. It must have implicitly been loaded at +// process startup. +// +// By implicitly loaded, I mean that it is directly referenced by the main EXE +// or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being +// implicitly loaded. +// +// See VC\crt\src\tlssup.c for reference. + +// The linker must not discard p_thread_callback_boringssl. (We force a +// reference to this variable with a linker /INCLUDE:symbol pragma to ensure +// that.) If this variable is discarded, the OnThreadExit function will never +// be called. #ifdef _WIN64 -/* .CRT section is merged with .rdata on x64 so it must be constant data. */ +// .CRT section is merged with .rdata on x64 so it must be constant data. #pragma const_seg(".CRT$XLC") -/* When defining a const variable, it must have external linkage to be sure the - * linker doesn't discard it. */ +// When defining a const variable, it must have external linkage to be sure the +// linker doesn't discard it. 
extern const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl; const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; -/* Reset the default section. */ +// Reset the default section. #pragma const_seg() #else #pragma data_seg(".CRT$XLC") PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; -/* Reset the default section. */ +// Reset the default section. #pragma data_seg() -#endif /* _WIN64 */ +#endif // _WIN64 void *CRYPTO_get_thread_local(thread_local_data_t index) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); @@ -234,4 +234,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_WINDOWS_THREADS */ +#endif // OPENSSL_WINDOWS_THREADS