diff --git a/common/sha2.c b/common/sha2.c index 13582c1d..12ba35be 100644 --- a/common/sha2.c +++ b/common/sha2.c @@ -7,14 +7,29 @@ #include "sha2.h" -static uint64_t load_bigendian(const unsigned char *x) { - return (uint64_t)(x[7]) | (((uint64_t)(x[6])) << 8) | (((uint64_t)(x[5])) << 16) | - (((uint64_t)(x[4])) << 24) | (((uint64_t)(x[3])) << 32) | - (((uint64_t)(x[2])) << 40) | (((uint64_t)(x[1])) << 48) | - (((uint64_t)(x[0])) << 56); +static uint32_t load_bigendian_32(const uint8_t *x) { + return (uint32_t)(x[3]) | (((uint32_t)(x[2])) << 8) | + (((uint32_t)(x[1])) << 16) | (((uint32_t)(x[0])) << 24); } -static void store_bigendian(uint8_t *x, uint64_t u) { +static uint64_t load_bigendian_64(const uint8_t *x) { + return (uint64_t)(x[7]) | (((uint64_t)(x[6])) << 8) | + (((uint64_t)(x[5])) << 16) | (((uint64_t)(x[4])) << 24) | + (((uint64_t)(x[3])) << 32) | (((uint64_t)(x[2])) << 40) | + (((uint64_t)(x[1])) << 48) | (((uint64_t)(x[0])) << 56); +} + +static void store_bigendian_32(uint8_t *x, uint64_t u) { + x[3] = (uint8_t) u; + u >>= 8; + x[2] = (uint8_t) u; + u >>= 8; + x[1] = (uint8_t) u; + u >>= 8; + x[0] = (uint8_t) u; +} + +static void store_bigendian_64(uint8_t *x, uint64_t u) { x[7] = (uint8_t) u; u >>= 8; x[6] = (uint8_t) u; @@ -33,50 +48,244 @@ static void store_bigendian(uint8_t *x, uint64_t u) { } #define SHR(x, c) ((x) >> (c)) -#define ROTR(x, c) (((x) >> (c)) | ((x) << (64 - (c)))) +#define ROTR_32(x, c) (((x) >> (c)) | ((x) << (32 - (c)))) +#define ROTR_64(x, c) (((x) >> (c)) | ((x) << (64 - (c)))) #define Ch(x, y, z) (((x) & (y)) ^ (~(x) & (z))) #define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) -#define Sigma0(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39)) -#define Sigma1(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41)) -#define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x, 7)) -#define sigma1(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHR(x, 6)) -#define M(w0, w14, w9, w1) w0 = sigma1(w14) + (w9) + sigma0(w1) + (w0); +#define Sigma0_32(x) (ROTR_32(x, 2) ^ ROTR_32(x,13) ^ ROTR_32(x,22)) +#define Sigma1_32(x) (ROTR_32(x, 6) ^ ROTR_32(x,11) ^ ROTR_32(x,25)) +#define sigma0_32(x) (ROTR_32(x, 7) ^ ROTR_32(x,18) ^ SHR(x, 3)) +#define sigma1_32(x) (ROTR_32(x,17) ^ ROTR_32(x,19) ^ SHR(x,10)) -#define EXPAND \ - M(w0, w14, w9, w1) \ - M(w1, w15, w10, w2) \ - M(w2, w0, w11, w3) \ - M(w3, w1, w12, w4) \ - M(w4, w2, w13, w5) \ - M(w5, w3, w14, w6) \ - M(w6, w4, w15, w7) \ - M(w7, w5, w0, w8) \ - M(w8, w6, w1, w9) \ - M(w9, w7, w2, w10) \ - M(w10, w8, w3, w11) \ - M(w11, w9, w4, w12) \ - M(w12, w10, w5, w13) \ - M(w13, w11, w6, w14) \ - M(w14, w12, w7, w15) \ - M(w15, w13, w8, w0) +#define Sigma0_64(x) (ROTR_64(x, 28) ^ ROTR_64(x, 34) ^ ROTR_64(x, 39)) +#define Sigma1_64(x) (ROTR_64(x, 14) ^ ROTR_64(x, 18) ^ ROTR_64(x, 41)) +#define sigma0_64(x) (ROTR_64(x, 1) ^ ROTR_64(x, 8) ^ SHR(x, 7)) +#define sigma1_64(x) (ROTR_64(x, 19) ^ ROTR_64(x, 61) ^ SHR(x, 6)) -#define F(w, k) \ - T1 = h + Sigma1(e) + Ch(e, f, g) + (k) + (w); \ - T2 = Sigma0(a) + Maj(a, b, c); \ - h = g; \ - g = f; \ - f = e; \ - e = d + T1; \ - d = c; \ - c = b; \ - b = a; \ +#define M_32(w0, w14, w9, w1) w0 = sigma1_32(w14) + (w9) + sigma0_32(w1) + (w0); +#define M_64(w0, w14, w9, w1) w0 = sigma1_64(w14) + (w9) + sigma0_64(w1) + (w0); + +#define EXPAND_32 \ + M_32(w0, w14, w9, w1) \ + M_32(w1, w15, w10, w2) \ + M_32(w2, w0, w11, w3) \ + M_32(w3, w1, w12, w4) \ + M_32(w4, w2, w13, w5) \ + M_32(w5, w3, w14, w6) \ + M_32(w6, w4, w15, w7) \ + M_32(w7, w5, w0, w8) \ + M_32(w8, w6, w1, w9) \ + M_32(w9, w7, w2, w10) \ + M_32(w10, 
w8, w3, w11) \ + M_32(w11, w9, w4, w12) \ + M_32(w12, w10, w5, w13) \ + M_32(w13, w11, w6, w14) \ + M_32(w14, w12, w7, w15) \ + M_32(w15, w13, w8, w0) + +#define EXPAND_64 \ + M_64(w0, w14, w9, w1) \ + M_64(w1, w15, w10, w2) \ + M_64(w2, w0, w11, w3) \ + M_64(w3, w1, w12, w4) \ + M_64(w4, w2, w13, w5) \ + M_64(w5, w3, w14, w6) \ + M_64(w6, w4, w15, w7) \ + M_64(w7, w5, w0, w8) \ + M_64(w8, w6, w1, w9) \ + M_64(w9, w7, w2, w10) \ + M_64(w10, w8, w3, w11) \ + M_64(w11, w9, w4, w12) \ + M_64(w12, w10, w5, w13) \ + M_64(w13, w11, w6, w14) \ + M_64(w14, w12, w7, w15) \ + M_64(w15, w13, w8, w0) + +#define F_32(w, k) \ + T1 = h + Sigma1_32(e) + Ch(e, f, g) + (k) + (w); \ + T2 = Sigma0_32(a) + Maj(a, b, c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ a = T1 + T2; +#define F_64(w, k) \ + T1 = h + Sigma1_64(e) + Ch(e, f, g) + (k) + (w); \ + T2 = Sigma0_64(a) + Maj(a, b, c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +static size_t crypto_hashblocks_sha256(uint8_t *statebytes, + const uint8_t *in, size_t inlen) { + uint32_t state[8]; + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; + uint32_t e; + uint32_t f; + uint32_t g; + uint32_t h; + uint32_t T1; + uint32_t T2; + + a = load_bigendian_32(statebytes + 0); + state[0] = a; + b = load_bigendian_32(statebytes + 4); + state[1] = b; + c = load_bigendian_32(statebytes + 8); + state[2] = c; + d = load_bigendian_32(statebytes + 12); + state[3] = d; + e = load_bigendian_32(statebytes + 16); + state[4] = e; + f = load_bigendian_32(statebytes + 20); + state[5] = f; + g = load_bigendian_32(statebytes + 24); + state[6] = g; + h = load_bigendian_32(statebytes + 28); + state[7] = h; + + while (inlen >= 64) { + uint32_t w0 = load_bigendian_32(in + 0); + uint32_t w1 = load_bigendian_32(in + 4); + uint32_t w2 = load_bigendian_32(in + 8); + uint32_t w3 = load_bigendian_32(in + 12); + uint32_t w4 = load_bigendian_32(in + 16); + uint32_t w5 = load_bigendian_32(in + 20); + uint32_t w6 = load_bigendian_32(in + 24); + uint32_t w7 = load_bigendian_32(in + 28); + uint32_t w8 = load_bigendian_32(in + 32); + uint32_t w9 = load_bigendian_32(in + 36); + uint32_t w10 = load_bigendian_32(in + 40); + uint32_t w11 = load_bigendian_32(in + 44); + uint32_t w12 = load_bigendian_32(in + 48); + uint32_t w13 = load_bigendian_32(in + 52); + uint32_t w14 = load_bigendian_32(in + 56); + uint32_t w15 = load_bigendian_32(in + 60); + + F_32(w0, 0x428a2f98) + F_32(w1, 0x71374491) + F_32(w2, 0xb5c0fbcf) + F_32(w3, 0xe9b5dba5) + F_32(w4, 0x3956c25b) + F_32(w5, 0x59f111f1) + F_32(w6, 0x923f82a4) + F_32(w7, 0xab1c5ed5) + F_32(w8, 0xd807aa98) + F_32(w9, 0x12835b01) + F_32(w10, 0x243185be) + F_32(w11, 0x550c7dc3) + F_32(w12, 0x72be5d74) + F_32(w13, 0x80deb1fe) + F_32(w14, 0x9bdc06a7) + F_32(w15, 0xc19bf174) + + EXPAND_32 + + F_32(w0, 0xe49b69c1) + F_32(w1, 0xefbe4786) + F_32(w2, 0x0fc19dc6) + F_32(w3, 0x240ca1cc) + F_32(w4, 0x2de92c6f) + F_32(w5, 0x4a7484aa) + F_32(w6, 0x5cb0a9dc) + F_32(w7, 0x76f988da) + F_32(w8, 0x983e5152) + F_32(w9, 0xa831c66d) + F_32(w10, 0xb00327c8) + F_32(w11, 0xbf597fc7) + F_32(w12, 0xc6e00bf3) + F_32(w13, 0xd5a79147) + F_32(w14, 0x06ca6351) + F_32(w15, 0x14292967) + + EXPAND_32 + + F_32(w0, 0x27b70a85) + F_32(w1, 0x2e1b2138) + F_32(w2, 0x4d2c6dfc) + F_32(w3, 0x53380d13) + F_32(w4, 0x650a7354) + F_32(w5, 0x766a0abb) + F_32(w6, 0x81c2c92e) + F_32(w7, 0x92722c85) + F_32(w8, 0xa2bfe8a1) + F_32(w9, 0xa81a664b) + F_32(w10, 0xc24b8b70) + F_32(w11, 0xc76c51a3) + F_32(w12, 
0xd192e819) + F_32(w13, 0xd6990624) + F_32(w14, 0xf40e3585) + F_32(w15, 0x106aa070) + + EXPAND_32 + + F_32(w0, 0x19a4c116) + F_32(w1, 0x1e376c08) + F_32(w2, 0x2748774c) + F_32(w3, 0x34b0bcb5) + F_32(w4, 0x391c0cb3) + F_32(w5, 0x4ed8aa4a) + F_32(w6, 0x5b9cca4f) + F_32(w7, 0x682e6ff3) + F_32(w8, 0x748f82ee) + F_32(w9, 0x78a5636f) + F_32(w10, 0x84c87814) + F_32(w11, 0x8cc70208) + F_32(w12, 0x90befffa) + F_32(w13, 0xa4506ceb) + F_32(w14, 0xbef9a3f7) + F_32(w15, 0xc67178f2) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 64; + inlen -= 64; + } + + store_bigendian_32(statebytes + 0, state[0]); + store_bigendian_32(statebytes + 4, state[1]); + store_bigendian_32(statebytes + 8, state[2]); + store_bigendian_32(statebytes + 12, state[3]); + store_bigendian_32(statebytes + 16, state[4]); + store_bigendian_32(statebytes + 20, state[5]); + store_bigendian_32(statebytes + 24, state[6]); + store_bigendian_32(statebytes + 28, state[7]); + + return inlen; +} + static size_t crypto_hashblocks_sha512(uint8_t *statebytes, - const uint8_t *in, - size_t inlen) { + const uint8_t *in, size_t inlen) { uint64_t state[8]; uint64_t a; uint64_t b; @@ -89,133 +298,133 @@ static size_t crypto_hashblocks_sha512(uint8_t *statebytes, uint64_t T1; uint64_t T2; - a = load_bigendian(statebytes + 0); + a = load_bigendian_64(statebytes + 0); state[0] = a; - b = load_bigendian(statebytes + 8); + b = load_bigendian_64(statebytes + 8); state[1] = b; - c = load_bigendian(statebytes + 16); + c = load_bigendian_64(statebytes + 16); state[2] = c; - d = load_bigendian(statebytes + 24); + d = load_bigendian_64(statebytes + 24); state[3] = d; - e = load_bigendian(statebytes + 32); + e = load_bigendian_64(statebytes + 32); state[4] = e; - f = load_bigendian(statebytes + 40); + f = load_bigendian_64(statebytes + 40); state[5] = f; - g = load_bigendian(statebytes + 48); + g = load_bigendian_64(statebytes + 48); state[6] = g; - h = load_bigendian(statebytes + 56); + h = load_bigendian_64(statebytes + 56); state[7] = h; while (inlen >= 128) { - uint64_t w0 = load_bigendian(in + 0); - uint64_t w1 = load_bigendian(in + 8); - uint64_t w2 = load_bigendian(in + 16); - uint64_t w3 = load_bigendian(in + 24); - uint64_t w4 = load_bigendian(in + 32); - uint64_t w5 = load_bigendian(in + 40); - uint64_t w6 = load_bigendian(in + 48); - uint64_t w7 = load_bigendian(in + 56); - uint64_t w8 = load_bigendian(in + 64); - uint64_t w9 = load_bigendian(in + 72); - uint64_t w10 = load_bigendian(in + 80); - uint64_t w11 = load_bigendian(in + 88); - uint64_t w12 = load_bigendian(in + 96); - uint64_t w13 = load_bigendian(in + 104); - uint64_t w14 = load_bigendian(in + 112); - uint64_t w15 = load_bigendian(in + 120); + uint64_t w0 = load_bigendian_64(in + 0); + uint64_t w1 = load_bigendian_64(in + 8); + uint64_t w2 = load_bigendian_64(in + 16); + uint64_t w3 = load_bigendian_64(in + 24); + uint64_t w4 = load_bigendian_64(in + 32); + uint64_t w5 = load_bigendian_64(in + 40); + uint64_t w6 = load_bigendian_64(in + 48); + uint64_t w7 = load_bigendian_64(in + 56); + uint64_t w8 = load_bigendian_64(in + 64); + uint64_t w9 = load_bigendian_64(in + 72); + uint64_t w10 = load_bigendian_64(in + 80); + uint64_t w11 = load_bigendian_64(in + 88); + uint64_t w12 = load_bigendian_64(in + 96); + uint64_t w13 = load_bigendian_64(in + 104); + uint64_t w14 = 
load_bigendian_64(in + 112); + uint64_t w15 = load_bigendian_64(in + 120); - F(w0, 0x428a2f98d728ae22ULL) - F(w1, 0x7137449123ef65cdULL) - F(w2, 0xb5c0fbcfec4d3b2fULL) - F(w3, 0xe9b5dba58189dbbcULL) - F(w4, 0x3956c25bf348b538ULL) - F(w5, 0x59f111f1b605d019ULL) - F(w6, 0x923f82a4af194f9bULL) - F(w7, 0xab1c5ed5da6d8118ULL) - F(w8, 0xd807aa98a3030242ULL) - F(w9, 0x12835b0145706fbeULL) - F(w10, 0x243185be4ee4b28cULL) - F(w11, 0x550c7dc3d5ffb4e2ULL) - F(w12, 0x72be5d74f27b896fULL) - F(w13, 0x80deb1fe3b1696b1ULL) - F(w14, 0x9bdc06a725c71235ULL) - F(w15, 0xc19bf174cf692694ULL) + F_64(w0, 0x428a2f98d728ae22ULL) + F_64(w1, 0x7137449123ef65cdULL) + F_64(w2, 0xb5c0fbcfec4d3b2fULL) + F_64(w3, 0xe9b5dba58189dbbcULL) + F_64(w4, 0x3956c25bf348b538ULL) + F_64(w5, 0x59f111f1b605d019ULL) + F_64(w6, 0x923f82a4af194f9bULL) + F_64(w7, 0xab1c5ed5da6d8118ULL) + F_64(w8, 0xd807aa98a3030242ULL) + F_64(w9, 0x12835b0145706fbeULL) + F_64(w10, 0x243185be4ee4b28cULL) + F_64(w11, 0x550c7dc3d5ffb4e2ULL) + F_64(w12, 0x72be5d74f27b896fULL) + F_64(w13, 0x80deb1fe3b1696b1ULL) + F_64(w14, 0x9bdc06a725c71235ULL) + F_64(w15, 0xc19bf174cf692694ULL) - EXPAND + EXPAND_64 - F(w0, 0xe49b69c19ef14ad2ULL) - F(w1, 0xefbe4786384f25e3ULL) - F(w2, 0x0fc19dc68b8cd5b5ULL) - F(w3, 0x240ca1cc77ac9c65ULL) - F(w4, 0x2de92c6f592b0275ULL) - F(w5, 0x4a7484aa6ea6e483ULL) - F(w6, 0x5cb0a9dcbd41fbd4ULL) - F(w7, 0x76f988da831153b5ULL) - F(w8, 0x983e5152ee66dfabULL) - F(w9, 0xa831c66d2db43210ULL) - F(w10, 0xb00327c898fb213fULL) - F(w11, 0xbf597fc7beef0ee4ULL) - F(w12, 0xc6e00bf33da88fc2ULL) - F(w13, 0xd5a79147930aa725ULL) - F(w14, 0x06ca6351e003826fULL) - F(w15, 0x142929670a0e6e70ULL) + F_64(w0, 0xe49b69c19ef14ad2ULL) + F_64(w1, 0xefbe4786384f25e3ULL) + F_64(w2, 0x0fc19dc68b8cd5b5ULL) + F_64(w3, 0x240ca1cc77ac9c65ULL) + F_64(w4, 0x2de92c6f592b0275ULL) + F_64(w5, 0x4a7484aa6ea6e483ULL) + F_64(w6, 0x5cb0a9dcbd41fbd4ULL) + F_64(w7, 0x76f988da831153b5ULL) + F_64(w8, 0x983e5152ee66dfabULL) + F_64(w9, 0xa831c66d2db43210ULL) + F_64(w10, 0xb00327c898fb213fULL) + F_64(w11, 0xbf597fc7beef0ee4ULL) + F_64(w12, 0xc6e00bf33da88fc2ULL) + F_64(w13, 0xd5a79147930aa725ULL) + F_64(w14, 0x06ca6351e003826fULL) + F_64(w15, 0x142929670a0e6e70ULL) - EXPAND + EXPAND_64 - F(w0, 0x27b70a8546d22ffcULL) - F(w1, 0x2e1b21385c26c926ULL) - F(w2, 0x4d2c6dfc5ac42aedULL) - F(w3, 0x53380d139d95b3dfULL) - F(w4, 0x650a73548baf63deULL) - F(w5, 0x766a0abb3c77b2a8ULL) - F(w6, 0x81c2c92e47edaee6ULL) - F(w7, 0x92722c851482353bULL) - F(w8, 0xa2bfe8a14cf10364ULL) - F(w9, 0xa81a664bbc423001ULL) - F(w10, 0xc24b8b70d0f89791ULL) - F(w11, 0xc76c51a30654be30ULL) - F(w12, 0xd192e819d6ef5218ULL) - F(w13, 0xd69906245565a910ULL) - F(w14, 0xf40e35855771202aULL) - F(w15, 0x106aa07032bbd1b8ULL) + F_64(w0, 0x27b70a8546d22ffcULL) + F_64(w1, 0x2e1b21385c26c926ULL) + F_64(w2, 0x4d2c6dfc5ac42aedULL) + F_64(w3, 0x53380d139d95b3dfULL) + F_64(w4, 0x650a73548baf63deULL) + F_64(w5, 0x766a0abb3c77b2a8ULL) + F_64(w6, 0x81c2c92e47edaee6ULL) + F_64(w7, 0x92722c851482353bULL) + F_64(w8, 0xa2bfe8a14cf10364ULL) + F_64(w9, 0xa81a664bbc423001ULL) + F_64(w10, 0xc24b8b70d0f89791ULL) + F_64(w11, 0xc76c51a30654be30ULL) + F_64(w12, 0xd192e819d6ef5218ULL) + F_64(w13, 0xd69906245565a910ULL) + F_64(w14, 0xf40e35855771202aULL) + F_64(w15, 0x106aa07032bbd1b8ULL) - EXPAND + EXPAND_64 - F(w0, 0x19a4c116b8d2d0c8ULL) - F(w1, 0x1e376c085141ab53ULL) - F(w2, 0x2748774cdf8eeb99ULL) - F(w3, 0x34b0bcb5e19b48a8ULL) - F(w4, 0x391c0cb3c5c95a63ULL) - F(w5, 0x4ed8aa4ae3418acbULL) - F(w6, 0x5b9cca4f7763e373ULL) - F(w7, 0x682e6ff3d6b2b8a3ULL) - F(w8, 
0x748f82ee5defb2fcULL) - F(w9, 0x78a5636f43172f60ULL) - F(w10, 0x84c87814a1f0ab72ULL) - F(w11, 0x8cc702081a6439ecULL) - F(w12, 0x90befffa23631e28ULL) - F(w13, 0xa4506cebde82bde9ULL) - F(w14, 0xbef9a3f7b2c67915ULL) - F(w15, 0xc67178f2e372532bULL) + F_64(w0, 0x19a4c116b8d2d0c8ULL) + F_64(w1, 0x1e376c085141ab53ULL) + F_64(w2, 0x2748774cdf8eeb99ULL) + F_64(w3, 0x34b0bcb5e19b48a8ULL) + F_64(w4, 0x391c0cb3c5c95a63ULL) + F_64(w5, 0x4ed8aa4ae3418acbULL) + F_64(w6, 0x5b9cca4f7763e373ULL) + F_64(w7, 0x682e6ff3d6b2b8a3ULL) + F_64(w8, 0x748f82ee5defb2fcULL) + F_64(w9, 0x78a5636f43172f60ULL) + F_64(w10, 0x84c87814a1f0ab72ULL) + F_64(w11, 0x8cc702081a6439ecULL) + F_64(w12, 0x90befffa23631e28ULL) + F_64(w13, 0xa4506cebde82bde9ULL) + F_64(w14, 0xbef9a3f7b2c67915ULL) + F_64(w15, 0xc67178f2e372532bULL) - EXPAND + EXPAND_64 - F(w0, 0xca273eceea26619cULL) - F(w1, 0xd186b8c721c0c207ULL) - F(w2, 0xeada7dd6cde0eb1eULL) - F(w3, 0xf57d4f7fee6ed178ULL) - F(w4, 0x06f067aa72176fbaULL) - F(w5, 0x0a637dc5a2c898a6ULL) - F(w6, 0x113f9804bef90daeULL) - F(w7, 0x1b710b35131c471bULL) - F(w8, 0x28db77f523047d84ULL) - F(w9, 0x32caab7b40c72493ULL) - F(w10, 0x3c9ebe0a15c9bebcULL) - F(w11, 0x431d67c49c100d4cULL) - F(w12, 0x4cc5d4becb3e42b6ULL) - F(w13, 0x597f299cfc657e2aULL) - F(w14, 0x5fcb6fab3ad6faecULL) - F(w15, 0x6c44198c4a475817ULL) + F_64(w0, 0xca273eceea26619cULL) + F_64(w1, 0xd186b8c721c0c207ULL) + F_64(w2, 0xeada7dd6cde0eb1eULL) + F_64(w3, 0xf57d4f7fee6ed178ULL) + F_64(w4, 0x06f067aa72176fbaULL) + F_64(w5, 0x0a637dc5a2c898a6ULL) + F_64(w6, 0x113f9804bef90daeULL) + F_64(w7, 0x1b710b35131c471bULL) + F_64(w8, 0x28db77f523047d84ULL) + F_64(w9, 0x32caab7b40c72493ULL) + F_64(w10, 0x3c9ebe0a15c9bebcULL) + F_64(w11, 0x431d67c49c100d4cULL) + F_64(w12, 0x4cc5d4becb3e42b6ULL) + F_64(w13, 0x597f299cfc657e2aULL) + F_64(w14, 0x5fcb6fab3ad6faecULL) + F_64(w15, 0x6c44198c4a475817ULL) a += state[0]; b += state[1]; @@ -239,19 +448,31 @@ static size_t crypto_hashblocks_sha512(uint8_t *statebytes, inlen -= 128; } - store_bigendian(statebytes + 0, state[0]); - store_bigendian(statebytes + 8, state[1]); - store_bigendian(statebytes + 16, state[2]); - store_bigendian(statebytes + 24, state[3]); - store_bigendian(statebytes + 32, state[4]); - store_bigendian(statebytes + 40, state[5]); - store_bigendian(statebytes + 48, state[6]); - store_bigendian(statebytes + 56, state[7]); + store_bigendian_64(statebytes + 0, state[0]); + store_bigendian_64(statebytes + 8, state[1]); + store_bigendian_64(statebytes + 16, state[2]); + store_bigendian_64(statebytes + 24, state[3]); + store_bigendian_64(statebytes + 32, state[4]); + store_bigendian_64(statebytes + 40, state[5]); + store_bigendian_64(statebytes + 48, state[6]); + store_bigendian_64(statebytes + 56, state[7]); return inlen; } -#define blocks crypto_hashblocks_sha512 +static const uint8_t iv_224[32] = { + 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, + 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, + 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, + 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4 +}; + +static const uint8_t iv_256[32] = { + 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, + 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, + 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, + 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 +}; static const uint8_t iv_384[64] = { 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, @@ -271,16 +492,128 @@ static const uint8_t iv_512[64] = { 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 }; -int sha384(uint8_t *out, const uint8_t *in, 
size_t inlen) {
-    uint8_t h[64];
-    uint8_t padded[256];
-    uint64_t bytes = inlen;
+void sha224_inc_init(uint8_t *state) {
+    for (size_t i = 0; i < 32; ++i) {
+        state[i] = iv_224[i];
+    }
+    for (size_t i = 32; i < 40; ++i) {
+        state[i] = 0;
+    }
+}
 
+void sha256_inc_init(uint8_t *state) {
+    for (size_t i = 0; i < 32; ++i) {
+        state[i] = iv_256[i];
+    }
+    for (size_t i = 32; i < 40; ++i) {
+        state[i] = 0;
+    }
+}
+
+void sha384_inc_init(uint8_t *state) {
     for (size_t i = 0; i < 64; ++i) {
-        h[i] = iv_384[i];
+        state[i] = iv_384[i];
+    }
+    for (size_t i = 64; i < 72; ++i) {
+        state[i] = 0;
+    }
+}
+
+void sha512_inc_init(uint8_t *state) {
+    for (size_t i = 0; i < 64; ++i) {
+        state[i] = iv_512[i];
+    }
+    for (size_t i = 64; i < 72; ++i) {
+        state[i] = 0;
+    }
+}
+
+void sha256_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) {
+    uint64_t bytes = load_bigendian_64(state + 32);
+
+    crypto_hashblocks_sha256(state, in, 64 * inblocks);
+    bytes += 64 * inblocks;
+
+    store_bigendian_64(state + 32, bytes);
+}
+
+void sha224_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) {
+    sha256_inc_blocks(state, in, inblocks);
+}
+
+void sha512_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) {
+    uint64_t bytes = load_bigendian_64(state + 64);
+
+    crypto_hashblocks_sha512(state, in, 128 * inblocks);
+    bytes += 128 * inblocks;
+
+    store_bigendian_64(state + 64, bytes);
+}
+
+void sha384_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) {
+    sha512_inc_blocks(state, in, inblocks);
+}
+
+void sha256_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) {
+    uint8_t padded[128];
+    uint64_t bytes = load_bigendian_64(state + 32) + inlen;
+
+    crypto_hashblocks_sha256(state, in, inlen);
+    in += inlen;
+    inlen &= 63;
+    in -= inlen;
+
+    for (size_t i = 0; i < inlen; ++i) {
+        padded[i] = in[i];
+    }
+    padded[inlen] = 0x80;
+
+    if (inlen < 56) {
+        for (size_t i = inlen + 1; i < 56; ++i) {
+            padded[i] = 0;
+        }
+        padded[56] = (uint8_t) (bytes >> 53);
+        padded[57] = (uint8_t) (bytes >> 45);
+        padded[58] = (uint8_t) (bytes >> 37);
+        padded[59] = (uint8_t) (bytes >> 29);
+        padded[60] = (uint8_t) (bytes >> 21);
+        padded[61] = (uint8_t) (bytes >> 13);
+        padded[62] = (uint8_t) (bytes >> 5);
+        padded[63] = (uint8_t) (bytes << 3);
+        crypto_hashblocks_sha256(state, padded, 64);
+    } else {
+        for (size_t i = inlen + 1; i < 120; ++i) {
+            padded[i] = 0;
+        }
+        padded[120] = (uint8_t) (bytes >> 53);
+        padded[121] = (uint8_t) (bytes >> 45);
+        padded[122] = (uint8_t) (bytes >> 37);
+        padded[123] = (uint8_t) (bytes >> 29);
+        padded[124] = (uint8_t) (bytes >> 21);
+        padded[125] = (uint8_t) (bytes >> 13);
+        padded[126] = (uint8_t) (bytes >> 5);
+        padded[127] = (uint8_t) (bytes << 3);
+        crypto_hashblocks_sha256(state, padded, 128);
     }
-    blocks(h, in, inlen);
+    for (size_t i = 0; i < 32; ++i) {
+        out[i] = state[i];
+    }
+}
+
+void sha224_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) {
+    sha256_inc_finalize(state, state, in, inlen);
+
+    for (size_t i = 0; i < 28; ++i) {
+        out[i] = state[i];
+    }
+}
+
+void sha512_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) {
+    uint8_t padded[256];
+    uint64_t bytes = load_bigendian_64(state + 64) + inlen;
+
+    crypto_hashblocks_sha512(state, in, inlen);
     in += inlen;
     inlen &= 127;
     in -= inlen;
@@ -303,7 +636,7 @@ int sha384(uint8_t *out, const uint8_t *in, size_t inlen) {
         padded[125] = (uint8_t) (bytes >> 13);
         padded[126] = (uint8_t) (bytes >> 5);
         padded[127] = (uint8_t) (bytes << 3);
-        blocks(h, padded, 128);
+        crypto_hashblocks_sha512(state, padded, 128);
     } else {
         for (size_t i = inlen + 1; i < 247; ++i) {
             padded[i] = 0;
@@ -317,68 +650,46 @@ int sha384(uint8_t *out, const uint8_t *in, size_t inlen) {
         padded[253] = (uint8_t) (bytes >> 13);
         padded[254] = (uint8_t) (bytes >> 5);
         padded[255] = (uint8_t) (bytes << 3);
-        blocks(h, padded, 256);
+        crypto_hashblocks_sha512(state, padded, 256);
     }
+    for (size_t i = 0; i < 64; ++i) {
+        out[i] = state[i];
+    }
+}
+
+void sha384_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) {
+    sha512_inc_finalize(state, state, in, inlen);
+
     for (size_t i = 0; i < 48; ++i) {
-        out[i] = h[i];
+        out[i] = state[i];
     }
-
-    return 0;
 }
 
-int sha512(uint8_t *out, const uint8_t *in, size_t inlen) {
-    uint8_t h[64];
-    uint8_t padded[256];
-    uint64_t bytes = inlen;
+void sha224(uint8_t *out, const uint8_t *in, size_t inlen) {
+    uint8_t state[40];
 
-    for (size_t i = 0; i < 64; ++i) {
-        h[i] = iv_512[i];
-    }
-
-    blocks(h, in, inlen);
-    in += inlen;
-    inlen &= 127;
-    in -= inlen;
-
-    for (size_t i = 0; i < inlen; ++i) {
-        padded[i] = in[i];
-    }
-    padded[inlen] = 0x80;
-
-    if (inlen < 112) {
-        for (size_t i = inlen + 1; i < 119; ++i) {
-            padded[i] = 0;
-        }
-        padded[119] = (uint8_t) (bytes >> 61);
-        padded[120] = (uint8_t) (bytes >> 53);
-        padded[121] = (uint8_t) (bytes >> 45);
-        padded[122] = (uint8_t) (bytes >> 37);
-        padded[123] = (uint8_t) (bytes >> 29);
-        padded[124] = (uint8_t) (bytes >> 21);
-        padded[125] = (uint8_t) (bytes >> 13);
-        padded[126] = (uint8_t) (bytes >> 5);
-        padded[127] = (uint8_t) (bytes << 3);
-        blocks(h, padded, 128);
-    } else {
-        for (size_t i = inlen + 1; i < 247; ++i) {
-            padded[i] = 0;
-        }
-        padded[247] = (uint8_t) (bytes >> 61);
-        padded[248] = (uint8_t) (bytes >> 53);
-        padded[249] = (uint8_t) (bytes >> 45);
-        padded[250] = (uint8_t) (bytes >> 37);
-        padded[251] = (uint8_t) (bytes >> 29);
-        padded[252] = (uint8_t) (bytes >> 21);
-        padded[253] = (uint8_t) (bytes >> 13);
-        padded[254] = (uint8_t) (bytes >> 5);
-        padded[255] = (uint8_t) (bytes << 3);
-        blocks(h, padded, 256);
-    }
-
-    for (size_t i = 0; i < 64; ++i) {
-        out[i] = h[i];
-    }
-
-    return 0;
+    sha224_inc_init(state);
+    sha224_inc_finalize(out, state, in, inlen);
+}
+
+void sha256(uint8_t *out, const uint8_t *in, size_t inlen) {
+    uint8_t state[40];
+
+    sha256_inc_init(state);
+    sha256_inc_finalize(out, state, in, inlen);
+}
+
+void sha384(uint8_t *out, const uint8_t *in, size_t inlen) {
+    uint8_t state[72];
+
+    sha384_inc_init(state);
+    sha384_inc_finalize(out, state, in, inlen);
+}
+
+void sha512(uint8_t *out, const uint8_t *in, size_t inlen) {
+    uint8_t state[72];
+
+    sha512_inc_init(state);
+    sha512_inc_finalize(out, state, in, inlen);
 }
diff --git a/common/sha2.h b/common/sha2.h
index dfa9a2be..ce6a8aff 100644
--- a/common/sha2.h
+++ b/common/sha2.h
@@ -1,7 +1,31 @@
 #ifndef SHA2_H
 #define SHA2_H
 
-int sha384(uint8_t *out, const uint8_t *in, size_t inlen);
-int sha512(uint8_t *out, const uint8_t *in, size_t inlen);
+#include <stddef.h>
+#include <stdint.h>
+
+/* The incremental API allows hashing of individual input blocks; these blocks
+   must be exactly 64 bytes each (SHA-224/SHA-256) or 128 bytes each (SHA-384/SHA-512).
+   Use the 'finalize' functions for any remaining bytes (possibly more than one block). */
+
+void sha224_inc_init(uint8_t *state);
+void sha224_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks);
+void sha224_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen);
+void sha224(uint8_t *out, const uint8_t *in, size_t inlen);
+
+void sha256_inc_init(uint8_t *state);
+void sha256_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks);
+void sha256_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen);
+void sha256(uint8_t *out, const uint8_t *in, size_t inlen);
+
+void sha384_inc_init(uint8_t *state);
+void sha384_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks);
+void sha384_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen);
+void sha384(uint8_t *out, const uint8_t *in, size_t inlen);
+
+void sha512_inc_init(uint8_t *state);
+void sha512_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks);
+void sha512_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen);
+void sha512(uint8_t *out, const uint8_t *in, size_t inlen);
 
 #endif
diff --git a/test/Makefile b/test/Makefile
index 52cc3cce..a21545e3 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -37,6 +37,10 @@ $(DEST_DIR)/test_fips202: common/fips202.c $(COMMON_FILES)
 	mkdir -p $(DEST_DIR)
 	$(CC) $(CFLAGS) $< $(COMMON_FILES) -o $@
 
+$(DEST_DIR)/test_sha2: common/sha2.c $(COMMON_FILES)
+	mkdir -p $(DEST_DIR)
+	$(CC) $(CFLAGS) $< $(COMMON_FILES) -o $@
+
 $(DEST_DIR)/functest_$(SCHEME)_$(IMPLEMENTATION): build-scheme crypto_$(TYPE)/functest.c $(COMMON_FILES) $(COMMON_DIR)/randombytes.c $(COMMON_HEADERS)
 	mkdir -p $(DEST_DIR)
 	$(CC) $(CFLAGS) -DPQCLEAN_NAMESPACE=PQCLEAN_$(SCHEME_UPPERCASE)_$(IMPLEMENTATION_UPPERCASE) -I$(SCHEME_DIR) crypto_$(TYPE)/functest.c $(COMMON_FILES) $(COMMON_DIR)/notrandombytes.c -o $@ -L$(SCHEME_DIR) -l$(SCHEME)_$(IMPLEMENTATION)
@@ -50,3 +54,4 @@ clean:
 	$(RM) $(DEST_DIR)/functest_$(SCHEME)_$(IMPLEMENTATION)
 	$(RM) $(DEST_DIR)/testvectors_$(SCHEME)_$(IMPLEMENTATION)
 	$(RM) $(DEST_DIR)/test_fips202
+	$(RM) $(DEST_DIR)/test_sha2
diff --git a/test/common/sha2.c b/test/common/sha2.c
new file mode 100644
index 00000000..f08eb06b
--- /dev/null
+++ b/test/common/sha2.c
@@ -0,0 +1,173 @@
+#include "sha2.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+const unsigned char plaintext[113] = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
+
+const unsigned char expected_224[28] = {
+    0xc9, 0x7c, 0xa9, 0xa5, 0x59, 0x85, 0x0c, 0xe9, 0x7a, 0x04, 0xa9, 0x6d,
+    0xef, 0x6d, 0x99, 0xa9, 0xe0, 0xe0, 0xe2, 0xab, 0x14, 0xe6, 0xb8, 0xdf,
+    0x26, 0x5f, 0xc0, 0xb3
+};
+
+const unsigned char expected_256[32] = {
+    0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, 0x03, 0x6c, 0xe5, 0x9e,
+    0x7b, 0x04, 0x92, 0x37, 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51,
+    0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1
+};
+
+const unsigned char expected_384[48] = {
+    0x09, 0x33, 0x0c, 0x33, 0xf7, 0x11, 0x47, 0xe8, 0x3d, 0x19, 0x2f, 0xc7,
+    0x82, 0xcd, 0x1b, 0x47, 0x53, 0x11, 0x1b, 0x17, 0x3b, 0x3b, 0x05, 0xd2,
+    0x2f, 0xa0, 0x80, 0x86, 0xe3, 0xb0, 0xf7, 0x12, 0xfc, 0xc7, 0xc7, 0x1a,
+    0x55, 0x7e, 0x2d, 0xb9, 0x66, 0xc3, 0xe9, 0xfa, 0x91, 0x74, 0x60, 0x39
+};
+
+const unsigned char expected_512[64] = {
+    0x8e, 0x95, 0x9b, 0x75, 0xda, 0xe3, 0x13, 0xda, 0x8c, 0xf4, 0xf7, 0x28,
+    0x14, 0xfc, 0x14, 0x3f, 0x8f, 0x77, 0x79, 0xc6, 0xeb, 0x9f, 0x7f, 0xa1,
+    0x72, 0x99, 0xae, 0xad, 0xb6, 0x88, 0x90, 0x18, 0x50, 0x1d, 0x28, 0x9e,
+    0x49, 0x00, 0xf7, 0xe4, 0x33, 0x1b, 0x99, 0xde, 0xc4, 0xb5, 0x43, 0x3a,
+    0xc7, 0xd3, 0x29, 0xee, 0xb6, 0xdd, 0x26, 0x54, 0x5e, 0x96, 0xe5, 0x5b,
+    0x87, 0x4b, 0xe9, 0x09
+};
+
+static int test_sha256_incremental(void) {
+    unsigned char output[32];
+    uint8_t state[40];
+    int i = 0;
+
+    sha256_inc_init(state);
+    sha256_inc_blocks(state, plaintext, 1);
+    sha256_inc_finalize(output, state, plaintext + 64, 112 - 64);
+
+    if (memcmp(expected_256, output, 32)) {
+        printf("ERROR sha256 incremental did not match sha256.\n");
+        printf(" Expected: ");
+        for (i = 0; i < 32; i++) {
+            printf("%02X", expected_256[i]);
+        }
+        printf("\n");
+        printf(" Received: ");
+        for (i = 0; i < 32; i++) {
+            printf("%02X", output[i]);
+        }
+        printf("\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+static int test_sha224(void) {
+    unsigned char output[28];
+    int i = 0;
+
+    sha224(output, plaintext, 112);
+
+    if (memcmp(expected_224, output, 28)) {
+        printf("ERROR sha224 output did not match test vector.\n");
+        printf("Expected: ");
+        for (i = 0; i < 28; i++) {
+            printf("%02X", expected_224[i]);
+        }
+        printf("\n");
+        printf("Received: ");
+        for (i = 0; i < 28; i++) {
+            printf("%02X", output[i]);
+        }
+        printf("\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+static int test_sha256(void) {
+    unsigned char output[32];
+    int i = 0;
+
+    sha256(output, plaintext, 112);
+
+    if (memcmp(expected_256, output, 32)) {
+        printf("ERROR sha256 output did not match test vector.\n");
+        printf("Expected: ");
+        for (i = 0; i < 32; i++) {
+            printf("%02X", expected_256[i]);
+        }
+        printf("\n");
+        printf("Received: ");
+        for (i = 0; i < 32; i++) {
+            printf("%02X", output[i]);
+        }
+        printf("\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+static int test_sha384(void) {
+    unsigned char output[48];
+    int i = 0;
+
+    sha384(output, plaintext, 112);
+
+    if (memcmp(expected_384, output, 48)) {
+        printf("ERROR sha384 output did not match test vector.\n");
+        printf("Expected: ");
+        for (i = 0; i < 48; i++) {
+            printf("%02X", expected_384[i]);
+        }
+        printf("\n");
+        printf("Received: ");
+        for (i = 0; i < 48; i++) {
+            printf("%02X", output[i]);
+        }
+        printf("\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+static int test_sha512(void) {
+    unsigned char output[64];
+    int i = 0;
+
+    sha512(output, plaintext, 112);
+
+    if (memcmp(expected_512, output, 64)) {
+        printf("ERROR sha512 output did not match test vector.\n");
+        printf("Expected: ");
+        for (i = 0; i < 64; i++) {
+            printf("%02X", expected_512[i]);
+        }
+        printf("\n");
+        printf("Received: ");
+        for (i = 0; i < 64; i++) {
+            printf("%02X", output[i]);
+        }
+        printf("\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+int main(void) {
+    int result = 0;
+    result += test_sha224();
+    result += test_sha256();
+    result += test_sha256_incremental();
+    result += test_sha384();
+    result += test_sha512();
+
+    if (result != 0) {
+        puts("Errors occurred");
+    }
+    return result;
+}
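Usage note: the incremental API introduced above is driven by feeding whole 64-byte blocks (128-byte blocks for SHA-384/SHA-512) to the *_inc_blocks functions and handing whatever remains to *_inc_finalize, which also appends the padding. A minimal caller sketch for SHA-256 follows; the helper name, msg, and msglen are hypothetical and not part of the patch.

    #include <stddef.h>
    #include <stdint.h>

    #include "sha2.h"

    /* Sketch: hash msg[0..msglen) with the incremental SHA-256 API. The 40-byte
       state holds the 32-byte chaining value followed by a big-endian 64-bit
       count of bytes absorbed so far, as set up by sha256_inc_init. */
    static void sha256_incremental_example(uint8_t out[32], const uint8_t *msg, size_t msglen) {
        uint8_t state[40];
        size_t blocks = msglen / 64;    /* number of whole 64-byte blocks */

        sha256_inc_init(state);
        sha256_inc_blocks(state, msg, blocks);
        sha256_inc_finalize(out, state, msg + 64 * blocks, msglen - 64 * blocks);
    }

The one-shot sha256() added in this patch is the same pattern with zero blocks, since sha256_inc_finalize itself consumes any whole blocks in its input before applying the padding.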