/* Copyright (c) 2014, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include <assert.h>
#include <limits.h>
#include <string.h>

#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/sha.h>
#include <openssl/type_check.h>

#include "../internal.h"
#include "internal.h"


typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an
   * explicit IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);

static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key));
  OPENSSL_free(tls_ctx);
  ctx->aead_state = NULL;
}

static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);

  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  ctx->aead_state = tls_ctx;
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    ctx->aead_state = NULL;
    return 0;
  }
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}
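/* Key-layout example: the |key| handed to |aead_tls_init| is the MAC key,
 * followed by the cipher key, followed (only for implicit-IV variants) by the
 * CBC IV. For the AES-128-CBC-SHA1 implicit-IV AEAD below that is
 * 20 (SHA-1 MAC key) + 16 (AES-128 key) + 16 (CBC IV) = 52 bytes, matching
 * both the key-length assert above and the key_len field of
 * |aead_aes_128_cbc_sha1_tls_implicit_iv|. */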
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
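/* Sizing example for the sealing path above: with AES-128-CBC-SHA1, sealing a
 * 32-byte record appends the 20-byte HMAC-SHA1 tag and then
 * padding_len = 16 - ((32 + 20) % 16) = 12 bytes of padding (each byte set to
 * padding_len - 1 = 11), for a 64-byte ciphertext. The worst case is a full
 * block of padding plus the MAC, which is why the |EVP_AEAD| definitions below
 * declare an overhead of one block size plus the digest length. */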
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  unsigned padding_ok, data_plus_mac_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            (unsigned)HMAC_size(&tls_ctx->hmac_ctx))) {
      /* Publicly invalid. This can be rejected in non-constant time. */
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = ~0u;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  unsigned data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, if the padding is valid, the first |data_plus_mac_len|
   * bytes after |out| are the plaintext and MAC. Otherwise,
   * |data_plus_mac_len| is still large enough to extract a MAC, but it will
   * be irrelevant. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include
   * the length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should
   * be safe to simply perform the padding check first, but it would not be
   * under a different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= padding_ok;
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
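/* Note on the open path above: padding and MAC failures are deliberately
 * indistinguishable. The constant-time padding check and the constant-time MAC
 * comparison both feed a single |good| flag, and the only error reported is
 * CIPHER_R_BAD_DECRYPT, so a Lucky-13-style padding oracle cannot separate the
 * two cases by error code or, for the supported digests, by timing. */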
static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}

static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx);
  if (iv_len <= 1) {
    return 0;
  }

  *out_iv = tls_ctx->cipher_ctx.iv;
  *out_iv_len = iv_len;
  return 1;
}

static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t tag_len,
                                   enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_enc_null(),
                       EVP_sha1(), 1 /* implicit iv */);
}

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA256_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
    SHA384_DIGEST_LENGTH,      /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_tls_get_iv, /* get_iv */
};

static const EVP_AEAD aead_null_sha1_tls = {
    SHA_DIGEST_LENGTH, /* key len */
    0,                 /* nonce len */
    SHA_DIGEST_LENGTH, /* overhead (SHA1) */
    SHA_DIGEST_LENGTH, /* max tag length */
    NULL,              /* init */
    aead_null_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_iv */
};

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_null_sha1_tls(void) { return &aead_null_sha1_tls; }
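
/* Usage sketch (illustrative): sealing one record with the AES-128-CBC-SHA1
 * TLS AEAD defined above. The 36-byte key is the 20-byte HMAC-SHA1 key
 * followed by the 16-byte AES key, the nonce is the 16-byte explicit CBC IV,
 * and the additional data is the 11-byte sequence-number || type || version
 * prefix of the record header (the 2-byte length is appended internally by
 * |aead_tls_seal|). |EVP_AEAD_CTX_init_with_direction| is assumed to be
 * available via the internal header included above; error handling is
 * reduced to a simple failure return. */
static int example_seal_tls_record(const uint8_t key[36], const uint8_t iv[16],
                                   const uint8_t ad[11], const uint8_t *in,
                                   size_t in_len, uint8_t *out,
                                   size_t max_out_len, size_t *out_len) {
  const EVP_AEAD *aead = EVP_aead_aes_128_cbc_sha1_tls();
  EVP_AEAD_CTX aead_ctx;

  /* TLS AEADs are one-directional, so the direction is fixed at init time. */
  if (!EVP_AEAD_CTX_init_with_direction(&aead_ctx, aead, key,
                                        EVP_AEAD_key_length(aead),
                                        EVP_AEAD_DEFAULT_TAG_LENGTH,
                                        evp_aead_seal)) {
    return 0;
  }

  /* |out| must have room for in_len + EVP_AEAD_max_overhead(aead) bytes. */
  int ok = EVP_AEAD_CTX_seal(&aead_ctx, out, out_len, max_out_len, iv,
                             EVP_AEAD_nonce_length(aead), in, in_len, ad,
                             11 /* ad_len, checked against 13 - 2 above */);
  EVP_AEAD_CTX_cleanup(&aead_ctx);
  return ok;
}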