You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

614 lines
21 KiB

  1. /* Copyright (c) 2014, Google Inc.
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
  14. #include <assert.h>
  15. #include <limits.h>
  16. #include <string.h>
  17. #include <openssl/aead.h>
  18. #include <openssl/cipher.h>
  19. #include <openssl/err.h>
  20. #include <openssl/hmac.h>
  21. #include <openssl/mem.h>
  22. #include <openssl/sha.h>
  23. #include <openssl/type_check.h>
  24. #include "../crypto/internal.h"
  25. #include "internal.h"
/* AEAD_TLS_CTX is the per-context state for the TLS MAC-then-encrypt
 * "stitched" AEADs: a cipher context paired with an HMAC context. */
typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
   * IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

/* mac_key_len is stored in a uint8_t, so every possible digest length must
 * fit in one byte. */
OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);
  38. static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  39. AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  40. EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  41. HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  42. OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key));
  43. OPENSSL_free(tls_ctx);
  44. ctx->aead_state = NULL;
  45. }
  46. static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
  47. size_t tag_len, enum evp_aead_direction_t dir,
  48. const EVP_CIPHER *cipher, const EVP_MD *md,
  49. char implicit_iv) {
  50. if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
  51. tag_len != EVP_MD_size(md)) {
  52. OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_UNSUPPORTED_TAG_SIZE);
  53. return 0;
  54. }
  55. if (key_len != EVP_AEAD_key_length(ctx->aead)) {
  56. OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_BAD_KEY_LENGTH);
  57. return 0;
  58. }
  59. size_t mac_key_len = EVP_MD_size(md);
  60. size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  61. assert(mac_key_len + enc_key_len +
  62. (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);
  63. /* Although EVP_rc4() is a variable-length cipher, the default key size is
  64. * correct for TLS. */
  65. AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  66. if (tls_ctx == NULL) {
  67. OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, ERR_R_MALLOC_FAILURE);
  68. return 0;
  69. }
  70. EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  71. HMAC_CTX_init(&tls_ctx->hmac_ctx);
  72. assert(mac_key_len <= EVP_MAX_MD_SIZE);
  73. memcpy(tls_ctx->mac_key, key, mac_key_len);
  74. tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  75. tls_ctx->implicit_iv = implicit_iv;
  76. ctx->aead_state = tls_ctx;
  77. if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
  78. implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
  79. dir == evp_aead_seal) ||
  80. !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
  81. aead_tls_cleanup(ctx);
  82. ctx->aead_state = NULL;
  83. return 0;
  84. }
  85. EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);
  86. return 1;
  87. }
/* aead_tls_seal implements the "seal" (encrypt) direction of the TLS
 * MAC-then-encrypt construction: it HMACs |ad| + plaintext length + |in|,
 * then encrypts plaintext, MAC and (for CBC ciphers) TLS padding into |out|.
 * |nonce| carries the explicit IV for explicit-IV CBC ciphers and is empty
 * otherwise. */
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The first clause rejects |in_len| values that would overflow size_t when
   * the overhead is added. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. The two length bytes are appended here. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  /* A block size greater than one implies a CBC-mode cipher (see the assert
   * below), which requires TLS padding; stream ciphers have block size 1. */
  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. TLS padding bytes each
     * hold the padding length minus one. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
/* aead_tls_open implements the "open" (decrypt) direction of the TLS
 * MAC-then-encrypt construction: it decrypts |in| into |out|, removes CBC
 * padding (if any) and verifies the HMAC. For CBC ciphers, padding and MAC
 * validity are checked in constant time to resist padding-oracle attacks. On
 * success the plaintext, without MAC or padding, is left in |out|. */
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* A record shorter than the MAC is publicly invalid. */
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  /* EVP padding was disabled in aead_tls_init, so decryption is
   * length-preserving. */
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and the
   * first |data_plus_mac_size| bytes after |out| are the plaintext and
   * MAC. Either way, |data_plus_mac_size| is large enough to extract a MAC. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. Rebuild the full 13-byte TLS AD here using the
   * plaintext (not ciphertext) length. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    /* Constant-time CBC path: digest the record without leaking where the
     * plaintext/MAC boundary lies. */
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    HMAC_CTX hmac_ctx;
    HMAC_CTX_init(&hmac_ctx);
    unsigned mac_len_u;
    if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
        !HMAC_Update(&hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&hmac_ctx, out, data_len) ||
        !HMAC_Final(&hmac_ctx, mac, &mac_len_u)) {
      HMAC_CTX_cleanup(&hmac_ctx);
      return 0;
    }
    mac_len = mac_len_u;
    HMAC_CTX_cleanup(&hmac_ctx);
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under a
   * different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
/* The following functions bind aead_tls_init to a concrete cipher/digest
 * pair. The trailing argument selects implicit-IV (pre-TLS-1.1 CBC)
 * behavior: 0 = explicit IV in the nonce, 1 = IV appended to the key. */

static int aead_rc4_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                  size_t key_len, size_t tag_len,
                                  enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_rc4(), EVP_sha1(),
                       0);
}

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}
  376. static int aead_rc4_sha1_tls_get_rc4_state(const EVP_AEAD_CTX *ctx,
  377. const RC4_KEY **out_key) {
  378. const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX*) ctx->aead_state;
  379. if (EVP_CIPHER_CTX_cipher(&tls_ctx->cipher_ctx) != EVP_rc4()) {
  380. return 0;
  381. }
  382. *out_key = (const RC4_KEY*) tls_ctx->cipher_ctx.cipher_data;
  383. return 1;
  384. }
/* Static EVP_AEAD vtables for the TLS cipher suites. Per the field comments:
 * key length, nonce length, maximum overhead, maximum tag length, then the
 * operation callbacks. */
static const EVP_AEAD aead_rc4_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + RC4) */
    0,                      /* nonce len */
    SHA_DIGEST_LENGTH,      /* overhead */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_rc4_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_rc4_sha1_tls_get_rc4_state, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

/* The implicit-IV variant keys the IV rather than taking it as a nonce. */
static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};
  421. static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
  422. SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
  423. 16, /* nonce len (IV) */
  424. 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
  425. SHA_DIGEST_LENGTH, /* max tag length */
  426. NULL, /* init */
  427. aead_aes_128_cbc_sha256_tls_init,
  428. aead_tls_cleanup,
  429. aead_tls_seal,
  430. aead_tls_open,
  431. NULL, /* get_rc4_state */
  432. };
static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

/* The implicit-IV variant keys the IV rather than taking it as a nonce. */
static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};
  457. static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
  458. SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
  459. 16, /* nonce len (IV) */
  460. 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
  461. SHA_DIGEST_LENGTH, /* max tag length */
  462. NULL, /* init */
  463. aead_aes_256_cbc_sha256_tls_init,
  464. aead_tls_cleanup,
  465. aead_tls_seal,
  466. aead_tls_open,
  467. NULL, /* get_rc4_state */
  468. };
  469. static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
  470. SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
  471. 16, /* nonce len (IV) */
  472. 16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
  473. SHA_DIGEST_LENGTH, /* max tag length */
  474. NULL, /* init */
  475. aead_aes_256_cbc_sha384_tls_init,
  476. aead_tls_cleanup,
  477. aead_tls_seal,
  478. aead_tls_open,
  479. NULL, /* get_rc4_state */
  480. };
/* 3DES uses an 8-byte block, hence the 8-byte IV/nonce and 8-byte maximum
 * padding overhead. */
static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

/* The implicit-IV variant keys the IV rather than taking it as a nonce. */
static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};
/* Public accessors returning the static AEAD vtables defined above. */

const EVP_AEAD *EVP_aead_rc4_sha1_tls(void) { return &aead_rc4_sha1_tls; }

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}