You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

613 lines
21 KiB

  1. /* Copyright (c) 2014, Google Inc.
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
  14. #include <assert.h>
  15. #include <limits.h>
  16. #include <string.h>
  17. #include <openssl/aead.h>
  18. #include <openssl/cipher.h>
  19. #include <openssl/err.h>
  20. #include <openssl/hmac.h>
  21. #include <openssl/mem.h>
  22. #include <openssl/sha.h>
  23. #include <openssl/type_check.h>
  24. #include "../crypto/internal.h"
  25. #include "internal.h"
/* AEAD_TLS_CTX is the per-context state for the TLS "stitched" cipher-suite
 * AEADs: a legacy bulk cipher (CBC or RC4) combined with an HMAC. */
typedef struct {
  EVP_CIPHER_CTX cipher_ctx;  /* bulk cipher state. */
  HMAC_CTX hmac_ctx;          /* HMAC keyed with the MAC portion of the key. */
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
   * IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

/* |mac_key_len| is stored in a uint8_t, so every possible digest length must
 * fit in one byte. */
OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);
  38. static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  39. AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  40. EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  41. HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  42. OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key));
  43. OPENSSL_free(tls_ctx);
  44. ctx->aead_state = NULL;
  45. }
/* aead_tls_init initializes |ctx| for a TLS cipher-suite AEAD. |key| is the
 * concatenation of the MAC key, the cipher key and, when |implicit_iv| is
 * non-zero, the IV, in that order. |dir| fixes the context to sealing or
 * opening; TLS AEADs are unidirectional. Returns one on success and zero on
 * error. */
static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  /* The tag is the full HMAC output; truncated tags are not supported. */
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);
  /* Although EVP_rc4() is a variable-length cipher, the default key size is
   * correct for TLS. */

  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;
  /* |ctx->aead_state| is installed before the fallible calls below so that
   * |aead_tls_cleanup| can release the partially-initialized state on
   * failure. */
  ctx->aead_state = tls_ctx;

  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    return 0;
  }
  /* TLS-style CBC padding is applied by |aead_tls_seal| itself, so disable the
   * cipher's own padding. */
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}
/* aead_tls_seal encrypts |in_len| bytes from |in| as a TLS record: it HMACs
 * the additional data and plaintext, appends the MAC, adds CBC padding when
 * the cipher is block-based, and encrypts the result into |out|. |ad| must be
 * the 11-byte record-header prefix — the two length bytes are derived from
 * |in_len| here (see the |ad_len| check below). Returns one on success and
 * zero on error. */
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* The first clause rejects inputs where adding the overhead would wrap
   * |size_t|. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  /* Copy from the keyed template context so per-record HMACs avoid re-keying. */
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. TLS padding bytes all
     * hold the padding length minus one. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
/* aead_tls_open authenticates and decrypts the TLS record in |in|, writing the
 * plaintext (with MAC and any CBC padding stripped) to |out|. For CBC ciphers
 * the padding and MAC checks are performed in constant time (see the comments
 * below) so that invalid-padding and invalid-MAC failures are
 * indistinguishable to a timing observer. |ad| is the 11-byte record-header
 * prefix; the two length bytes are reconstructed from the recovered plaintext
 * length. Returns one on success and zero on error. */
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  /* A valid record must carry at least a full MAC. */
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  /* Padding is disabled in |aead_tls_init|, so the output length must match
   * the input length exactly. */
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and the
   * first |data_plus_mac_size| bytes after |out| are the plaintext and
   * MAC. Either way, |data_plus_mac_size| is large enough to extract a MAC. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    /* Constant-time CBC path: digest the record without leaking
     * |data_plus_mac_len| through timing. */
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    HMAC_CTX hmac_ctx;
    HMAC_CTX_init(&hmac_ctx);
    unsigned mac_len_u;
    if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
        !HMAC_Update(&hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&hmac_ctx, out, data_len) ||
        !HMAC_Final(&hmac_ctx, mac, &mac_len_u)) {
      HMAC_CTX_cleanup(&hmac_ctx);
      return 0;
    }
    mac_len = mac_len_u;
    HMAC_CTX_cleanup(&hmac_ctx);
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under a
   * different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
/* The following functions are thin wrappers around |aead_tls_init| that bind a
 * specific cipher/digest pair. The |implicit_iv| variants are for pre-TLS-1.1
 * CBC records, where the IV is carried in the key material rather than the
 * nonce. */

static int aead_rc4_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                  size_t key_len, size_t tag_len,
                                  enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_rc4(), EVP_sha1(),
                       0);
}

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}
/* aead_rc4_sha1_tls_get_rc4_state exposes the underlying RC4 key schedule via
 * |*out_key|. It returns zero (leaving |*out_key| untouched) when the context
 * is not actually using RC4. */
static int aead_rc4_sha1_tls_get_rc4_state(const EVP_AEAD_CTX *ctx,
                                           const RC4_KEY **out_key) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX*) ctx->aead_state;
  if (EVP_CIPHER_CTX_cipher(&tls_ctx->cipher_ctx) != EVP_rc4()) {
    return 0;
  }

  /* For RC4, |cipher_data| holds the RC4_KEY state. NOTE(review): this relies
   * on the RC4 EVP_CIPHER's private layout — confirm against the cipher
   * implementation. */
  *out_key = (const RC4_KEY*) tls_ctx->cipher_ctx.cipher_data;
  return 1;
}
/* Static EVP_AEAD vtables, one per supported TLS cipher suite. The key length
 * is the sum of the MAC key, cipher key and (for |implicit_iv| variants) the
 * IV; the overhead is the maximum padding plus the MAC. */

static const EVP_AEAD aead_rc4_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + RC4) */
    0,                      /* nonce len */
    SHA_DIGEST_LENGTH,      /* overhead */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_rc4_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_rc4_sha1_tls_get_rc4_state, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};
/* Public accessors returning pointers to the static vtables above. */

const EVP_AEAD *EVP_aead_rc4_sha1_tls(void) { return &aead_rc4_sha1_tls; }

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}