/* ====================================================================
 * Copyright (c) 2001-2011 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ==================================================================== */

#include <string.h>

#include <openssl/aead.h>
#include <openssl/aes.h>
#include <openssl/cipher.h>
#include <openssl/cpu.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include <openssl/rand.h>
#include <openssl/sha.h>

#include "internal.h"
#include "../internal.h"
#include "../modes/internal.h"

#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#include <openssl/arm_arch.h>
#endif


OPENSSL_MSVC_PRAGMA(warning(disable: 4702))  /* Unreachable code. */
typedef struct {
  union {
    double align;
    AES_KEY ks;
  } ks;
  block128_f block;
  union {
    cbc128_f cbc;
    ctr128_f ctr;
  } stream;
} EVP_AES_KEY;

typedef struct {
  union {
    double align;
    AES_KEY ks;
  } ks;        /* AES key schedule to use */
  int key_set; /* Set if key initialised */
  int iv_set;  /* Set if an iv is set */
  GCM128_CONTEXT gcm;
  uint8_t *iv; /* Temporary IV store */
  int ivlen;   /* IV length */
  int taglen;
  int iv_gen;  /* It is OK to generate IVs */
  ctr128_f ctr;
} EVP_AES_GCM_CTX;
#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
#define VPAES
static char vpaes_capable(void) {
  return (OPENSSL_ia32cap_P[1] & (1 << (41 - 32))) != 0;
}

#if defined(OPENSSL_X86_64)
#define BSAES
static char bsaes_capable(void) {
  return vpaes_capable();
}
#endif

#elif !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))

#if defined(OPENSSL_ARM) && __ARM_MAX_ARCH__ >= 7
#define BSAES
static char bsaes_capable(void) {
  return CRYPTO_is_NEON_capable();
}
#endif

#define HWAES
static int hwaes_capable(void) {
  return CRYPTO_is_ARMv8_AES_capable();
}

#elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE)
#define HWAES
static int hwaes_capable(void) {
  return CRYPTO_is_PPC64LE_vcrypto_capable();
}
#endif  /* OPENSSL_PPC64LE */
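
/* The blocks above define up to four accelerated AES implementations. The
 * key-setup code below selects among them, preferring, in rough order:
 * AES-NI (x86/x86-64), other hardware AES instructions (HWAES: ARMv8 AES or
 * POWER8 vector crypto), the bit-sliced constant-time assembly (BSAES, used
 * for CTR and CBC decryption), the vector-permutation assembly (VPAES, also
 * constant-time), and finally the portable table-based C implementation in
 * AES_encrypt/AES_decrypt. */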
#if defined(BSAES)
/* On platforms where BSAES is defined (just above), these functions are
 * provided by asm. */
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                const AES_KEY *key, const uint8_t ivec[16]);
#else
static char bsaes_capable(void) {
  return 0;
}

/* On other platforms, bsaes_capable() will always return false and so the
 * following will never be called. */
static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                              const AES_KEY *key, uint8_t ivec[16], int enc) {
  abort();
}

static void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                       size_t len, const AES_KEY *key,
                                       const uint8_t ivec[16]) {
  abort();
}
#endif

#if defined(VPAES)
/* On platforms where VPAES is defined (just above), these functions are
 * provided by asm. */
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);

void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc);
#else
static char vpaes_capable(void) {
  return 0;
}

/* On other platforms, vpaes_capable() will always return false and so the
 * following will never be called. */
static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits,
                                 AES_KEY *key) {
  abort();
}

static int vpaes_set_decrypt_key(const uint8_t *userKey, int bits,
                                 AES_KEY *key) {
  abort();
}

static void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

static void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

static void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                              const AES_KEY *key, uint8_t *ivec, int enc) {
  abort();
}
#endif

#if defined(HWAES)
int aes_hw_set_encrypt_key(const uint8_t *user_key, const int bits,
                           AES_KEY *key);
int aes_hw_set_decrypt_key(const uint8_t *user_key, const int bits,
                           AES_KEY *key);
void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                        const AES_KEY *key, uint8_t *ivec, const int enc);
void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                 const AES_KEY *key, const uint8_t ivec[16]);
#else
/* If HWAES isn't defined then we provide dummy functions for each of the
 * hwaes functions. */
static int hwaes_capable(void) {
  return 0;
}

static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits,
                                  AES_KEY *key) {
  abort();
}

static int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits,
                                  AES_KEY *key) {
  abort();
}

static void aes_hw_encrypt(const uint8_t *in, uint8_t *out,
                           const AES_KEY *key) {
  abort();
}

static void aes_hw_decrypt(const uint8_t *in, uint8_t *out,
                           const AES_KEY *key) {
  abort();
}

static void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                               const AES_KEY *key, uint8_t *ivec, int enc) {
  abort();
}

static void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                        size_t len, const AES_KEY *key,
                                        const uint8_t ivec[16]) {
  abort();
}
#endif

#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
int aesni_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int aesni_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aesni_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);

void aesni_ecb_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, int enc);
void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc);

#else

/* On other platforms, aesni_capable() will always return false and so the
 * following will never be called. */
static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

static int aesni_set_encrypt_key(const uint8_t *userKey, int bits,
                                 AES_KEY *key) {
  abort();
}

static void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                       size_t blocks, const void *key,
                                       const uint8_t *ivec) {
  abort();
}

#endif
static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                        const uint8_t *iv, int enc) {
  int ret, mode;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
  if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
    if (hwaes_capable()) {
      ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)aes_hw_decrypt;
      dat->stream.cbc = NULL;
      if (mode == EVP_CIPH_CBC_MODE) {
        dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt;
      }
    } else if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) {
      ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)AES_decrypt;
      dat->stream.cbc = (cbc128_f)bsaes_cbc_encrypt;
    } else if (vpaes_capable()) {
      ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)vpaes_decrypt;
      dat->stream.cbc =
          mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL;
    } else {
      ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
      dat->block = (block128_f)AES_decrypt;
      dat->stream.cbc =
          mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL;
    }
  } else if (hwaes_capable()) {
    ret = aes_hw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)aes_hw_encrypt;
    dat->stream.cbc = NULL;
    if (mode == EVP_CIPH_CBC_MODE) {
      dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt;
    } else if (mode == EVP_CIPH_CTR_MODE) {
      dat->stream.ctr = (ctr128_f)aes_hw_ctr32_encrypt_blocks;
    }
  } else if (bsaes_capable() && mode == EVP_CIPH_CTR_MODE) {
    ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)AES_encrypt;
    dat->stream.ctr = (ctr128_f)bsaes_ctr32_encrypt_blocks;
  } else if (vpaes_capable()) {
    ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)vpaes_encrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL;
  } else {
    ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
    dat->block = (block128_f)AES_encrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL;
  }

  if (ret < 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  return 1;
}
static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (dat->stream.cbc) {
    (*dat->stream.cbc)(in, out, len, &dat->ks, ctx->iv, ctx->encrypt);
  } else if (ctx->encrypt) {
    CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, dat->block);
  } else {
    CRYPTO_cbc128_decrypt(in, out, len, &dat->ks, ctx->iv, dat->block);
  }

  return 1;
}

static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  size_t bl = ctx->cipher->block_size;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (len < bl) {
    return 1;
  }

  len -= bl;
  for (size_t i = 0; i <= len; i += bl) {
    (*dat->block)(in + i, out + i, &dat->ks);
  }

  return 1;
}

static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  if (dat->stream.ctr) {
    CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks, ctx->iv, ctx->buf,
                                &ctx->num, dat->stream.ctr);
  } else {
    CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, ctx->iv, ctx->buf, &ctx->num,
                          dat->block);
  }

  return 1;
}

static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, ctx->iv, &ctx->num, dat->block);
  return 1;
}
static char aesni_capable(void);

static ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx,
                                block128_f *out_block, const uint8_t *key,
                                size_t key_len) {
  if (aesni_capable()) {
    aesni_set_encrypt_key(key, key_len * 8, aes_key);
    if (gcm_ctx != NULL) {
      CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aesni_encrypt);
    }
    if (out_block) {
      *out_block = (block128_f)aesni_encrypt;
    }
    return (ctr128_f)aesni_ctr32_encrypt_blocks;
  }

  if (hwaes_capable()) {
    aes_hw_set_encrypt_key(key, key_len * 8, aes_key);
    if (gcm_ctx != NULL) {
      CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aes_hw_encrypt);
    }
    if (out_block) {
      *out_block = (block128_f)aes_hw_encrypt;
    }
    return (ctr128_f)aes_hw_ctr32_encrypt_blocks;
  }

  if (bsaes_capable()) {
    AES_set_encrypt_key(key, key_len * 8, aes_key);
    if (gcm_ctx != NULL) {
      CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
    }
    if (out_block) {
      *out_block = (block128_f)AES_encrypt;
    }
    return (ctr128_f)bsaes_ctr32_encrypt_blocks;
  }

  if (vpaes_capable()) {
    vpaes_set_encrypt_key(key, key_len * 8, aes_key);
    if (out_block) {
      *out_block = (block128_f)vpaes_encrypt;
    }
    if (gcm_ctx != NULL) {
      CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)vpaes_encrypt);
    }
    return NULL;
  }

  AES_set_encrypt_key(key, key_len * 8, aes_key);
  if (gcm_ctx != NULL) {
    CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
  }
  if (out_block) {
    *out_block = (block128_f)AES_encrypt;
  }
  return NULL;
}
static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                            const uint8_t *iv, int enc) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
  if (!iv && !key) {
    return 1;
  }
  if (key) {
    gctx->ctr =
        aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len);
    /* If we have an IV, we can set it directly; otherwise use the saved IV. */
    if (iv == NULL && gctx->iv_set) {
      iv = gctx->iv;
    }
    if (iv) {
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
      gctx->iv_set = 1;
    }
    gctx->key_set = 1;
  } else {
    /* If the key is already set, use the IV now; otherwise save a copy. */
    if (gctx->key_set) {
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
    } else {
      OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen);
    }
    gctx->iv_set = 1;
    gctx->iv_gen = 0;
  }
  return 1;
}
static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) {
  EVP_AES_GCM_CTX *gctx = c->cipher_data;
  OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
  if (gctx->iv != c->iv) {
    OPENSSL_free(gctx->iv);
  }
}

/* ctr64_inc increments the big-endian, 64-bit counter in |counter| by one. */
static void ctr64_inc(uint8_t *counter) {
  int n = 8;
  uint8_t c;

  do {
    --n;
    c = counter[n];
    ++c;
    counter[n] = c;
    if (c) {
      return;
    }
  } while (n);
}
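
/* For example, incrementing a counter ending in ...00 ff yields ...01 00: the
 * final 0xff byte wraps to zero, so the loop carries into the byte to its
 * left and then stops. */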
static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) {
  EVP_AES_GCM_CTX *gctx = c->cipher_data;
  switch (type) {
    case EVP_CTRL_INIT:
      gctx->key_set = 0;
      gctx->iv_set = 0;
      gctx->ivlen = c->cipher->iv_len;
      gctx->iv = c->iv;
      gctx->taglen = -1;
      gctx->iv_gen = 0;
      return 1;

    case EVP_CTRL_GCM_SET_IVLEN:
      if (arg <= 0) {
        return 0;
      }

      /* Allocate memory for IV if needed */
      if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) {
        if (gctx->iv != c->iv) {
          OPENSSL_free(gctx->iv);
        }
        gctx->iv = OPENSSL_malloc(arg);
        if (!gctx->iv) {
          return 0;
        }
      }
      gctx->ivlen = arg;
      return 1;

    case EVP_CTRL_GCM_SET_TAG:
      if (arg <= 0 || arg > 16 || c->encrypt) {
        return 0;
      }
      OPENSSL_memcpy(c->buf, ptr, arg);
      gctx->taglen = arg;
      return 1;

    case EVP_CTRL_GCM_GET_TAG:
      if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) {
        return 0;
      }
      OPENSSL_memcpy(ptr, c->buf, arg);
      return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
      /* Special case: -1 length restores whole IV */
      if (arg == -1) {
        OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen);
        gctx->iv_gen = 1;
        return 1;
      }

      /* Fixed field must be at least 4 bytes and the invocation field at
       * least 8. */
      if (arg < 4 || (gctx->ivlen - arg) < 8) {
        return 0;
      }

      if (arg) {
        OPENSSL_memcpy(gctx->iv, ptr, arg);
      }
      if (c->encrypt && !RAND_bytes(gctx->iv + arg, gctx->ivlen - arg)) {
        return 0;
      }
      gctx->iv_gen = 1;
      return 1;

    case EVP_CTRL_GCM_IV_GEN:
      if (gctx->iv_gen == 0 || gctx->key_set == 0) {
        return 0;
      }
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen);
      if (arg <= 0 || arg > gctx->ivlen) {
        arg = gctx->ivlen;
      }
      OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
      /* The invocation field is at least 8 bytes in size, so there is no need
       * to check for wrap-around or to increment more than the last 8
       * bytes. */
      ctr64_inc(gctx->iv + gctx->ivlen - 8);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
      if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) {
        return 0;
      }
      OPENSSL_memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen);
      gctx->iv_set = 1;
      return 1;

    case EVP_CTRL_COPY: {
      EVP_CIPHER_CTX *out = ptr;
      EVP_AES_GCM_CTX *gctx_out = out->cipher_data;
      if (gctx->iv == c->iv) {
        gctx_out->iv = out->iv;
      } else {
        gctx_out->iv = OPENSSL_malloc(gctx->ivlen);
        if (!gctx_out->iv) {
          return 0;
        }
        OPENSSL_memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
      }
      return 1;
    }

    default:
      return -1;
  }
}
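
/* Because the GCM ciphers set EVP_CIPH_FLAG_CUSTOM_CIPHER, aes_gcm_cipher
 * below does not follow the usual zero/one return convention. It returns the
 * number of bytes processed for data (and AAD) calls, zero for the final call
 * that computes or verifies the tag, and -1 on error. */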
static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t len) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;

  /* If not set up, return error */
  if (!gctx->key_set) {
    return -1;
  }
  if (!gctx->iv_set) {
    return -1;
  }

  if (in) {
    if (out == NULL) {
      if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len)) {
        return -1;
      }
    } else if (ctx->encrypt) {
      if (gctx->ctr) {
        if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len,
                                         gctx->ctr)) {
          return -1;
        }
      } else {
        if (!CRYPTO_gcm128_encrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) {
          return -1;
        }
      }
    } else {
      if (gctx->ctr) {
        if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len,
                                         gctx->ctr)) {
          return -1;
        }
      } else {
        if (!CRYPTO_gcm128_decrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) {
          return -1;
        }
      }
    }
    return len;
  } else {
    if (!ctx->encrypt) {
      if (gctx->taglen < 0 ||
          !CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen)) {
        return -1;
      }
      gctx->iv_set = 0;
      return 0;
    }
    CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
    gctx->taglen = 16;
    /* Don't reuse the IV */
    gctx->iv_set = 0;
    return 0;
  }
}
static const EVP_CIPHER aes_128_cbc = {
    NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aes_init_key, aes_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_ctr = {
    NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aes_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_ecb = {
    NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aes_init_key, aes_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_ofb = {
    NID_aes_128_ofb128, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE,
    NULL /* app_data */, aes_init_key, aes_ofb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_128_gcm = {
    NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aes_192_cbc = {
    NID_aes_192_cbc, 16 /* block_size */, 24 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aes_init_key, aes_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_192_ctr = {
    NID_aes_192_ctr, 1 /* block_size */, 24 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aes_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_192_ecb = {
    NID_aes_192_ecb, 16 /* block_size */, 24 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aes_init_key, aes_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_192_gcm = {
    NID_aes_192_gcm, 1 /* block_size */, 24 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aes_256_cbc = {
    NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aes_init_key, aes_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_ctr = {
    NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aes_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_ecb = {
    NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aes_init_key, aes_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_ofb = {
    NID_aes_256_ofb128, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE,
    NULL /* app_data */, aes_init_key, aes_ofb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aes_256_gcm = {
    NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};
#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))

/* AES-NI section. */

static char aesni_capable(void) {
  return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0;
}

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                          const uint8_t *iv, int enc) {
  int ret, mode;
  EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;

  mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
  if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
    ret = aesni_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
    dat->block = (block128_f)aesni_decrypt;
    dat->stream.cbc =
        mode == EVP_CIPH_CBC_MODE ? (cbc128_f)aesni_cbc_encrypt : NULL;
  } else {
    ret = aesni_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
    dat->block = (block128_f)aesni_encrypt;
    if (mode == EVP_CIPH_CBC_MODE) {
      dat->stream.cbc = (cbc128_f)aesni_cbc_encrypt;
    } else if (mode == EVP_CIPH_CTR_MODE) {
      dat->stream.ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
    } else {
      dat->stream.cbc = NULL;
    }
  }

  if (ret < 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED);
    return 0;
  }

  return 1;
}

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                            const uint8_t *in, size_t len) {
  aesni_cbc_encrypt(in, out, len, ctx->cipher_data, ctx->iv, ctx->encrypt);
  return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                            const uint8_t *in, size_t len) {
  size_t bl = ctx->cipher->block_size;

  if (len < bl) {
    return 1;
  }

  aesni_ecb_encrypt(in, out, len, ctx->cipher_data, ctx->encrypt);
  return 1;
}

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                              const uint8_t *iv, int enc) {
  EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
  if (!iv && !key) {
    return 1;
  }
  if (key) {
    aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
    CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt);
    gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
    /* If we have an IV, we can set it directly; otherwise use the saved
     * IV. */
    if (iv == NULL && gctx->iv_set) {
      iv = gctx->iv;
    }
    if (iv) {
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
      gctx->iv_set = 1;
    }
    gctx->key_set = 1;
  } else {
    /* If the key is already set, use the IV now; otherwise save a copy. */
    if (gctx->key_set) {
      CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
    } else {
      OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen);
    }
    gctx->iv_set = 1;
    gctx->iv_gen = 0;
  }
  return 1;
}
static const EVP_CIPHER aesni_128_cbc = {
    NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aesni_init_key, aesni_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_ctr = {
    NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aesni_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_ecb = {
    NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aesni_init_key, aesni_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_ofb = {
    NID_aes_128_ofb128, 1 /* block_size */, 16 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE,
    NULL /* app_data */, aesni_init_key, aes_ofb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_128_gcm = {
    NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aesni_192_cbc = {
    NID_aes_192_cbc, 16 /* block_size */, 24 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aesni_init_key, aesni_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_192_ctr = {
    NID_aes_192_ctr, 1 /* block_size */, 24 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aesni_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_192_ecb = {
    NID_aes_192_ecb, 16 /* block_size */, 24 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aesni_init_key, aesni_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_192_gcm = {
    NID_aes_192_gcm, 1 /* block_size */, 24 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

static const EVP_CIPHER aesni_256_cbc = {
    NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE,
    NULL /* app_data */, aesni_init_key, aesni_cbc_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_ctr = {
    NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE,
    NULL /* app_data */, aesni_init_key, aes_ctr_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_ecb = {
    NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */,
    0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE,
    NULL /* app_data */, aesni_init_key, aesni_ecb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_ofb = {
    NID_aes_256_ofb128, 1 /* block_size */, 32 /* key_size */,
    16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE,
    NULL /* app_data */, aesni_init_key, aes_ofb_cipher,
    NULL /* cleanup */, NULL /* ctrl */};

static const EVP_CIPHER aesni_256_gcm = {
    NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */,
    sizeof(EVP_AES_GCM_CTX),
    EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER |
        EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY |
        EVP_CIPH_FLAG_AEAD_CIPHER,
    NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup,
    aes_gcm_ctrl};

#define EVP_CIPHER_FUNCTION(keybits, mode)             \
  const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \
    if (aesni_capable()) {                             \
      return &aesni_##keybits##_##mode;                \
    } else {                                           \
      return &aes_##keybits##_##mode;                  \
    }                                                  \
  }

#else  /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */

static char aesni_capable(void) {
  return 0;
}

#define EVP_CIPHER_FUNCTION(keybits, mode)             \
  const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \
    return &aes_##keybits##_##mode;                    \
  }

#endif

EVP_CIPHER_FUNCTION(128, cbc)
EVP_CIPHER_FUNCTION(128, ctr)
EVP_CIPHER_FUNCTION(128, ecb)
EVP_CIPHER_FUNCTION(128, ofb)
EVP_CIPHER_FUNCTION(128, gcm)

EVP_CIPHER_FUNCTION(192, cbc)
EVP_CIPHER_FUNCTION(192, ctr)
EVP_CIPHER_FUNCTION(192, ecb)
EVP_CIPHER_FUNCTION(192, gcm)

EVP_CIPHER_FUNCTION(256, cbc)
EVP_CIPHER_FUNCTION(256, ctr)
EVP_CIPHER_FUNCTION(256, ecb)
EVP_CIPHER_FUNCTION(256, ofb)
EVP_CIPHER_FUNCTION(256, gcm)
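
/* A minimal usage sketch for the EVP_CIPHER functions defined above, sealing
 * with AES-128-GCM (error handling omitted; |key|, |iv|, |pt|, |pt_len|,
 * |out| and |tag| are assumed to be supplied by the caller, with |iv| twelve
 * bytes long to match the default iv_len):
 *
 *   EVP_CIPHER_CTX ctx;
 *   EVP_CIPHER_CTX_init(&ctx);
 *   EVP_EncryptInit_ex(&ctx, EVP_aes_128_gcm(), NULL, key, iv);
 *   int len;
 *   EVP_EncryptUpdate(&ctx, out, &len, pt, pt_len);
 *   EVP_EncryptFinal_ex(&ctx, out + len, &len);
 *   EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
 *   EVP_CIPHER_CTX_cleanup(&ctx);
 */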
#define EVP_AEAD_AES_GCM_TAG_LEN 16

struct aead_aes_gcm_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  GCM128_CONTEXT gcm;
  ctr128_f ctr;
  uint8_t tag_len;
};

static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len) {
  struct aead_aes_gcm_ctx *gcm_ctx;
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  }

  if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_ctx));
  if (gcm_ctx == NULL) {
    return 0;
  }

  gcm_ctx->ctr =
      aes_ctr_set_key(&gcm_ctx->ks.ks, &gcm_ctx->gcm, NULL, key, key_len);
  gcm_ctx->tag_len = tag_len;
  ctx->aead_state = gcm_ctx;

  return 1;
}

static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) {
  struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  OPENSSL_cleanse(gcm_ctx, sizeof(struct aead_aes_gcm_ctx));
  OPENSSL_free(gcm_ctx);
}

static int aead_aes_gcm_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             size_t *out_len, size_t max_out_len,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  GCM128_CONTEXT gcm;

  if (in_len + gcm_ctx->tag_len < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + gcm_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  const AES_KEY *key = &gcm_ctx->ks.ks;

  OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
  CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len);

  if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
    return 0;
  }

  if (gcm_ctx->ctr) {
    if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, in, out, in_len,
                                     gcm_ctx->ctr)) {
      return 0;
    }
  } else {
    if (!CRYPTO_gcm128_encrypt(&gcm, key, in, out, in_len)) {
      return 0;
    }
  }

  CRYPTO_gcm128_tag(&gcm, out + in_len, gcm_ctx->tag_len);
  *out_len = in_len + gcm_ctx->tag_len;
  return 1;
}

static int aead_aes_gcm_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             size_t *out_len, size_t max_out_len,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
  uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN];
  size_t plaintext_len;
  GCM128_CONTEXT gcm;

  if (in_len < gcm_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  plaintext_len = in_len - gcm_ctx->tag_len;

  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  const AES_KEY *key = &gcm_ctx->ks.ks;

  OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
  CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len);

  if (!CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
    return 0;
  }

  if (gcm_ctx->ctr) {
    if (!CRYPTO_gcm128_decrypt_ctr32(&gcm, key, in, out,
                                     in_len - gcm_ctx->tag_len, gcm_ctx->ctr)) {
      return 0;
    }
  } else {
    if (!CRYPTO_gcm128_decrypt(&gcm, key, in, out, in_len - gcm_ctx->tag_len)) {
      return 0;
    }
  }

  CRYPTO_gcm128_tag(&gcm, tag, gcm_ctx->tag_len);
  if (CRYPTO_memcmp(tag, in + plaintext_len, gcm_ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  *out_len = plaintext_len;
  return 1;
}

static const EVP_AEAD aead_aes_128_gcm = {
    16,                       /* key len */
    12,                       /* nonce len */
    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
    aead_aes_gcm_init,
    NULL, /* init_with_direction */
    aead_aes_gcm_cleanup,
    aead_aes_gcm_seal,
    aead_aes_gcm_open,
    NULL, /* get_iv */
};

static const EVP_AEAD aead_aes_256_gcm = {
    32,                       /* key len */
    12,                       /* nonce len */
    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
    aead_aes_gcm_init,
    NULL, /* init_with_direction */
    aead_aes_gcm_cleanup,
    aead_aes_gcm_seal,
    aead_aes_gcm_open,
    NULL, /* get_iv */
};

const EVP_AEAD *EVP_aead_aes_128_gcm(void) { return &aead_aes_128_gcm; }

const EVP_AEAD *EVP_aead_aes_256_gcm(void) { return &aead_aes_256_gcm; }
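
/* A minimal sketch of driving the AEADs above through the one-shot EVP_AEAD
 * interface (error handling omitted; |key|, |nonce|, |pt|, |pt_len|, |ad|,
 * |ad_len| and a sufficiently large |out| are assumed to be supplied by the
 * caller):
 *
 *   EVP_AEAD_CTX aead;
 *   EVP_AEAD_CTX_init(&aead, EVP_aead_aes_128_gcm(), key, 16,
 *                     EVP_AEAD_DEFAULT_TAG_LENGTH, NULL);
 *   size_t out_len;
 *   EVP_AEAD_CTX_seal(&aead, out, &out_len, sizeof(out), nonce, 12, pt,
 *                     pt_len, ad, ad_len);
 *   EVP_AEAD_CTX_cleanup(&aead);
 */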
#define EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN SHA256_DIGEST_LENGTH
#define EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN 12

struct aead_aes_ctr_hmac_sha256_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  ctr128_f ctr;
  block128_f block;
  SHA256_CTX inner_init_state;
  SHA256_CTX outer_init_state;
  uint8_t tag_len;
};
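
/* hmac_init precomputes the two hash states with which every standard HMAC
 * evaluation begins: the inner state is SHA-256 after absorbing
 * (key XOR ipad) and the outer state is SHA-256 after absorbing
 * (key XOR opad), where ipad is the byte 0x36 repeated and opad is 0x5c
 * repeated. Copying these states is cheaper than rehashing the padded key for
 * every record sealed or opened. Note the second loop XORs with
 * (0x36 ^ 0x5c), which removes the ipad byte and applies the opad byte in one
 * step. */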
static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer,
                      const uint8_t hmac_key[32]) {
  static const size_t hmac_key_len = 32;
  uint8_t block[SHA256_CBLOCK];
  OPENSSL_memcpy(block, hmac_key, hmac_key_len);
  OPENSSL_memset(block + hmac_key_len, 0x36, sizeof(block) - hmac_key_len);

  unsigned i;
  for (i = 0; i < hmac_key_len; i++) {
    block[i] ^= 0x36;
  }

  SHA256_Init(out_inner);
  SHA256_Update(out_inner, block, sizeof(block));

  OPENSSL_memset(block + hmac_key_len, 0x5c, sizeof(block) - hmac_key_len);
  for (i = 0; i < hmac_key_len; i++) {
    block[i] ^= (0x36 ^ 0x5c);
  }

  SHA256_Init(out_outer);
  SHA256_Update(out_outer, block, sizeof(block));
}

static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                         size_t key_len, size_t tag_len) {
  struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx;
  static const size_t hmac_key_len = 32;

  if (key_len < hmac_key_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  const size_t aes_key_len = key_len - hmac_key_len;
  if (aes_key_len != 16 && aes_key_len != 32) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN;
  }

  if (tag_len > EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  aes_ctx = OPENSSL_malloc(sizeof(struct aead_aes_ctr_hmac_sha256_ctx));
  if (aes_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  aes_ctx->ctr =
      aes_ctr_set_key(&aes_ctx->ks.ks, NULL, &aes_ctx->block, key, aes_key_len);
  aes_ctx->tag_len = tag_len;
  hmac_init(&aes_ctx->inner_init_state, &aes_ctx->outer_init_state,
            key + aes_key_len);

  ctx->aead_state = aes_ctx;

  return 1;
}

static void aead_aes_ctr_hmac_sha256_cleanup(EVP_AEAD_CTX *ctx) {
  struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state;
  OPENSSL_cleanse(aes_ctx, sizeof(struct aead_aes_ctr_hmac_sha256_ctx));
  OPENSSL_free(aes_ctx);
}

static void hmac_update_uint64(SHA256_CTX *sha256, uint64_t value) {
  unsigned i;
  uint8_t bytes[8];

  for (i = 0; i < sizeof(bytes); i++) {
    bytes[i] = value & 0xff;
    value >>= 8;
  }
  SHA256_Update(sha256, bytes, sizeof(bytes));
}

static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH],
                           const SHA256_CTX *inner_init_state,
                           const SHA256_CTX *outer_init_state,
                           const uint8_t *ad, size_t ad_len,
                           const uint8_t *nonce, const uint8_t *ciphertext,
                           size_t ciphertext_len) {
  SHA256_CTX sha256;
  OPENSSL_memcpy(&sha256, inner_init_state, sizeof(sha256));
  hmac_update_uint64(&sha256, ad_len);
  hmac_update_uint64(&sha256, ciphertext_len);
  SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN);
  SHA256_Update(&sha256, ad, ad_len);

  /* Pad with zeros to the end of the SHA-256 block. */
  const unsigned num_padding =
      (SHA256_CBLOCK - ((sizeof(uint64_t) * 2 +
                         EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) %
                        SHA256_CBLOCK)) %
      SHA256_CBLOCK;
  uint8_t padding[SHA256_CBLOCK];
  OPENSSL_memset(padding, 0, num_padding);
  SHA256_Update(&sha256, padding, num_padding);

  SHA256_Update(&sha256, ciphertext, ciphertext_len);

  uint8_t inner_digest[SHA256_DIGEST_LENGTH];
  SHA256_Final(inner_digest, &sha256);

  OPENSSL_memcpy(&sha256, outer_init_state, sizeof(sha256));
  SHA256_Update(&sha256, inner_digest, sizeof(inner_digest));
  SHA256_Final(out, &sha256);
}

static void aead_aes_ctr_hmac_sha256_crypt(
    const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out,
    const uint8_t *in, size_t len, const uint8_t *nonce) {
  /* Since the AEAD operation is one-shot, keeping a buffer of unused keystream
   * bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. */
  uint8_t partial_block_buffer[AES_BLOCK_SIZE];
  unsigned partial_block_offset = 0;
  OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer));

  uint8_t counter[AES_BLOCK_SIZE];
  OPENSSL_memcpy(counter, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN);
  OPENSSL_memset(counter + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN, 0, 4);

  if (aes_ctx->ctr) {
    CRYPTO_ctr128_encrypt_ctr32(in, out, len, &aes_ctx->ks.ks, counter,
                                partial_block_buffer, &partial_block_offset,
                                aes_ctx->ctr);
  } else {
    CRYPTO_ctr128_encrypt(in, out, len, &aes_ctx->ks.ks, counter,
                          partial_block_buffer, &partial_block_offset,
                          aes_ctx->block);
  }
}

static int aead_aes_ctr_hmac_sha256_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                         size_t *out_len, size_t max_out_len,
                                         const uint8_t *nonce, size_t nonce_len,
                                         const uint8_t *in, size_t in_len,
                                         const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state;
  const uint64_t in_len_64 = in_len;

  if (in_len + aes_ctx->tag_len < in_len ||
      /* This input is so large it would overflow the 32-bit block counter. */
      in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + aes_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce);

  uint8_t hmac_result[SHA256_DIGEST_LENGTH];
  hmac_calculate(hmac_result, &aes_ctx->inner_init_state,
                 &aes_ctx->outer_init_state, ad, ad_len, nonce, out, in_len);
  OPENSSL_memcpy(out + in_len, hmac_result, aes_ctx->tag_len);
  *out_len = in_len + aes_ctx->tag_len;

  return 1;
}

static int aead_aes_ctr_hmac_sha256_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                         size_t *out_len, size_t max_out_len,
                                         const uint8_t *nonce, size_t nonce_len,
                                         const uint8_t *in, size_t in_len,
                                         const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state;
  size_t plaintext_len;

  if (in_len < aes_ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  plaintext_len = in_len - aes_ctx->tag_len;

  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  uint8_t hmac_result[SHA256_DIGEST_LENGTH];
  hmac_calculate(hmac_result, &aes_ctx->inner_init_state,
                 &aes_ctx->outer_init_state, ad, ad_len, nonce, in,
                 plaintext_len);
  if (CRYPTO_memcmp(hmac_result, in + plaintext_len, aes_ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, plaintext_len, nonce);

  *out_len = plaintext_len;
  return 1;
}

static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = {
    16 /* AES key */ + 32 /* HMAC key */,
    12,                                    /* nonce length */
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  /* overhead */
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  /* max tag length */
    aead_aes_ctr_hmac_sha256_init,
    NULL /* init_with_direction */,
    aead_aes_ctr_hmac_sha256_cleanup,
    aead_aes_ctr_hmac_sha256_seal,
    aead_aes_ctr_hmac_sha256_open,
    NULL /* get_iv */,
};

static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = {
    32 /* AES key */ + 32 /* HMAC key */,
    12,                                    /* nonce length */
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  /* overhead */
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  /* max tag length */
    aead_aes_ctr_hmac_sha256_init,
    NULL /* init_with_direction */,
    aead_aes_ctr_hmac_sha256_cleanup,
    aead_aes_ctr_hmac_sha256_seal,
    aead_aes_ctr_hmac_sha256_open,
    NULL /* get_iv */,
};

const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void) {
  return &aead_aes_128_ctr_hmac_sha256;
}

const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void) {
  return &aead_aes_256_ctr_hmac_sha256;
}
#if !defined(OPENSSL_SMALL)

#define EVP_AEAD_AES_GCM_SIV_TAG_LEN 16

struct aead_aes_gcm_siv_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  block128_f kgk_block;
  unsigned is_256:1;
};

static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                 size_t key_len, size_t tag_len) {
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0; /* EVP_AEAD_CTX_init should catch this. */
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  }

  if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      OPENSSL_malloc(sizeof(struct aead_aes_gcm_siv_ctx));
  if (gcm_siv_ctx == NULL) {
    return 0;
  }
  OPENSSL_memset(gcm_siv_ctx, 0, sizeof(struct aead_aes_gcm_siv_ctx));

  if (aesni_capable()) {
    aesni_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks);
    gcm_siv_ctx->kgk_block = (block128_f)aesni_encrypt;
  } else if (hwaes_capable()) {
    aes_hw_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks);
    gcm_siv_ctx->kgk_block = (block128_f)aes_hw_encrypt;
  } else if (vpaes_capable()) {
    vpaes_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks);
    gcm_siv_ctx->kgk_block = (block128_f)vpaes_encrypt;
  } else {
    AES_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks);
    gcm_siv_ctx->kgk_block = (block128_f)AES_encrypt;
  }

  gcm_siv_ctx->is_256 = (key_len == 32);
  ctx->aead_state = gcm_siv_ctx;

  return 1;
}

static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) {
  struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state;
  OPENSSL_cleanse(gcm_siv_ctx, sizeof(struct aead_aes_gcm_siv_ctx));
  OPENSSL_free(gcm_siv_ctx);
}
/* gcm_siv_crypt encrypts (or decrypts; the operation is identical) |in_len|
 * bytes from |in| to |out|, using the block function |enc_block| with |key| in
 * counter mode, starting at |initial_counter|. This differs from the
 * traditional counter mode code in that the counter is handled little-endian,
 * only the first four bytes are used, and the GCM-SIV tweak to the final byte
 * is applied. The |in| and |out| pointers may be equal but otherwise must not
 * alias. */
static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len,
                          const uint8_t initial_counter[AES_BLOCK_SIZE],
                          block128_f enc_block, const AES_KEY *key) {
  union {
    uint32_t w[4];
    uint8_t c[16];
  } counter;

  OPENSSL_memcpy(counter.c, initial_counter, AES_BLOCK_SIZE);
  counter.c[15] |= 0x80;

  for (size_t done = 0; done < in_len;) {
    uint8_t keystream[AES_BLOCK_SIZE];
    enc_block(counter.c, keystream, key);
    counter.w[0]++;

    size_t todo = AES_BLOCK_SIZE;
    if (in_len - done < todo) {
      todo = in_len - done;
    }

    for (size_t i = 0; i < todo; i++) {
      out[done + i] = keystream[i] ^ in[done + i];
    }

    done += todo;
  }
}
/* gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and
 * AD. The result is written to |out_tag|. */
static void gcm_siv_polyval(uint8_t out_tag[16], const uint8_t *in,
                            size_t in_len, const uint8_t *ad, size_t ad_len,
                            const uint8_t auth_key[16]) {
  struct polyval_ctx polyval_ctx;
  CRYPTO_POLYVAL_init(&polyval_ctx, auth_key);

  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, ad, ad_len & ~15);

  uint8_t scratch[16];
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch));
  }

  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, in, in_len & ~15);
  if (in_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch));
  }

  union {
    uint8_t c[16];
    struct {
      uint64_t ad;
      uint64_t in;
    } bitlens;
  } length_block;

  length_block.bitlens.ad = ad_len * 8;
  length_block.bitlens.in = in_len * 8;
  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block.c,
                               sizeof(length_block));

  CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag);
  out_tag[15] &= 0x7f;
}

/* gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. */
struct gcm_siv_record_keys {
  uint8_t auth_key[16];
  union {
    double align;
    AES_KEY ks;
  } enc_key;
  block128_f enc_block;
};

/* gcm_siv_keys calculates the keys for a specific GCM-SIV record with the
 * given nonce and writes them to |*out_keys|. */
static void gcm_siv_keys(
    const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx,
    struct gcm_siv_record_keys *out_keys,
    const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_TAG_LEN]) {
  const AES_KEY *const key = &gcm_siv_ctx->ks.ks;
  gcm_siv_ctx->kgk_block(nonce, out_keys->auth_key, key);

  if (gcm_siv_ctx->is_256) {
    uint8_t record_enc_key[32];
    gcm_siv_ctx->kgk_block(out_keys->auth_key, record_enc_key + 16, key);
    gcm_siv_ctx->kgk_block(record_enc_key + 16, record_enc_key, key);
    aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block,
                    record_enc_key, sizeof(record_enc_key));
  } else {
    uint8_t record_enc_key[16];
    gcm_siv_ctx->kgk_block(out_keys->auth_key, record_enc_key, key);
    aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block,
                    record_enc_key, sizeof(record_enc_key));
  }
}

static int aead_aes_gcm_siv_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 size_t *out_len, size_t max_out_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t in_len,
                                 const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state;
  const uint64_t in_len_64 = in_len;
  const uint64_t ad_len_64 = ad_len;

  if (in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN < in_len ||
      in_len_64 > (UINT64_C(1) << 36) ||
      ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);

  uint8_t tag[16];
  gcm_siv_polyval(tag, in, in_len, ad, ad_len, keys.auth_key);
  keys.enc_block(tag, tag, &keys.enc_key.ks);

  gcm_siv_crypt(out, in, in_len, tag, keys.enc_block, &keys.enc_key.ks);

  OPENSSL_memcpy(&out[in_len], tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  *out_len = in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN;

  return 1;
}

static int aead_aes_gcm_siv_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 size_t *out_len, size_t max_out_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t in_len,
                                 const uint8_t *ad, size_t ad_len) {
  const uint64_t ad_len_64 = ad_len;
  if (ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  const uint64_t in_len_64 = in_len;
  if (in_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN ||
      in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state;
  const size_t plaintext_len = in_len - EVP_AEAD_AES_GCM_SIV_TAG_LEN;

  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);

  gcm_siv_crypt(out, in, plaintext_len, &in[plaintext_len], keys.enc_block,
                &keys.enc_key.ks);

  uint8_t expected_tag[EVP_AEAD_AES_GCM_SIV_TAG_LEN];
  gcm_siv_polyval(expected_tag, out, plaintext_len, ad, ad_len, keys.auth_key);
  keys.enc_block(expected_tag, expected_tag, &keys.enc_key.ks);

  if (CRYPTO_memcmp(expected_tag, &in[plaintext_len], sizeof(expected_tag)) !=
      0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  *out_len = plaintext_len;
  return 1;
}

static const EVP_AEAD aead_aes_128_gcm_siv = {
    16,                           /* key length */
    AES_BLOCK_SIZE,               /* nonce length */
    EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    aead_aes_gcm_siv_seal,
    aead_aes_gcm_siv_open,
    NULL /* get_iv */,
};

static const EVP_AEAD aead_aes_256_gcm_siv = {
    32,                           /* key length */
    AES_BLOCK_SIZE,               /* nonce length */
    EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */
    EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */
    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    aead_aes_gcm_siv_seal,
    aead_aes_gcm_siv_open,
    NULL /* get_iv */,
};

const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) {
  return &aead_aes_128_gcm_siv;
}

const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) {
  return &aead_aes_256_gcm_siv;
}
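
/* Note that, unlike the AES-GCM AEADs above, these GCM-SIV AEADs take an
 * AES_BLOCK_SIZE (16-byte) nonce, and sealing is nonce-misuse resistant:
 * repeating a nonce with the same key reveals only whether two plaintexts
 * were equal, not their contents. They are otherwise driven through the same
 * EVP_AEAD_CTX_init / EVP_AEAD_CTX_seal / EVP_AEAD_CTX_open calls sketched
 * earlier. */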
#endif  /* !OPENSSL_SMALL */

int EVP_has_aes_hardware(void) {
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
  return aesni_capable() && crypto_gcm_clmul_enabled();
#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
  return hwaes_capable() && CRYPTO_is_ARMv8_PMULL_capable();
#else
  return 0;
#endif
}