/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to. The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code. The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed. i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.] */

#include <openssl/cipher.h>

#include <assert.h>
#include <string.h>

#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/nid.h>

#include "internal.h"
#include "../../internal.h"

void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx) {
  OPENSSL_memset(ctx, 0, sizeof(EVP_CIPHER_CTX));
}

EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) {
  EVP_CIPHER_CTX *ctx = OPENSSL_malloc(sizeof(EVP_CIPHER_CTX));
  if (ctx) {
    EVP_CIPHER_CTX_init(ctx);
  }
  return ctx;
}

int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *c) {
  if (c->cipher != NULL && c->cipher->cleanup) {
    c->cipher->cleanup(c);
  }
  OPENSSL_free(c->cipher_data);

  OPENSSL_memset(c, 0, sizeof(EVP_CIPHER_CTX));
  return 1;
}

void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) {
  if (ctx) {
    EVP_CIPHER_CTX_cleanup(ctx);
    OPENSSL_free(ctx);
  }
}
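
// EVP_CIPHER_CTX_copy resets |out| and then duplicates |in| into it: the
// struct is copied wholesale, |cipher_data| is deep-copied when present, and
// ciphers with EVP_CIPH_CUSTOM_COPY get to fix up any internal pointers via
// the EVP_CTRL_COPY ctrl.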
int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in) {
  if (in == NULL || in->cipher == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INPUT_NOT_INITIALIZED);
    return 0;
  }

  EVP_CIPHER_CTX_cleanup(out);
  OPENSSL_memcpy(out, in, sizeof(EVP_CIPHER_CTX));

  if (in->cipher_data && in->cipher->ctx_size) {
    out->cipher_data = OPENSSL_malloc(in->cipher->ctx_size);
    if (!out->cipher_data) {
      out->cipher = NULL;
      OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
      return 0;
    }
    OPENSSL_memcpy(out->cipher_data, in->cipher_data, in->cipher->ctx_size);
  }

  if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY) {
    if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out)) {
      out->cipher = NULL;
      return 0;
    }
  }

  return 1;
}

void EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx) {
  EVP_CIPHER_CTX_cleanup(ctx);
  EVP_CIPHER_CTX_init(ctx);
}
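
// EVP_CipherInit_ex configures |ctx| for encryption (|enc| is one),
// decryption (|enc| is zero), or whichever direction was set previously
// (|enc| is -1). A NULL |cipher| reuses the cipher already configured on
// |ctx|; otherwise old state is cleaned up, |cipher_data| is allocated, and
// the IV is installed according to the cipher's mode. The cipher's init hook
// runs only when a key is supplied or the cipher sets
// EVP_CIPH_ALWAYS_CALL_INIT.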
int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                      ENGINE *engine, const uint8_t *key, const uint8_t *iv,
                      int enc) {
  if (enc == -1) {
    enc = ctx->encrypt;
  } else {
    if (enc) {
      enc = 1;
    }
    ctx->encrypt = enc;
  }

  if (cipher) {
    // Ensure a context left from last time is cleared (the previous check
    // attempted to avoid this if the same ENGINE and EVP_CIPHER could be
    // used).
    if (ctx->cipher) {
      EVP_CIPHER_CTX_cleanup(ctx);
      // Restore encrypt and flags
      ctx->encrypt = enc;
    }

    ctx->cipher = cipher;
    if (ctx->cipher->ctx_size) {
      ctx->cipher_data = OPENSSL_malloc(ctx->cipher->ctx_size);
      if (!ctx->cipher_data) {
        ctx->cipher = NULL;
        OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
        return 0;
      }
    } else {
      ctx->cipher_data = NULL;
    }

    ctx->key_len = cipher->key_len;
    ctx->flags = 0;

    if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) {
      if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) {
        ctx->cipher = NULL;
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INITIALIZATION_ERROR);
        return 0;
      }
    }
  } else if (!ctx->cipher) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET);
    return 0;
  }

  // we assume block size is a power of 2 in *cryptUpdate
  assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 ||
         ctx->cipher->block_size == 16);

  if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_CUSTOM_IV)) {
    switch (EVP_CIPHER_CTX_mode(ctx)) {
      case EVP_CIPH_STREAM_CIPHER:
      case EVP_CIPH_ECB_MODE:
        break;

      case EVP_CIPH_CFB_MODE:
        ctx->num = 0;
        // fall-through

      case EVP_CIPH_CBC_MODE:
        assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv));
        if (iv) {
          OPENSSL_memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx));
        }
        OPENSSL_memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx));
        break;

      case EVP_CIPH_CTR_MODE:
      case EVP_CIPH_OFB_MODE:
        ctx->num = 0;
        // Don't reuse IV for CTR mode
        if (iv) {
          OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx));
        }
        break;

      default:
        return 0;
    }
  }

  if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) {
    if (!ctx->cipher->init(ctx, key, iv, enc)) {
      return 0;
    }
  }

  ctx->buf_len = 0;
  ctx->final_used = 0;
  ctx->block_mask = ctx->cipher->block_size - 1;
  return 1;
}

int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 1);
}

int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0);
}
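
// EVP_EncryptUpdate encrypts |in_len| bytes from |in|. Input that does not
// fill a whole block is carried over in |ctx->buf| until later calls supply
// the rest, so |*out_len| may differ from |in_len| by up to one block.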
int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  int i, j, bl;

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    i = ctx->cipher->cipher(ctx, out, in, in_len);
    if (i < 0) {
      return 0;
    } else {
      *out_len = i;
    }
    return 1;
  }

  if (in_len <= 0) {
    *out_len = 0;
    return in_len == 0;
  }

  if (ctx->buf_len == 0 && (in_len & ctx->block_mask) == 0) {
    if (ctx->cipher->cipher(ctx, out, in, in_len)) {
      *out_len = in_len;
      return 1;
    } else {
      *out_len = 0;
      return 0;
    }
  }

  i = ctx->buf_len;
  bl = ctx->cipher->block_size;
  assert(bl <= (int)sizeof(ctx->buf));
  if (i != 0) {
    if (bl - i > in_len) {
      OPENSSL_memcpy(&ctx->buf[i], in, in_len);
      ctx->buf_len += in_len;
      *out_len = 0;
      return 1;
    } else {
      j = bl - i;
      OPENSSL_memcpy(&ctx->buf[i], in, j);
      if (!ctx->cipher->cipher(ctx, out, ctx->buf, bl)) {
        return 0;
      }
      in_len -= j;
      in += j;
      out += bl;
      *out_len = bl;
    }
  } else {
    *out_len = 0;
  }

  i = in_len & ctx->block_mask;
  in_len -= i;
  if (in_len > 0) {
    if (!ctx->cipher->cipher(ctx, out, in, in_len)) {
      return 0;
    }
    *out_len += in_len;
  }

  if (i != 0) {
    OPENSSL_memcpy(ctx->buf, &in[in_len], i);
  }
  ctx->buf_len = i;
  return 1;
}
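
// EVP_EncryptFinal_ex flushes the bytes still buffered in |ctx->buf|. With
// padding enabled (the default), the final block is completed with PKCS#7
// padding: each of the |n| padding bytes holds the value |n|. With
// EVP_CIPH_NO_PADDING set, any leftover bytes are an error.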
int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  int n, ret;
  unsigned int i, b, bl;

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    ret = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (ret < 0) {
      return 0;
    } else {
      *out_len = ret;
    }
    return 1;
  }

  b = ctx->cipher->block_size;
  assert(b <= sizeof(ctx->buf));
  if (b == 1) {
    *out_len = 0;
    return 1;
  }

  bl = ctx->buf_len;
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (bl) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    return 1;
  }

  n = b - bl;
  for (i = bl; i < b; i++) {
    ctx->buf[i] = n;
  }
  ret = ctx->cipher->cipher(ctx, out, ctx->buf, b);

  if (ret) {
    *out_len = b;
  }

  return ret;
}
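
// EVP_DecryptUpdate decrypts via the same block buffering as
// EVP_EncryptUpdate but, when padding is enabled, withholds the most recent
// complete block in |ctx->final| so that EVP_DecryptFinal_ex can strip the
// padding from it.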
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  int fix_len;
  unsigned int b;

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    int r = ctx->cipher->cipher(ctx, out, in, in_len);
    if (r < 0) {
      *out_len = 0;
      return 0;
    } else {
      *out_len = r;
    }
    return 1;
  }

  if (in_len <= 0) {
    *out_len = 0;
    return in_len == 0;
  }

  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);
  }

  b = ctx->cipher->block_size;
  assert(b <= sizeof(ctx->final));

  if (ctx->final_used) {
    OPENSSL_memcpy(out, ctx->final, b);
    out += b;
    fix_len = 1;
  } else {
    fix_len = 0;
  }

  if (!EVP_EncryptUpdate(ctx, out, out_len, in, in_len)) {
    return 0;
  }

  // if we have 'decrypted' a multiple of block size, make sure
  // we have a copy of this last block
  if (b > 1 && !ctx->buf_len) {
    *out_len -= b;
    ctx->final_used = 1;
    OPENSSL_memcpy(ctx->final, &out[*out_len], b);
  } else {
    ctx->final_used = 0;
  }

  if (fix_len) {
    *out_len += b;
  }

  return 1;
}
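
// EVP_DecryptFinal_ex validates and strips the PKCS#7 padding from the block
// held back by EVP_DecryptUpdate, writing the unpadded remainder to |out|.
// As the comment below notes, this assumes the ciphertext was already
// authenticated; otherwise the check acts as a padding oracle.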
int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) {
  int i, n;
  unsigned int b;
  *out_len = 0;

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    i = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (i < 0) {
      return 0;
    } else {
      *out_len = i;
    }
    return 1;
  }

  b = ctx->cipher->block_size;
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (ctx->buf_len) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    return 1;
  }

  if (b > 1) {
    if (ctx->buf_len || !ctx->final_used) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_WRONG_FINAL_BLOCK_LENGTH);
      return 0;
    }
    assert(b <= sizeof(ctx->final));

    // The following assumes that the ciphertext has been authenticated.
    // Otherwise it provides a padding oracle.
    n = ctx->final[b - 1];
    if (n == 0 || n > (int)b) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }

    for (i = 0; i < n; i++) {
      if (ctx->final[--b] != n) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
        return 0;
      }
    }

    n = ctx->cipher->block_size - n;
    for (i = 0; i < n; i++) {
      out[i] = ctx->final[i];
    }
    *out_len = n;
  } else {
    *out_len = 0;
  }

  return 1;
}
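
// EVP_Cipher invokes the cipher's low-level |cipher| hook directly. For
// ciphers with EVP_CIPH_FLAG_CUSTOM_CIPHER the hook reports the number of
// bytes written (or a negative value on error); for other ciphers it returns
// one on success and zero on failure.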
int EVP_Cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
               size_t in_len) {
  return ctx->cipher->cipher(ctx, out, in, in_len);
}

int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                     const uint8_t *in, int in_len) {
  if (ctx->encrypt) {
    return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);
  } else {
    return EVP_DecryptUpdate(ctx, out, out_len, in, in_len);
  }
}

int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  if (ctx->encrypt) {
    return EVP_EncryptFinal_ex(ctx, out, out_len);
  } else {
    return EVP_DecryptFinal_ex(ctx, out, out_len);
  }
}

const EVP_CIPHER *EVP_CIPHER_CTX_cipher(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher;
}

int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->nid;
}

unsigned EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->block_size;
}

unsigned EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx) {
  return ctx->key_len;
}

unsigned EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->iv_len;
}

void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx) {
  return ctx->app_data;
}

void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data) {
  ctx->app_data = data;
}

uint32_t EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & ~EVP_CIPH_MODE_MASK;
}

uint32_t EVP_CIPHER_CTX_mode(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & EVP_CIPH_MODE_MASK;
}
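
// EVP_CIPHER_CTX_ctrl forwards |command|, |arg| and |ptr| to the cipher's
// ctrl hook. A hook return value of -1 is translated into a zero return with
// CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED on the error queue.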
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int command, int arg, void *ptr) {
  int ret;
  if (!ctx->cipher) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET);
    return 0;
  }

  if (!ctx->cipher->ctrl) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
    return 0;
  }

  ret = ctx->cipher->ctrl(ctx, command, arg, ptr);
  if (ret == -1) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED);
    return 0;
  }

  return ret;
}
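
// EVP_CIPHER_CTX_set_padding enables PKCS#7 padding when |pad| is non-zero
// and disables it otherwise, by clearing or setting EVP_CIPH_NO_PADDING in
// |ctx->flags|. It always returns one.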
int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) {
  if (pad) {
    ctx->flags &= ~EVP_CIPH_NO_PADDING;
  } else {
    ctx->flags |= EVP_CIPH_NO_PADDING;
  }
  return 1;
}

int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, unsigned key_len) {
  if (c->key_len == key_len) {
    return 1;
  }

  if (key_len == 0 || !(c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_KEY_LENGTH);
    return 0;
  }

  c->key_len = key_len;
  return 1;
}

int EVP_CIPHER_nid(const EVP_CIPHER *cipher) { return cipher->nid; }

unsigned EVP_CIPHER_block_size(const EVP_CIPHER *cipher) {
  return cipher->block_size;
}

unsigned EVP_CIPHER_key_length(const EVP_CIPHER *cipher) {
  return cipher->key_len;
}

unsigned EVP_CIPHER_iv_length(const EVP_CIPHER *cipher) {
  return cipher->iv_len;
}

uint32_t EVP_CIPHER_flags(const EVP_CIPHER *cipher) {
  return cipher->flags & ~EVP_CIPH_MODE_MASK;
}

uint32_t EVP_CIPHER_mode(const EVP_CIPHER *cipher) {
  return cipher->flags & EVP_CIPH_MODE_MASK;
}
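
// EVP_CipherInit zero-initializes |ctx| when a cipher is supplied and then
// behaves like EVP_CipherInit_ex with a NULL ENGINE.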
int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                   const uint8_t *key, const uint8_t *iv, int enc) {
  if (cipher) {
    EVP_CIPHER_CTX_init(ctx);
  }
  return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc);
}

int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 1);
}

int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 0);
}
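
// EVP_add_cipher_alias and EVP_CIPHER_CTX_set_flags are intentional no-ops,
// kept so that callers written against other EVP implementations still
// compile.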
int EVP_add_cipher_alias(const char *a, const char *b) {
  return 1;
}

void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, uint32_t flags) {}