#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for PowerISA v2.07.
#
# July 2014
#
# Accurate performance measurements are problematic, because it's
# always virtualized setup with possibly throttled processor.
# Relative comparison is therefore more informative. This initial
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
# faster than "4-bit" integer-only compiler-generated 64-bit code.
# "Initial version" means that there is room for further improvement.
#
# May 2016
#
# 2x aggregated reduction improves performance by 50% (resulting
# performance on POWER8 is 1 cycle per processed byte), and 4x
# aggregated reduction - by 170% or 2.7x (resulting in 0.55 cpb).
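#
# A sketch of the idea (explanatory note, not part of the original
# header): GHASH iterates Xi = (Xi-1 ^ inp[i])*H over GF(2^128).
# "4x aggregated reduction" evaluates four iterations at once as
#
#	Xi+4 = (Xi ^ inp[1])*H^4 ^ inp[2]*H^3 ^ inp[3]*H^2 ^ inp[4]*H
#
# paying for a single modular reduction per four blocks; that is why
# gcm_init_p8 below pre-computes and stores H^2, H^3 and H^4.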
$flavour=shift;
$output =shift;

if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$POP="ld";
	$PUSH="std";
	$UCMP="cmpld";
	$SHRI="srdi";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$POP="lwz";
	$PUSH="stw";
	$UCMP="cmplw";
	$SHRI="srwi";
} else { die "nonsense $flavour"; }
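
# A hypothetical invocation, for illustration only (the flavour names
# are assumed from OpenSSL's other perlasm scripts):
#
#	perl ghashp8-ppc.pl linux64le ghashp8-ppc.s
#
# A flavour matching /64/ selects the 64-bit ABI, and a trailing "le"
# selects the little-endian code paths (see the le?/be? prefixes and
# the post-processing loop at the bottom of this file).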
$sp="r1";
$FRAME=6*$SIZE_T+13*16;	# 13*16 is for v20-v31 offload

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";
my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6));	# argument block

my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
my $vrsave="r12";

$code=<<___;
.machine	"any"

.text

.globl	.gcm_init_p8
.align	5
.gcm_init_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$H,0,r4			# load H
	vspltisb	$xC2,-16		# 0xf0
	vspltisb	$t0,1			# one
	vaddubm		$xC2,$xC2,$xC2		# 0xe0
	vxor		$zero,$zero,$zero
	vor		$xC2,$xC2,$t0		# 0xe1
	vsldoi		$xC2,$xC2,$zero,15	# 0xe1...
	vsldoi		$t1,$zero,$t0,1		# ...1
	vaddubm		$xC2,$xC2,$xC2		# 0xc2...
	vspltisb	$t2,7
	vor		$xC2,$xC2,$t1		# 0xc2....01
	vspltb		$t1,$H,0		# most significant byte
	vsl		$H,$H,$t0		# H<<=1
	vsrab		$t1,$t1,$t2		# broadcast carry bit
	vand		$t1,$t1,$xC2
	vxor		$IN,$H,$t1		# twisted H

	vsldoi		$H,$IN,$IN,8		# twist even more ...
	vsldoi		$xC2,$zero,$xC2,8	# 0xc2.0
	vsldoi		$Hl,$zero,$H,8		# ... and split
	vsldoi		$Hh,$H,$zero,8
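
	# Note (added sketch, not in the original source): the sequence
	# above computes the "twisted" H, i.e. H*x reduced modulo the
	# GCM polynomial g(x)=x^128+x^7+x^2+x+1: H is shifted left one
	# bit and the carry out of the top bit, broadcast by vsrab, is
	# folded back in with the 0xc2...01 constant. The twist lets
	# the vpmsumd-based multiplications defer part of the reduction.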
	stvx_u		$xC2,0,r3		# save pre-computed table
	stvx_u		$Hl,r8,r3
	li		r8,0x40
	stvx_u		$H, r9,r3
	li		r9,0x50
	stvx_u		$Hh,r10,r3
	li		r10,0x60

	vpmsumd		$Xl,$IN,$Hl		# H.lo·H.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·H.lo+H.lo·H.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·H.hi
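
	# A sketch of the two-phase reduction below (annotation added
	# here, not in the original): the 256-bit product $Xh:$Xm:$Xl
	# is first folded to 128 bits by xor-ing the halves of $Xm into
	# $Xl and $Xh, then reduced modulo g(x) with two carry-less
	# multiplications by the 0xc2... constant. The same pattern
	# recurs throughout this module.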
	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$IN1,$Xl,$t1
	vsldoi		$H2,$IN1,$IN1,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$H2l,r8,r3		# save H^2
	li		r8,0x70
	stvx_u		$H2,r9,r3
	li		r9,0x80
	stvx_u		$H2h,r10,r3
	li		r10,0x90
___
{
my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
$code.=<<___;
	vpmsumd		$Xl,$IN,$H2l		# H.lo·H^2.lo
	vpmsumd		$Xl1,$IN1,$H2l		# H^2.lo·H^2.lo
	vpmsumd		$Xm,$IN,$H2		# H.hi·H^2.lo+H.lo·H^2.hi
	vpmsumd		$Xm1,$IN1,$H2		# H^2.hi·H^2.lo+H^2.lo·H^2.hi
	vpmsumd		$Xh,$IN,$H2h		# H.hi·H^2.hi
	vpmsumd		$Xh1,$IN1,$H2h		# H^2.hi·H^2.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$t6,$Xl1,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vsldoi		$t4,$Xm1,$zero,8
	vsldoi		$t5,$zero,$Xm1,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1
	vxor		$Xl1,$Xl1,$t4
	vxor		$Xh1,$Xh1,$t5

	vsldoi		$Xl,$Xl,$Xl,8
	vsldoi		$Xl1,$Xl1,$Xl1,8
	vxor		$Xl,$Xl,$t2
	vxor		$Xl1,$Xl1,$t6

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vsldoi		$t5,$Xl1,$Xl1,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vpmsumd		$Xl1,$Xl1,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$t5,$t5,$Xh1
	vxor		$Xl,$Xl,$t1
	vxor		$Xl1,$Xl1,$t5

	vsldoi		$H,$Xl,$Xl,8
	vsldoi		$H2,$Xl1,$Xl1,8
	vsldoi		$Hl,$zero,$H,8
	vsldoi		$Hh,$H,$zero,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$Hl,r8,r3		# save H^3
	li		r8,0xa0
	stvx_u		$H,r9,r3
	li		r9,0xb0
	stvx_u		$Hh,r10,r3
	li		r10,0xc0
	stvx_u		$H2l,r8,r3		# save H^4
	stvx_u		$H2,r9,r3
	stvx_u		$H2h,r10,r3
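
	# Key table layout at this point (offsets from r3, reconstructed
	# from the stores above and noted here for reference): 0x00 the
	# 0xc2 constant, 0x10-0x30 H^1 (lo,mid,hi), 0x40-0x60 H^2,
	# 0x70-0x90 H^3, 0xa0-0xc0 H^4.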
	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_init_p8,.-.gcm_init_p8
___
}

$code.=<<___;
.globl	.gcm_gmult_p8
.align	5
.gcm_gmult_p8:
	lis		r0,0xfff8
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$IN,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	le?vxor		$lemask,$lemask,$t0
	lvx_u		$xC2,0,$Htbl
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$zero,$zero,$zero

	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_gmult_p8,.-.gcm_gmult_p8
.globl	.gcm_ghash_p8
.align	5
.gcm_ghash_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$Xl,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	li		r8,0x40
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	li		r9,0x50
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	li		r10,0x60
	le?vxor		$lemask,$lemask,$t0
	lvx_u		$xC2,0,$Htbl
	le?vperm	$Xl,$Xl,$Xl,$lemask
	vxor		$zero,$zero,$zero

	${UCMP}i	$len,64
	bge		Lgcm_ghash_p8_4x

	lvx_u		$IN,0,$inp
	addi		$inp,$inp,16
	subic.		$len,$len,16
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$IN,$IN,$Xl
	beq		Lshort

	lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,16
	lvx_u		$H2, r9,$Htbl
	add		r9,$inp,$len		# end of input
	lvx_u		$H2h,r10,$Htbl
	be?b		Loop_2x
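
	# Loop_2x consumes two blocks (32 bytes) per iteration. A
	# sketch of the subic/subfe/and trick inside it (comment added
	# here): if subtracting 32 from $len borrows, i.e. only one
	# block remains, subfe turns r0 into an all-ones mask, and the
	# "and"+"add" pair steps $inp back by 16 so the final 16-byte
	# block is still picked up, without a separate tail code path.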
.align	5
Loop_2x:
	lvx_u		$IN1,0,$inp
	le?vperm	$IN1,$IN1,$IN1,$lemask

	subic		$len,$len,32
	vpmsumd		$Xl,$IN,$H2l		# H^2.lo·Xi.lo
	vpmsumd		$Xl1,$IN1,$Hl		# H.lo·Xi+1.lo
	subfe		r0,r0,r0		# borrow?-1:0
	vpmsumd		$Xm,$IN,$H2		# H^2.hi·Xi.lo+H^2.lo·Xi.hi
	vpmsumd		$Xm1,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+1.hi
	and		r0,r0,$len
	vpmsumd		$Xh,$IN,$H2h		# H^2.hi·Xi.hi
	vpmsumd		$Xh1,$IN1,$Hh		# H.hi·Xi+1.hi
	add		$inp,$inp,r0

	vxor		$Xl,$Xl,$Xl1
	vxor		$Xm,$Xm,$Xm1

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh1
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2
	lvx_u		$IN,r8,$inp
	addi		$inp,$inp,32

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$t1,$t1,$Xh
	vxor		$IN,$IN,$t1
	vxor		$IN,$IN,$Xl
	$UCMP		r9,$inp
	bgt		Loop_2x			# done yet?

	cmplwi		$len,0
	bne		Leven
Lshort:
	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh

Leven:
	vxor		$Xl,$Xl,$t1
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,4,0
	.long		0
___
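
# The 4x code path below aggregates four blocks per iteration using
# H through H^4 from the key table; it needs v20-v31, which is why
# $FRAME above reserves 13*16 bytes for the AltiVec register offload.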
{
my ($Xl3,$Xm2,$IN2,$H3l,$H3,$H3h,
    $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
my $IN0=$IN;
my ($H21l,$H21h,$loperm,$hiperm) = ($Hl,$Hh,$H2l,$H2h);

$code.=<<___;
.align	5
.gcm_ghash_p8_4x:
Lgcm_ghash_p8_4x:
	$STU		$sp,-$FRAME($sp)
	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	stvx		v20,r10,$sp
	addi		r10,r10,32
	stvx		v21,r11,$sp
	addi		r11,r11,32
	stvx		v22,r10,$sp
	addi		r10,r10,32
	stvx		v23,r11,$sp
	addi		r11,r11,32
	stvx		v24,r10,$sp
	addi		r10,r10,32
	stvx		v25,r11,$sp
	addi		r11,r11,32
	stvx		v26,r10,$sp
	addi		r10,r10,32
	stvx		v27,r11,$sp
	addi		r11,r11,32
	stvx		v28,r10,$sp
	addi		r10,r10,32
	stvx		v29,r11,$sp
	addi		r11,r11,32
	stvx		v30,r10,$sp
	li		r10,0x60
	stvx		v31,r11,$sp
	li		r0,-1
	stw		$vrsave,`$FRAME-4`($sp)	# save vrsave
	mtspr		256,r0			# preserve all AltiVec registers
	lvsl		$t0,0,r8		# 0x0001..0e0f
	#lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,0x70
	lvx_u		$H2, r9,$Htbl
	li		r9,0x80
	vspltisb	$t1,8			# 0x0808..0808
	#lvx_u		$H2h,r10,$Htbl
	li		r10,0x90
	lvx_u		$H3l,r8,$Htbl		# load H^3
	li		r8,0xa0
	lvx_u		$H3, r9,$Htbl
	li		r9,0xb0
	lvx_u		$H3h,r10,$Htbl
	li		r10,0xc0
	lvx_u		$H4l,r8,$Htbl		# load H^4
	li		r8,0x10
	lvx_u		$H4, r9,$Htbl
	li		r9,0x20
	lvx_u		$H4h,r10,$Htbl
	li		r10,0x30

	vsldoi		$t2,$zero,$t1,8		# 0x0000..0808
	vaddubm		$hiperm,$t0,$t2		# 0x0001..1617
	vaddubm		$loperm,$t1,$hiperm	# 0x0809..1e1f
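
	# Sketch of the constants just built (annotation added here,
	# not in the original): $hiperm (0x0001..1617) gathers the high
	# doublewords and $loperm (0x0809..1e1f) the low doublewords of
	# a register pair, so a single vperm interleaves H with H^2 (or
	# Xi+2 with Xi+3) and one vpmsumd then performs the multiplies
	# for two blocks at once.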
	$SHRI		$len,$len,4		# this allows to use sign bit
						# as carry
	lvx_u		$IN0,0,$inp		# load input
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,8
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask

	vxor		$Xh,$IN0,$Xl

	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vperm		$H21l,$H2,$H,$hiperm
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$H21h,$H2,$H,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm
	vpmsumd		$Xm2,$IN2,$H2		# H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+2.lo+H.lo·Xi+3.lo
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+2.hi+H.hi·Xi+3.hi

	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh3,$Xh3,$Xh1

	blt		Ltail_4x

Loop_4x:
	lvx_u		$IN0,0,$inp
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,4
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi
	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3
	vxor		$Xh,$Xh,$Xh3
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$Xl3,$t0,$H21l		# H.lo·Xi+3.lo  +H^2.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H.hi·Xi+3.hi  +H^2.hi·Xi+2.hi

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xm2,$IN2,$H2		# H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xl,$Xl,$xC2

	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xh3,$Xh3,$Xh1
	vxor		$Xh,$Xh,$IN0
	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xh,$Xh,$t1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh,$Xh,$Xl
	bge		Loop_4x
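
	# Ltail_4x finishes the current aggregated group: one last set
	# of multiplies by H^4..H and a final two-phase reduction. The
	# Lone/Ltwo/Lthree stubs further down re-point $H4l/$H4/$H4h
	# (and pre-compute $Xl3/$Xm3/$Xh3) so that 1-3 leftover blocks
	# can reuse this same tail. (Summary comment added here.)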
Ltail_4x:
	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh3
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	addic.		$len,$len,4
	beq		Ldone_4x

	lvx_u		$IN0,0,$inp
	${UCMP}i	$len,2
	li		$len,-4
	blt		Lone
	lvx_u		$IN1,r8,$inp
	beq		Ltwo

Lthree:
	lvx_u		$IN2,r9,$inp
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask

	vxor		$Xh,$IN0,$Xl
	vmr		$H4l,$H3l
	vmr		$H4, $H3
	vmr		$H4h,$H3h

	vperm		$t0,$IN1,$IN2,$loperm
	vperm		$t1,$IN1,$IN2,$hiperm
	vpmsumd		$Xm2,$IN1,$H2		# H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo
	vpmsumd		$Xm3,$IN2,$H		# H.hi·Xi+2.lo  +H.lo·Xi+2.hi
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+1.lo+H.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+1.hi+H.hi·Xi+2.hi

	vxor		$Xm3,$Xm3,$Xm2
	b		Ltail_4x

.align	4
Ltwo:
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask

	vxor		$Xh,$IN0,$Xl
	vperm		$t0,$zero,$IN1,$loperm
	vperm		$t1,$zero,$IN1,$hiperm

	vsldoi		$H4l,$zero,$H2,8
	vmr		$H4, $H2
	vsldoi		$H4h,$H2,$zero,8

	vpmsumd		$Xl3,$t0, $H21l		# H.lo·Xi+1.lo
	vpmsumd		$Xm3,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+2.hi
	vpmsumd		$Xh3,$t1, $H21h		# H.hi·Xi+1.hi

	b		Ltail_4x

.align	4
Lone:
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vsldoi		$H4l,$zero,$H,8
	vmr		$H4, $H
	vsldoi		$H4h,$H,$zero,8

	vxor		$Xh,$IN0,$Xl
	vxor		$Xl3,$Xl3,$Xl3
	vxor		$Xm3,$Xm3,$Xm3
	vxor		$Xh3,$Xh3,$Xh3

	b		Ltail_4x
Ldone_4x:
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	mtspr		256,$vrsave
	lvx		v20,r10,$sp
	addi		r10,r10,32
	lvx		v21,r11,$sp
	addi		r11,r11,32
	lvx		v22,r10,$sp
	addi		r10,r10,32
	lvx		v23,r11,$sp
	addi		r11,r11,32
	lvx		v24,r10,$sp
	addi		r10,r10,32
	lvx		v25,r11,$sp
	addi		r11,r11,32
	lvx		v26,r10,$sp
	addi		r10,r10,32
	lvx		v27,r11,$sp
	addi		r11,r11,32
	lvx		v28,r10,$sp
	addi		r10,r10,32
	lvx		v29,r11,$sp
	addi		r11,r11,32
	lvx		v30,r10,$sp
	lvx		v31,r11,$sp
	addi		$sp,$sp,$FRAME
	blr
	.long		0
	.byte		0,12,0x04,0,0x80,0,4,0
	.long		0
___
}
$code.=<<___;
.size	.gcm_ghash_p8,.-.gcm_ghash_p8

.asciz	"GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
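
# Post-process the generated code: evaluate `...` expressions and
# resolve the le?/be? prefixes used above, keeping instructions that
# match the target endianness and commenting out ("#le#"/"#be#") the
# others.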
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	if ($flavour =~ /le$/o) {	# little-endian
	    s/le\?//o		or
	    s/be\?/#be#/o;
	} else {
	    s/le\?/#le#/o	or
	    s/be\?//o;
	}
	print $_,"\n";
}

close STDOUT;		# enforce flush