sha/asm/sha256-armv4.pl: fix compile issue in kernel and eliminate little-endian dependency.

(Imported from upstream's 51f8d095562f36cdaa6893597b5c609e943b0565.)

I don't see why we'd care, but take it anyway to minimize divergence.

Change-Id: I4b07e72c88fcb04654ad28d8fd371e13d59a61b5
Reviewed-on: https://boringssl-review.googlesource.com/4466
Reviewed-by: Adam Langley <agl@google.com>
commit 0fd37062b6
parent 90da8c8817
Author: David Benjamin
Date:   2015-04-20 15:45:47 -04:00
Committed-by: Adam Langley


@@ -73,7 +73,9 @@ $code.=<<___ if ($i<16);
 	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
 	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
 	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+# ifndef __ARMEB__
 	rev	$t1,$t1
+# endif
 #else
 	@ ldrb	$t1,[$inp,#3]			@ $i
 	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
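
The hunk above is the little-endian fix: SHA-256 consumes its input as big-endian 32-bit words, so after a word-sized load a little-endian core must byte-swap the value (rev), while a big-endian core (__ARMEB__ defined by the compiler) can use the loaded word as-is. A minimal C sketch of that logic, mine rather than anything in the script:

    /* Illustrative sketch only, not code from sha256-armv4.pl: a word-sized
     * load (like ldr) gives a byte-swapped message word on little-endian
     * ARM, so reverse it (like rev); on big-endian ARM the word is already
     * in the order SHA-256 expects. */
    #include <stdint.h>
    #include <string.h>

    static uint32_t load_be32(const uint8_t *p) {
        uint32_t w;
        memcpy(&w, p, sizeof(w));               /* word load, like ldr */
    #ifndef __ARMEB__
        w = ((w & 0x000000ffu) << 24) | ((w & 0x0000ff00u) << 8) |
            ((w & 0x00ff0000u) >> 8)  | ((w & 0xff000000u) >> 24); /* rev */
    #endif
        return w;
    }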
@@ -166,6 +168,7 @@ $code=<<___;
 #else
 .syntax	unified
 # ifdef __thumb2__
+# define adrl adr
 .thumb
 # else
 .code	32
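
This is the kernel-compile fix. A single `adr` can only materialize PC-relative offsets that fit one instruction's immediate encoding, and the K256 table can end up out of range in some builds, so the NEON entry point below switches to the `adrl` pseudo-instruction, which the assembler expands to two instructions. GNU as accepts `adrl` only in ARM state, which is, as far as I can tell, why Thumb-2 builds define it back to plain `adr` (whose `adr.w` form already reaches far enough there). As a rough illustration of the range limit, an ARM-state immediate must be an 8-bit value rotated right by an even amount; this is my sketch, not code from the change:

    #include <stdbool.h>
    #include <stdint.h>

    /* Rough illustration: an ARM-state data-processing immediate is an 8-bit
     * value rotated right by an even amount.  That rule bounds the offsets a
     * single adr can reach; anything else needs the two-instruction adrl
     * expansion. */
    static bool fits_arm_immediate(uint32_t offset) {
        for (unsigned rot = 0; rot < 32; rot += 2) {
            /* rotating the offset left by rot undoes a rotate-right-by-rot
             * encoding; if the result fits in 8 bits, one instruction works */
            uint32_t v = (offset << rot) | (offset >> ((32u - rot) & 31u));
            if (v <= 0xffu)
                return true;
        }
        return false;
    }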
@@ -460,7 +463,7 @@ sha256_block_data_order_neon:
 	stmdb	sp!,{r4-r12,lr}
 	sub	$H,sp,#16*4+16
-	adr	$Ktbl,K256
+	adrl	$Ktbl,K256
 	bic	$H,$H,#15		@ align for 128-bit stores
 	mov	$t2,sp
 	mov	sp,$H			@ alloca
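
Beyond the adr-to-adrl swap itself, this hunk also shows the alloca trick the NEON path uses for its scratch area: drop below sp by 16 words plus slack, then clear the low four address bits so 128-bit stores land on 16-byte boundaries. In C terms (an illustrative sketch of those three instructions, not the actual code):

    #include <stdint.h>

    /* Illustrative sketch: reserve a 16-word (16*4-byte) scratch area plus
     * 16 bytes of slack below the stack pointer, then clear the low four
     * address bits (bic #15) so 128-bit NEON stores are 16-byte aligned. */
    static uint32_t *neon_scratch(uint8_t *sp) {
        uintptr_t h = (uintptr_t)sp - (16 * 4 + 16);  /* sub $H,sp,#16*4+16 */
        h &= ~(uintptr_t)15;                          /* bic $H,$H,#15 */
        return (uint32_t *)h;                         /* mov sp,$H @ alloca */
    }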