sha/asm/sha256-armv4.pl: fix compile issue in kernel and eliminate little-endian dependency.
(Imported from upstream's 51f8d095562f36cdaa6893597b5c609e943b0565.) I don't see why we'd care, but just to minimize divergence.

Change-Id: I4b07e72c88fcb04654ad28d8fd371e13d59a61b5
Reviewed-on: https://boringssl-review.googlesource.com/4466
Reviewed-by: Adam Langley <agl@google.com>
parent 90da8c8817
commit 0fd37062b6
@@ -73,7 +73,9 @@ $code.=<<___ if ($i<16);
 	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
 	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
 	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+# ifndef __ARMEB__
 	rev	$t1,$t1
+# endif
 #else
 	@ ldrb	$t1,[$inp,#3]			@ $i
 	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
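
The two added lines are the "eliminate little-endian dependency" half of the change: `rev` byte-swaps the freshly loaded message word, which is only correct on a little-endian core, so it is now skipped when `__ARMEB__` (big-endian ARM) is defined. A minimal sketch of the pattern, using r1 and r2 as stand-ins for the script's `$inp` and `$t1` registers:

	@ Sketch only, not part of the patch: produce one big-endian
	@ SHA-256 message word W[i] from the byte stream at r1.
	ldr	r2,[r1],#4		@ load 4 message bytes as a native word
# ifndef __ARMEB__
	rev	r2,r2			@ little-endian core: swap to big-endian
# endif
	@ a big-endian core already gets W[i] in the right byte order from
	@ the ldr, so the swap must be compiled out there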
@@ -166,6 +168,7 @@ $code=<<___;
 #else
 .syntax unified
 # ifdef __thumb2__
+#  define adrl adr
 .thumb
 # else
 .code	32
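
The new define covers the Thumb-2 side of the build: at least with the assemblers this file targets, `adrl` is an ARM-state pseudo-instruction and is not accepted when assembling Thumb-2 (as Thumb-2 kernel builds do), so the `adrl` used in the next hunk would otherwise fail there. Thumb-2's plain `adr` can encode offsets up to about +/-4095 bytes, which appears to be enough to reach K256 in this file, so aliasing `adrl` to `adr` keeps one source line valid in both modes. An illustrative sketch, with r3 standing in for `$Ktbl`:

	@ Illustration only. Under .thumb/.syntax unified there is no adrl,
	@ but after "# define adrl adr" the later "adrl $Ktbl,K256" becomes:
	adr	r3,K256			@ Thumb-2 ADR: add/sub of a 12-bit
					@ immediate to the aligned PC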
@@ -460,7 +463,7 @@ sha256_block_data_order_neon:
 	stmdb	sp!,{r4-r12,lr}

 	sub	$H,sp,#16*4+16
-	adr	$Ktbl,K256
+	adrl	$Ktbl,K256
 	bic	$H,$H,#15		@ align for 128-bit stores
 	mov	$t2,sp
 	mov	sp,$H			@ alloca
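
This hunk is presumably where the kernel compile issue showed up: in ARM state, `adr` is a single add/sub on the PC whose offset must fit an 8-bit immediate with an even rotation, and if K256 ends up farther than that from the NEON entry point the assembler reports an out-of-range error. `adrl` is the long-range form, emitted as two PC-relative adds, and under Thumb-2 the new `# define adrl adr` maps it back to plain `adr`. A rough sketch of the two forms, with r3 standing in for `$Ktbl` and invented offsets:

	@ Illustration only; the expansion and offsets are made up.
	adr	r3,K256			@ one instruction; fails to assemble if
					@ K256 is out of immediate range
	adrl	r3,K256			@ pseudo-op, emitted roughly as:
					@   add	r3,pc,#0x1000
					@   add	r3,r3,#0x2c8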