The ADRL pseudo instruction is not an architectural construct, but a convenience macro that was supported by the proprietary ARM assembler and adopted by binutils GAS as well, although only when assembling in 32-bit ARM mode. It can therefore only be used in assembler code that is known to assemble in ARM mode only. As it turns out, the Clang assembler does not implement ADRL at all, so it is better to get rid of it entirely.
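For illustration, this is roughly what is going on (a sketch, not the exact encodings the assembler picks; imm_lo/imm_hi stand in for the rotated 8-bit immediates it chooses): in ARM mode, the assembler expands ADRL into a pair of PC-relative ADD (or SUB) instructions, which is what gives it a wider reach than plain ADR:

	@ adrl	r14,K256		@ pseudo instruction, ARM mode only
	@ is assembled as something like:
	add	r14,pc,#imm_lo		@ placeholder: first part of the PC-relative offset
	add	r14,r14,#imm_hi		@ placeholder: second part, extending the range

Plain ADR is a single instruction with a much smaller reach, so the replacement below points it at a nearby label and folds the remaining distance, which is a constant known at assembly time, into the immediate of an explicit ADD.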
So replace the ADRL instruction with an ADR instruction that refers to
a nearer symbol, and apply the delta explicitly using an additional
instruction.

Cc: Nick Desaulniers <ndesaulni...@google.com>
Cc: Stefan Agner <ste...@agner.ch>
Cc: Peter Smith <peter.sm...@arm.com>
Signed-off-by: Ard Biesheuvel <a...@kernel.org>
---
I will leave it to the Clang folks to decide whether this needs to be
backported and how far, but a Cc stable seems reasonable here.

 arch/arm/crypto/sha256-armv4.pl       | 4 ++--
 arch/arm/crypto/sha256-core.S_shipped | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index 9f96ff48e4a8..8aeb2e82f915 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -175,7 +175,6 @@ $code=<<___;
 #else
 .syntax unified
 # ifdef __thumb2__
-# define adrl adr
 .thumb
 # else
 .code	32
@@ -471,7 +470,8 @@ sha256_block_data_order_neon:
 	stmdb	sp!,{r4-r12,lr}

 	sub	$H,sp,#16*4+16
-	adrl	$Ktbl,K256
+	adr	$Ktbl,.Lsha256_block_data_order
+	add	$Ktbl,$Ktbl,#K256-.Lsha256_block_data_order
 	bic	$H,$H,#15		@ align for 128-bit stores
 	mov	$t2,sp
 	mov	sp,$H			@ alloca
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index ea04b2ab0c33..1861c4e8a5ba 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -56,7 +56,6 @@
 #else
 .syntax unified
 # ifdef __thumb2__
-# define adrl adr
 .thumb
 # else
 .code	32
@@ -1885,7 +1884,8 @@ sha256_block_data_order_neon:
 	stmdb	sp!,{r4-r12,lr}

 	sub	r11,sp,#16*4+16
-	adrl	r14,K256
+	adr	r14,.Lsha256_block_data_order
+	add	r14,r14,#K256-.Lsha256_block_data_order
 	bic	r11,r11,#15		@ align for 128-bit stores
 	mov	r12,sp
 	mov	sp,r11			@ alloca
--
2.17.1