[PATCH] crypto: arm64 - Drop asm fallback macros for older binutils
Ard Biesheuvel
ardb+git at google.com
Thu May 15 07:27:03 PDT 2025
From: Ard Biesheuvel <ardb at kernel.org>
Now that the oldest supported binutils version is 2.30, the asm macros
that implement the various crypto opcodes for SHA-512, SHA-3, SM3 and
SM4 are no longer needed, so drop them.
Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
The binutils version bump is queued up in -next, so I suppose this could
be queued up for the next cycle too.
arch/arm64/crypto/sha3-ce-core.S | 24 +------------
arch/arm64/crypto/sha512-ce-core.S | 21 +-----------
arch/arm64/crypto/sm3-ce-core.S | 36 ++------------------
arch/arm64/crypto/sm4-ce-ccm-core.S | 10 +-----
arch/arm64/crypto/sm4-ce-core.S | 15 +-------
arch/arm64/crypto/sm4-ce-gcm-core.S | 10 +-----
6 files changed, 8 insertions(+), 108 deletions(-)
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
index 9c77313f5a60..61623c7ad3a1 100644
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -12,29 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
- .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
- .set .Lv\b\().2d, \b
- .set .Lv\b\().16b, \b
- .endr
-
- /*
- * ARMv8.2 Crypto Extensions instructions
- */
- .macro eor3, rd, rn, rm, ra
- .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
- .endm
-
- .macro rax1, rd, rn, rm
- .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
-
- .macro bcax, rd, rn, rm, ra
- .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
- .endm
-
- .macro xar, rd, rn, rm, imm6
- .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16)
- .endm
+ .arch armv8-a+sha3
/*
* int sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index 91ef68b15fcc..deb2469ab631 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -12,26 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
- .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
- .set .Lq\b, \b
- .set .Lv\b\().2d, \b
- .endr
-
- .macro sha512h, rd, rn, rm
- .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
-
- .macro sha512h2, rd, rn, rm
- .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
-
- .macro sha512su0, rd, rn
- .inst 0xcec08000 | .L\rd | (.L\rn << 5)
- .endm
-
- .macro sha512su1, rd, rn, rm
- .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
+ .arch armv8-a+sha3
/*
* The SHA-512 round constants
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S
index ca70cfacd0d0..94a97ca367f0 100644
--- a/arch/arm64/crypto/sm3-ce-core.S
+++ b/arch/arm64/crypto/sm3-ce-core.S
@@ -9,44 +9,14 @@
#include <linux/cfi_types.h>
#include <asm/assembler.h>
- .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
- .set .Lv\b\().4s, \b
- .endr
-
- .macro sm3partw1, rd, rn, rm
- .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
-
- .macro sm3partw2, rd, rn, rm
- .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
- .endm
-
- .macro sm3ss1, rd, rn, rm, ra
- .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
- .endm
-
- .macro sm3tt1a, rd, rn, rm, imm2
- .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
- .endm
-
- .macro sm3tt1b, rd, rn, rm, imm2
- .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
- .endm
-
- .macro sm3tt2a, rd, rn, rm, imm2
- .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
- .endm
-
- .macro sm3tt2b, rd, rn, rm, imm2
- .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
- .endm
+ .arch armv8-a+sm4
.macro round, ab, s0, t0, t1, i
sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s
shl \t1\().4s, \t0\().4s, #1
sri \t1\().4s, \t0\().4s, #31
- sm3tt1\ab v8.4s, v5.4s, v10.4s, \i
- sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i
+ sm3tt1\ab v8.4s, v5.4s, v10.s[\i]
+ sm3tt2\ab v9.4s, v5.4s, \s0\().s[\i]
.endm
.macro qround, ab, s0, s1, s2, s3, s4
diff --git a/arch/arm64/crypto/sm4-ce-ccm-core.S b/arch/arm64/crypto/sm4-ce-ccm-core.S
index fa85856f33ce..b658cf2577d1 100644
--- a/arch/arm64/crypto/sm4-ce-ccm-core.S
+++ b/arch/arm64/crypto/sm4-ce-ccm-core.S
@@ -12,15 +12,7 @@
#include <asm/assembler.h>
#include "sm4-ce-asm.h"
-.arch armv8-a+crypto
-
-.irp b, 0, 1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31
- .set .Lv\b\().4s, \b
-.endr
-
-.macro sm4e, vd, vn
- .inst 0xcec08400 | (.L\vn << 5) | .L\vd
-.endm
+.arch armv8-a+sm4
/* Register macros */
diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S
index 1f3625c2c67e..dd4e86b0a526 100644
--- a/arch/arm64/crypto/sm4-ce-core.S
+++ b/arch/arm64/crypto/sm4-ce-core.S
@@ -12,20 +12,7 @@
#include <asm/assembler.h>
#include "sm4-ce-asm.h"
-.arch armv8-a+crypto
-
-.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
- 20, 24, 25, 26, 27, 28, 29, 30, 31
- .set .Lv\b\().4s, \b
-.endr
-
-.macro sm4e, vd, vn
- .inst 0xcec08400 | (.L\vn << 5) | .L\vd
-.endm
-
-.macro sm4ekey, vd, vn, vm
- .inst 0xce60c800 | (.L\vm << 16) | (.L\vn << 5) | .L\vd
-.endm
+.arch armv8-a+sm4
/* Register macros */
diff --git a/arch/arm64/crypto/sm4-ce-gcm-core.S b/arch/arm64/crypto/sm4-ce-gcm-core.S
index 347f25d75727..92d26d8a9254 100644
--- a/arch/arm64/crypto/sm4-ce-gcm-core.S
+++ b/arch/arm64/crypto/sm4-ce-gcm-core.S
@@ -13,15 +13,7 @@
#include <asm/assembler.h>
#include "sm4-ce-asm.h"
-.arch armv8-a+crypto
-
-.irp b, 0, 1, 2, 3, 24, 25, 26, 27, 28, 29, 30, 31
- .set .Lv\b\().4s, \b
-.endr
-
-.macro sm4e, vd, vn
- .inst 0xcec08400 | (.L\vn << 5) | .L\vd
-.endm
+ .arch armv8-a+sm4+aes
/* Register macros */
--
2.49.0.1101.gccaa498523-goog
More information about the linux-arm-kernel
mailing list