[PATCH v2 2/6] arm64: Add support for FEAT_{LS64, LS64_V}
Yicong Yang
yangyicong at huawei.com
Mon Mar 31 02:43:16 PDT 2025
From: Yicong Yang <yangyicong at hisilicon.com>
Armv8.7 introduces single-copy atomic 64-byte load and store
instructions and their variants, named FEAT_LS64 and FEAT_LS64_V.
These features are identified by ID_AA64ISAR1_EL1.LS64, and the use
of the corresponding instructions in userspace (EL0) can be trapped.
In order to support the use of these instructions in userspace:
- Make ID_AA64ISAR1_EL1.LS64 visible to userspace
- Add detection and enablement of the features in the cpufeature list
- Expose support for these features to userspace through HWCAP3
  and cpuinfo (see the example below)
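As a rough illustration (not part of this patch), userspace could test
the new hwcaps as sketched below. The sketch assumes AT_HWCAP3 (29) is
reachable through getauxval() and defines the constants locally in case
the installed headers predate them:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP3
#define AT_HWCAP3	29
#endif
#ifndef HWCAP3_LS64
#define HWCAP3_LS64	(1UL << 0)
#endif
#ifndef HWCAP3_LS64_V
#define HWCAP3_LS64_V	(1UL << 1)
#endif

int main(void)
{
	/* Older kernels/libcs simply report 0 here, i.e. no support. */
	unsigned long hwcap3 = getauxval(AT_HWCAP3);

	printf("ls64  : %s\n", (hwcap3 & HWCAP3_LS64) ? "yes" : "no");
	printf("ls64_v: %s\n", (hwcap3 & HWCAP3_LS64_V) ? "yes" : "no");
	return 0;
}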
Signed-off-by: Yicong Yang <yangyicong at hisilicon.com>
---
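Usage sketch for review purposes only (not part of the patch): once
HWCAP3_LS64 is reported, a 64-byte single-copy atomic access could be
issued through the ACLE intrinsics. This assumes a toolchain providing
__arm_ld64b()/__arm_st64b() in <arm_acle.h> (built with e.g.
-march=armv8.7-a+ls64), and that src/dst are 64-byte aligned and mapped
with a memory type that supports such accesses (typically device memory
mapped into userspace); on other memory the access may fault.

#include <arm_acle.h>

#ifdef __ARM_FEATURE_LS64
/* Copy one 64-byte block using a single LD64B and a single ST64B. */
static void copy64_atomic(void *dst, const void *src)
{
	data512_t data = __arm_ld64b(src);	/* LD64B: 64-byte load */

	__arm_st64b(dst, data);			/* ST64B: 64-byte store */
}
#endif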
Documentation/arch/arm64/booting.rst | 12 ++++++
Documentation/arch/arm64/elf_hwcaps.rst | 6 +++
arch/arm64/include/asm/hwcap.h | 2 +
arch/arm64/include/uapi/asm/hwcap.h | 2 +
arch/arm64/kernel/cpufeature.c | 51 +++++++++++++++++++++++++
arch/arm64/kernel/cpuinfo.c | 2 +
arch/arm64/tools/cpucaps | 2 +
7 files changed, 77 insertions(+)
diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index dee7b6de864f..cd26e6326146 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -483,6 +483,18 @@ Before jumping into the kernel, the following conditions must be met:
- MDCR_EL3.TPM (bit 6) must be initialized to 0b0
+ For CPUs with support for 64-byte loads and stores without status (FEAT_LS64):
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HCRX_EL2.EnALS (bit 1) must be initialised to 0b1.
+
+ For CPUs with support for 64-byte loads and stores with status (FEAT_LS64_V):
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HCRX_EL2.EnASR (bit 2) must be initialised to 0b1.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level. Where the values documented
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index 69d7afe56853..9e6db258ff48 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -435,6 +435,12 @@ HWCAP2_SME_SF8DP4
HWCAP2_POE
Functionality implied by ID_AA64MMFR3_EL1.S1POE == 0b0001.
+HWCAP3_LS64
+ Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0001.
+
+HWCAP3_LS64_V
+ Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0010.
+
4. Unused AT_HWCAP bits
-----------------------
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 1c3f9617d54f..f45ab66d3466 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -176,6 +176,8 @@
#define KERNEL_HWCAP_POE __khwcap2_feature(POE)
#define __khwcap3_feature(x) (const_ilog2(HWCAP3_ ## x) + 128)
+#define KERNEL_HWCAP_LS64 __khwcap3_feature(LS64)
+#define KERNEL_HWCAP_LS64_V __khwcap3_feature(LS64_V)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 705a7afa8e58..88579dad778d 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -143,5 +143,7 @@
/*
* HWCAP3 flags - for AT_HWCAP3
*/
+#define HWCAP3_LS64 (1UL << 0)
+#define HWCAP3_LS64_V (1UL << 1)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9c4d6d552b25..61d9d9959269 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -231,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LS64_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
@@ -2278,6 +2279,38 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
}
#endif /* CONFIG_ARM64_E0PD */
+static bool has_ls64(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ u64 ls64;
+
+ ls64 = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+ entry->field_pos, entry->sign);
+
+ if (ls64 == ID_AA64ISAR1_EL1_LS64_NI ||
+ ls64 > ID_AA64ISAR1_EL1_LS64_LS64_ACCDATA)
+ return false;
+
+ if (entry->capability == ARM64_HAS_LS64 &&
+ ls64 >= ID_AA64ISAR1_EL1_LS64_LS64)
+ return true;
+
+ if (entry->capability == ARM64_HAS_LS64_V &&
+ ls64 >= ID_AA64ISAR1_EL1_LS64_LS64_V)
+ return true;
+
+ return false;
+}
+
+static void cpu_enable_ls64(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnALS, SCTLR_EL1_EnALS);
+}
+
+static void cpu_enable_ls64_v(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnASR, SCTLR_EL1_EnASR);
+}
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
@@ -3041,6 +3074,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_pmuv3,
},
#endif
+ {
+ .desc = "LS64",
+ .capability = ARM64_HAS_LS64,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_ls64,
+ .cpu_enable = cpu_enable_ls64,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64)
+ },
+ {
+ .desc = "LS64_V",
+ .capability = ARM64_HAS_LS64_V,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_ls64,
+ .cpu_enable = cpu_enable_ls64_v,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
+ },
{},
};
@@ -3153,6 +3202,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64, CAP_HWCAP, KERNEL_HWCAP_LS64),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64_V, CAP_HWCAP, KERNEL_HWCAP_LS64_V),
HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT),
HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX),
HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 285d7d538342..a56c313e6474 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,8 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_PACA] = "paca",
[KERNEL_HWCAP_PACG] = "pacg",
[KERNEL_HWCAP_GCS] = "gcs",
+ [KERNEL_HWCAP_LS64] = "ls64",
+ [KERNEL_HWCAP_LS64_V] = "ls64_v",
[KERNEL_HWCAP_DCPODP] = "dcpodp",
[KERNEL_HWCAP_SVE2] = "sve2",
[KERNEL_HWCAP_SVEAES] = "sveaes",
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 772c1b008e43..4a6a508073d3 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -42,6 +42,8 @@ HAS_HCX
HAS_LDAPR
HAS_LPA2
HAS_LSE_ATOMICS
+HAS_LS64
+HAS_LS64_V
HAS_MOPS
HAS_NESTED_VIRT
HAS_PAN
--
2.24.0