[PATCH v9 18/39] arm64/mm: Handle GCS data aborts
Mark Brown
broonie at kernel.org
Tue Jun 25 07:57:46 PDT 2024
All GCS operations at EL0 must happen on a page which is marked as
having UnprivGCS access, including read operations. If a GCS operation
attempts to access a page without this permission then it will generate
a data abort with the GCS bit set in ESR_EL1.ISS2.
EL0 may validly generate such faults, for example due to copy on write,
which will cause the GCS data to be stored in a read-only page with no
GCS permissions until the actual copy happens. Since UnprivGCS allows
both reads and writes to the GCS (though only through GCS operations) we
need to ensure that the memory management subsystem handles GCS accesses
as writes at all times. Do this by adding FAULT_FLAG_WRITE to any GCS
page fault, and by adding handling to ensure that invalid cases are
identified as such early so the memory management core does not think
they will succeed. The core cannot distinguish between VMAs which are
generally writeable and VMAs which are only writeable through GCS
operations.
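
For illustration only (not part of this patch), a userspace sketch of
the copy on write case described above, assuming GCS has already been
enabled for the process (normally done very early in startup by the C
library via the prctl() interface added elsewhere in this series):

/*
 * Sketch: after fork() the child shares its GCS pages with the parent,
 * so its first GCS access (the pops and pushes around ordinary calls
 * and returns) lands on a read-only page with no GCS permission and
 * takes a GCS data abort, which the kernel resolves as a write fault
 * so that copy on write gives the child its own copy.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static __attribute__((noinline)) int nested(int x)
{
	/* The return address of this call is pushed to and popped from the GCS. */
	return x + 1;
}

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		return nested(41) == 42 ? 0 : 1;

	if (pid > 0) {
		int status;

		waitpid(pid, &status, 0);
		printf("child exited with %d\n", WEXITSTATUS(status));
	}

	return 0;
}
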
EL1 may validly write to an EL0 GCS for management purposes (e.g. while
initialising it with cap tokens).
We also report any GCS fault in a VMA not marked as part of a GCS as an
access violation, causing a SIGSEGV with si_code SEGV_CPERR to be
delivered to userspace if it attempts GCS operations outside a GCS.
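
As a sketch of what userspace then sees (again not part of the patch;
SEGV_CPERR may be absent from older libc headers, so it is defined
locally below with the value from the asm-generic uapi siginfo.h as an
assumption about the installed headers):

#include <signal.h>
#include <string.h>
#include <unistd.h>

#ifndef SEGV_CPERR
#define SEGV_CPERR 10	/* Control protection fault */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	const char *msg = (info->si_code == SEGV_CPERR) ?
		"SIGSEGV: GCS access violation (SEGV_CPERR)\n" :
		"SIGSEGV: other cause\n";

	(void)sig;
	(void)ucontext;
	/* Stick to async-signal-safe calls in the handler. */
	write(STDERR_FILENO, msg, strlen(msg));
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/*
	 * A GCS operation outside a GCS VMA, or an ordinary store to a
	 * GCS page, would now be reported through the handler above
	 * with si_code SEGV_CPERR.
	 */
	return 0;
}
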
Signed-off-by: Mark Brown <broonie at kernel.org>
---
arch/arm64/mm/fault.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 451ba7cbd5ad..bdc28588163d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -486,6 +486,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
}
}
+static bool is_gcs_fault(unsigned long esr)
+{
+ if (!esr_is_data_abort(esr))
+ return false;
+
+ return ESR_ELx_ISS2(esr) & ESR_ELx_GCS;
+}
+
static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -500,6 +508,25 @@ static bool is_write_abort(unsigned long esr)
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
+static bool is_invalid_gcs_access(struct vm_area_struct *vma, u64 esr)
+{
+ if (!system_supports_gcs())
+ return false;
+
+ if (unlikely(is_gcs_fault(esr))) {
+ /* GCS accesses must be performed on a GCS page */
+ if (!(vma->vm_flags & VM_SHADOW_STACK))
+ return true;
+ if (!(vma->vm_flags & VM_WRITE))
+ return true;
+ } else if (unlikely(vma->vm_flags & VM_SHADOW_STACK)) {
+ /* Only GCS operations can write to a GCS page */
+ return is_write_abort(esr);
+ }
+
+ return false;
+}
+
static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
@@ -535,6 +562,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
/* It was exec fault */
vm_flags = VM_EXEC;
mm_flags |= FAULT_FLAG_INSTRUCTION;
+ } else if (is_gcs_fault(esr)) {
+ /*
+ * The GCS permission on a page implies both read and
+ * write so always handle any GCS fault as a write fault,
+ * we need to trigger CoW even for GCS reads.
+ */
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
} else if (is_write_abort(esr)) {
/* It was write fault */
vm_flags = VM_WRITE;
@@ -568,6 +603,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
if (!vma)
goto lock_mmap;
+ if (is_invalid_gcs_access(vma, esr)) {
+ pr_crit("INVALID GCS\n");
+ vma_end_read(vma);
+ fault = 0;
+ si_code = SEGV_CPERR;
+ goto bad_area;
+ }
+
if (!(vma->vm_flags & vm_flags)) {
vma_end_read(vma);
fault = 0;
--
2.39.2