[PATCH rfc -next 10/10] loongarch: mm: try VMA lock-based page fault handling first
Kefeng Wang
wangkefeng.wang at huawei.com
Thu Jul 13 02:53:38 PDT 2023
Attempt VMA lock-based page fault handling first, and fall back
to the existing mmap_lock-based handling if that fails.
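
The VM_LOCKED_FAULT_INIT() and try_vma_locked_page_fault() helpers used
below come from earlier patches in this series, not from mainline. For
illustration only, the locked attempt is assumed to work roughly like
the open-coded per-VMA lock fast paths already merged for x86/arm64:

	/*
	 * Sketch only, not the series' actual implementation: look the
	 * VMA up under RCU, let the architecture veto the access, then
	 * handle the fault with FAULT_FLAG_VMA_LOCK set.
	 */
	static vm_fault_t vma_locked_fault_sketch(struct mm_struct *mm,
						  unsigned long addr,
						  unsigned int flags,
						  struct pt_regs *regs)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault;

		vma = lock_vma_under_rcu(mm, addr);	/* no mmap_lock taken */
		if (!vma)
			return VM_FAULT_RETRY;		/* fall back to mmap_lock */

		if (access_error(flags, regs, addr, vma)) {
			vma_end_read(vma);
			return VM_FAULT_RETRY;		/* slow path reports the error */
		}

		fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
		return fault;
	}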
Signed-off-by: Kefeng Wang <wangkefeng.wang at huawei.com>
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/mm/fault.c | 26 ++++++++++++++++++++++++++
2 files changed, 27 insertions(+)
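
Note on the fall-back contract assumed here (it mirrors the existing
x86/arm64 conversions): a nonzero return from try_vma_locked_page_fault()
means the locked attempt never ran, while a zero return hands back a
fault code; only VM_FAULT_RETRY in that code sends us to the mmap_lock
slow path, so the completion test has to use a bitwise AND:

	VM_LOCKED_FAULT_INIT(vmlf, mm, address, flags, 0, regs, 0);
	if (try_vma_locked_page_fault(&vmlf, &fault))
		goto retry;			/* no locked attempt, take mmap_lock */
	else if (!(fault & VM_FAULT_RETRY))
		goto done;			/* handled fully under the VMA lock */
	/* VM_FAULT_RETRY set: fall through to the mmap_lock slow path */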
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 397203e18800..afb0ccabab97 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -53,6 +53,7 @@ config LOONGARCH
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index cde2ea0119fa..7e54bc48813e 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -136,6 +136,17 @@ static inline bool access_error(unsigned int flags, struct pt_regs *regs,
return false;
}
+#ifdef CONFIG_PER_VMA_LOCK
+int arch_vma_check_access(struct vm_area_struct *vma,
+ struct vm_locked_fault *vmlf)
+{
+ if (unlikely(access_error(vmlf->fault_flags, vmlf->regs, vmlf->address,
+ vma)))
+ return -EINVAL;
+ return 0;
+}
+#endif
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -149,6 +160,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
struct vm_area_struct *vma = NULL;
+ struct vm_locked_fault vmlf;
vm_fault_t fault;
if (kprobe_page_fault(regs, current->thread.trap_nr))
@@ -183,6 +195,19 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
flags |= FAULT_FLAG_WRITE;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+ VM_LOCKED_FAULT_INIT(vmlf, mm, address, flags, 0, regs, 0);
+ if (try_vma_locked_page_fault(&vmlf, &fault))
+ goto retry;
+ else if (!(fault & VM_FAULT_RETRY))
+ goto done;
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, address);
+ return;
+ }
+
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
@@ -223,6 +248,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
mmap_read_unlock(mm);
+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, address);
--
2.27.0