[PATCH 4/6] KVM: arm64: Move nVHE-only helpers into kvm/stacktrace.c

Marc Zyngier maz at kernel.org
Wed Jul 27 07:29:04 PDT 2022


kvm_nvhe_stack_kern_va() only makes sense as part of the nVHE
unwinder, so simply move it there.

Signed-off-by: Marc Zyngier <maz at kernel.org>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 41 ------------------------
 arch/arm64/kvm/stacktrace.c              | 41 ++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index a096216d8970..d5527b600390 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -49,47 +49,6 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overf
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
 DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
-/*
- * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
- *
- * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
- * allow for guard pages below the stack. Consequently, the fixed offset address
- * translation macros won't work here.
- *
- * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
- * stack base.
- *
- * Returns true on success and updates @addr to its corresponding kernel VA;
- * otherwise returns false.
- */
-static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr,
-					  enum stack_type type)
-{
-	struct kvm_nvhe_stacktrace_info *stacktrace_info;
-	unsigned long hyp_base, kern_base, hyp_offset;
-
-	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
-
-	switch (type) {
-	case STACK_TYPE_HYP:
-		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-		hyp_base = (unsigned long)stacktrace_info->stack_base;
-		break;
-	case STACK_TYPE_OVERFLOW:
-		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
-		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
-		break;
-	default:
-		return false;
-	}
-
-	hyp_offset = *addr - hyp_base;
-
-	*addr = kern_base + hyp_offset;
-
-	return true;
-}
-
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
 #endif	/* __KVM_NVHE_HYPERVISOR__ */
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index 4d5fec3175ff..417665854f86 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -21,6 +21,47 @@
 
 #include <asm/stacktrace/nvhe.h>
 
+/*
+ * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
+ *
+ * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
+ * allow for guard pages below the stack. Consequently, the fixed offset address
+ * translation macros won't work here.
+ *
+ * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
+ * stack base.
+ *
+ * Returns true on success and updates @addr to its corresponding kernel VA;
+ * otherwise returns false.
+ */
+static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
+				   enum stack_type type)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	unsigned long hyp_base, kern_base, hyp_offset;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	switch (type) {
+	case STACK_TYPE_HYP:
+		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
+		hyp_base = (unsigned long)stacktrace_info->stack_base;
+		break;
+	case STACK_TYPE_OVERFLOW:
+		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
+		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
+		break;
+	default:
+		return false;
+	}
+
+	hyp_offset = *addr - hyp_base;
+
+	*addr = kern_base + hyp_offset;
+
+	return true;
+}
+
 static bool on_overflow_stack(unsigned long sp, unsigned long size,
 			      struct stack_info *info)
 {
-- 
2.34.1
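
For readers unfamiliar with the private-VA stack mapping, here is a minimal, self-contained sketch (plain userspace C, not kernel code) of the offset-based translation that the comment on kvm_nvhe_stack_kern_va() describes: a HYP VA on one of the hypervisor stacks is converted to a kernel VA by reusing its offset from that stack's base. The stack bases, the frame-pointer value, and the names stack_bases/stack_kern_va are made-up for illustration only; they are not kernel symbols or real mappings.

/*
 * Standalone illustration of the offset-based translation: kernel VA =
 * kernel stack base + (HYP VA - HYP stack base). All addresses are
 * invented example values.
 */
#include <stdbool.h>
#include <stdio.h>

enum stack_type { STACK_TYPE_HYP, STACK_TYPE_OVERFLOW, NR_STACK_TYPES };

struct stack_bases {
	unsigned long hyp_base;   /* stack base in the HYP private VA range */
	unsigned long kern_base;  /* the same stack's base in the kernel VA space */
};

/* Hypothetical per-type bases; in the kernel these come from per-CPU data. */
static const struct stack_bases bases[NR_STACK_TYPES] = {
	[STACK_TYPE_HYP]      = { 0xffffffc008000000UL, 0xffff800010000000UL },
	[STACK_TYPE_OVERFLOW] = { 0xffffffc008010000UL, 0xffff800010008000UL },
};

static bool stack_kern_va(unsigned long *addr, enum stack_type type)
{
	unsigned long hyp_offset;

	if (type >= NR_STACK_TYPES)
		return false;

	/* Offset of the address within its stack, measured from the HYP base. */
	hyp_offset = *addr - bases[type].hyp_base;

	/* Rebase that offset onto the kernel-side mapping of the same stack. */
	*addr = bases[type].kern_base + hyp_offset;

	return true;
}

int main(void)
{
	unsigned long fp = 0xffffffc008000f70UL; /* frame pointer on the HYP stack */

	if (stack_kern_va(&fp, STACK_TYPE_HYP))
		printf("kernel VA: 0x%lx\n", fp);

	return 0;
}

The point of the rebasing is the one the comment makes: because the stacks live in the flexible 'private' HYP VA range (to allow guard pages below them), a fixed-offset HYP-to-kernel translation cannot be used, so the per-stack base pair is needed.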