[PATCH 10/11] Drivers: hv: Add support for arm64 in MSHV_VTL

Naman Jain namjain at linux.microsoft.com
Mon Mar 16 05:12:40 PDT 2026


Add the support necessary to make MSHV_VTL work on the arm64
architecture:
* Add a stub implementation of mshv_vtl_return_call_init(); the
  function is not required on arm64.
* Remove the fpu/legacy.h header inclusion, as it is not required.
* Query the HV_REGISTER_VSM_CODE_PAGE_OFFSETS register only on x86_64,
  since it is not supported on arm64 (see the condensed snippet below).
* Configure a custom per-CPU VMBus handler via
  hv_setup_percpu_vmbus_handler() (see the reviewer note after the
  "---" marker).
* Guard the PUD-order hugepage fault path with a config check, since
  vmf_insert_pfn_pud() is only available with
  CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD.
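
For reference, the resulting query in mshv_vtl_get_vsm_regs() builds
the register list at run time, so only the registers the architecture
supports are requested (condensed from the hunk below):

	struct hv_register_assoc registers[2];
	int ret, count = 0;

	/* The VSM capabilities register is queried on all architectures. */
	registers[count++].name = HV_REGISTER_VSM_CAPABILITIES;
	/* The code page offsets register is only available on x86_64. */
	if (IS_ENABLED(CONFIG_X86_64))
		registers[count++].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;

	ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
				       count, input_vtl_zero, registers);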

Signed-off-by: Roman Kisel <romank at linux.microsoft.com>
Signed-off-by: Naman Jain <namjain at linux.microsoft.com>
---
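Reviewer note: hv_setup_vmbus_handler() is a stub on arm64, so the VTL
ISR is registered through both the global and the per-CPU paths, and
both are restored on the error and teardown paths; this keeps the same
code working on x86_64 and arm64 (condensed from the
hv_vtl_setup_synic()/hv_vtl_remove_synic() hunks below):

	/* Setup: filter out packets destined for userspace first. */
	hv_setup_vmbus_handler(mshv_vtl_vmbus_isr);
	hv_setup_percpu_vmbus_handler(mshv_vtl_vmbus_isr);

	/* Error/teardown: restore the default VMBus ISR on both paths. */
	hv_setup_percpu_vmbus_handler(vmbus_isr);
	hv_setup_vmbus_handler(vmbus_isr);
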
 arch/arm64/include/asm/mshyperv.h |  2 ++
 drivers/hv/mshv_vtl_main.c        | 21 ++++++++++++++-------
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
index 36803f0386cc..027a7f062d70 100644
--- a/arch/arm64/include/asm/mshyperv.h
+++ b/arch/arm64/include/asm/mshyperv.h
@@ -83,6 +83,8 @@ static inline int hv_vtl_get_set_reg(struct hv_register_assoc *regs, bool set, u
 	return 1;
 }
 
+static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
+
 void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
 bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu);
 #endif
diff --git a/drivers/hv/mshv_vtl_main.c b/drivers/hv/mshv_vtl_main.c
index 4c9ae65ad3e8..5702fe258500 100644
--- a/drivers/hv/mshv_vtl_main.c
+++ b/drivers/hv/mshv_vtl_main.c
@@ -23,8 +23,6 @@
 #include <trace/events/ipi.h>
 #include <uapi/linux/mshv.h>
 #include <hyperv/hvhdk.h>
-
-#include "../../kernel/fpu/legacy.h"
 #include "mshv.h"
 #include "mshv_vtl.h"
 #include "hyperv_vmbus.h"
@@ -206,18 +204,21 @@ static void mshv_vtl_synic_enable_regs(unsigned int cpu)
 static int mshv_vtl_get_vsm_regs(void)
 {
 	struct hv_register_assoc registers[2];
-	int ret, count = 2;
+	int ret, count = 0;
 
-	registers[0].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
-	registers[1].name = HV_REGISTER_VSM_CAPABILITIES;
+	registers[count++].name = HV_REGISTER_VSM_CAPABILITIES;
+	/* The code page offsets register is not supported on arm64 */
+	if (IS_ENABLED(CONFIG_X86_64))
+		registers[count++].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
 
 	ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
 				       count, input_vtl_zero, registers);
 	if (ret)
 		return ret;
 
-	mshv_vsm_page_offsets.as_uint64 = registers[0].value.reg64;
-	mshv_vsm_capabilities.as_uint64 = registers[1].value.reg64;
+	mshv_vsm_capabilities.as_uint64 = registers[0].value.reg64;
+	if (IS_ENABLED(CONFIG_X86_64))
+		mshv_vsm_page_offsets.as_uint64 = registers[1].value.reg64;
 
 	return ret;
 }
@@ -280,10 +281,13 @@ static int hv_vtl_setup_synic(void)
 
 	/* Use our isr to first filter out packets destined for userspace */
 	hv_setup_vmbus_handler(mshv_vtl_vmbus_isr);
+	/* hv_setup_vmbus_handler() is a stub on arm64; register the per-CPU VMBus handler as well */
+	hv_setup_percpu_vmbus_handler(mshv_vtl_vmbus_isr);
 
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vtl:online",
 				mshv_vtl_alloc_context, NULL);
 	if (ret < 0) {
+		hv_setup_percpu_vmbus_handler(vmbus_isr);
 		hv_setup_vmbus_handler(vmbus_isr);
 		return ret;
 	}
@@ -296,6 +300,7 @@ static int hv_vtl_setup_synic(void)
 static void hv_vtl_remove_synic(void)
 {
 	cpuhp_remove_state(mshv_vtl_cpuhp_online);
+	hv_setup_percpu_vmbus_handler(vmbus_isr);
 	hv_setup_vmbus_handler(vmbus_isr);
 }
 
@@ -1080,10 +1085,12 @@ static vm_fault_t mshv_vtl_low_huge_fault(struct vm_fault *vmf, unsigned int ord
 			ret = vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 		return ret;
 
+#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 	case PUD_ORDER:
 		if (can_fault(vmf, PUD_SIZE, &pfn))
 			ret = vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 		return ret;
+#endif
 
 	default:
 		return VM_FAULT_SIGBUS;
-- 
2.43.0