diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index c07c15d7cc9a..a7cced5c3e67 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -16,8 +16,13 @@
 #include "guest_modes.h"
 #include "processor.h"
 
+/* Label emitted by guest_code()'s inline asm; only its address is used. */
+#if defined(__x86_64__) || defined(__aarch64__)
+extern char skip_page[];
+#endif
+
 static bool mprotect_ro_done;
 
 static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
 {
 	uint64_t gpa;
@@ -40,18 +45,24 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
	 * prematurely.
	 *
	 * For architectures that support skipping the faulting instruction,
-	 * generate the store via inline assembly to ensure the exact length
-	 * of the instruction is known and stable (vcpu_arch_put_guest() on
-	 * fixed-length architectures should work, but the cost of paranoia
-	 * is low in this case).  For x86, hand-code the exact opcode so that
-	 * there is no room for variability in the generated instruction.
+	 * generate the store via inline assembly and define the global label
+	 * skip_page immediately after it, so the host can skip the store by
+	 * pointing the PC at the label.  NOTE: the compiler may duplicate asm
+	 * blocks, e.g. when unrolling loops, which would create a duplicate
+	 * definition of skip_page; keep exactly one instance of this asm.
	 */
 	do {
 		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
 #ifdef __x86_64__
-			asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */
+			asm volatile("mov %0, (%0)\n\t"
+				     ".global skip_page\n"
+				     "skip_page:"
+				     :: "r" (gpa) : "memory");
 #elif defined(__aarch64__)
-			asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
+			asm volatile("str %0, [%0]\n\t"
+				     ".global skip_page\n"
+				     "skip_page:"
+				     :: "r" (gpa) : "memory");
 #else
 			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
 #endif
@@ -170,10 +181,10 @@ static void *vcpu_worker(void *data)
 		TEST_ASSERT_EQ(errno, EFAULT);
 #if defined(__x86_64__)
 		WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS);
-		vcpu->run->s.regs.regs.rip += 3;
+		vcpu->run->s.regs.regs.rip = (vm_vaddr_t)skip_page;
 #elif defined(__aarch64__)
 		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc),
-			     vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)) + 4);
+			     (vm_vaddr_t)skip_page);
 #endif
 	}
 