[RFC PATCH v2 3/9] KVM: arm64: nv: selftests: Enable hypervisor timer tests to run in vEL2
Ganapatrao Kulkarni
gankulkarni at os.amperecomputing.com
Mon May 12 03:52:45 PDT 2025
Add the required changes to enable and test the HVTIMER and HPTIMER in
vEL2. By default, the PTIMER and VTIMER are validated; with NV enabled
(argument "-g 1"), the HPTIMER and HVTIMER are validated by injecting
the respective timer interrupts.
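For example, with NV enabled (the invocation below is illustrative; the
binary name and options follow the existing arch_timer selftest):

  ./arch_timer -g 1

The default invocation (without "-g") continues to validate the VTIMER
and PTIMER as before.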
Signed-off-by: Ganapatrao Kulkarni <gankulkarni at os.amperecomputing.com>
---
tools/testing/selftests/kvm/arch_timer.c | 8 +-
.../testing/selftests/kvm/arm64/arch_timer.c | 118 +++++++++++++++---
.../selftests/kvm/include/arm64/arch_timer.h | 16 +++
.../selftests/kvm/include/timer_test.h | 1 +
4 files changed, 123 insertions(+), 20 deletions(-)
diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c
index acb2cb596332..5c30bda0462b 100644
--- a/tools/testing/selftests/kvm/arch_timer.c
+++ b/tools/testing/selftests/kvm/arch_timer.c
@@ -35,6 +35,7 @@ struct test_args test_args = {
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US,
.reserved = 1,
+ .is_nested = false,
};
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
@@ -43,6 +44,7 @@ struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
+bool is_nested;
static void *test_vcpu_run(void *arg)
{
@@ -193,6 +195,7 @@ static void test_print_help(char *name)
pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n");
pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n",
TIMER_TEST_ERR_MARGIN_US);
+ pr_info("\t-g: Enable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n");
pr_info("\t-h: print this help screen\n");
}
@@ -200,7 +203,7 @@ static bool parse_args(int argc, char *argv[])
{
int opt;
- while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) {
+ while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:g:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
@@ -226,6 +229,9 @@ static bool parse_args(int argc, char *argv[])
test_args.counter_offset = strtol(optarg, NULL, 0);
test_args.reserved = 0;
break;
+ case 'g':
+ test_args.is_nested = atoi_non_negative("Is Nested", optarg);
+ break;
case 'h':
default:
goto err;
diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c
index eeba1cc87ff8..50bf05bb6f85 100644
--- a/tools/testing/selftests/kvm/arm64/arch_timer.c
+++ b/tools/testing/selftests/kvm/arm64/arch_timer.c
@@ -12,16 +12,22 @@
#include "timer_test.h"
#include "ucall_common.h"
#include "vgic.h"
+#include <nv_util.h>
enum guest_stage {
GUEST_STAGE_VTIMER_CVAL = 1,
GUEST_STAGE_VTIMER_TVAL,
GUEST_STAGE_PTIMER_CVAL,
GUEST_STAGE_PTIMER_TVAL,
+ GUEST_STAGE_HVTIMER_CVAL,
+ GUEST_STAGE_HVTIMER_TVAL,
+ GUEST_STAGE_HPTIMER_CVAL,
+ GUEST_STAGE_HPTIMER_TVAL,
GUEST_STAGE_MAX,
};
static int vtimer_irq, ptimer_irq;
+static int hvtimer_irq, hptimer_irq;
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
@@ -47,6 +53,26 @@ guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
shared_data->xcnt = timer_get_cntct(PHYSICAL);
timer_set_ctl(PHYSICAL, CTL_ENABLE);
break;
+ case GUEST_STAGE_HVTIMER_CVAL:
+ timer_set_next_cval_ms(HVIRTUAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(HVIRTUAL);
+ timer_set_ctl(HVIRTUAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_HVTIMER_TVAL:
+ timer_set_next_tval_ms(HVIRTUAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(HVIRTUAL);
+ timer_set_ctl(HVIRTUAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_HPTIMER_CVAL:
+ timer_set_next_cval_ms(HPHYSICAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(HPHYSICAL);
+ timer_set_ctl(HPHYSICAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_HPTIMER_TVAL:
+ timer_set_next_tval_ms(HPHYSICAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(HPHYSICAL);
+ timer_set_ctl(HPHYSICAL, CTL_ENABLE);
+ break;
default:
GUEST_ASSERT(0);
}
@@ -75,6 +101,16 @@ static void guest_validate_irq(unsigned int intid,
accessor = PHYSICAL;
timer_irq = ptimer_irq;
break;
+ case GUEST_STAGE_HVTIMER_CVAL:
+ case GUEST_STAGE_HVTIMER_TVAL:
+ accessor = HVIRTUAL;
+ timer_irq = hvtimer_irq;
+ break;
+ case GUEST_STAGE_HPTIMER_CVAL:
+ case GUEST_STAGE_HPTIMER_TVAL:
+ accessor = HPHYSICAL;
+ timer_irq = hptimer_irq;
+ break;
default:
GUEST_ASSERT(0);
return;
@@ -142,38 +178,79 @@ static void guest_code(void)
{
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+ bool is_nested = false;
+ enum arch_timer vtimer, ptimer;
+ int vtmr_irq, ptmr_irq;
+ enum guest_stage stage_vtimer_cval, stage_vtimer_tval;
+ enum guest_stage stage_ptimer_cval, stage_ptimer_tval;
- local_irq_disable();
+ if (read_sysreg(CurrentEL) == CurrentEL_EL2)
+ is_nested = true;
+ local_irq_disable();
gic_init(GIC_V3, test_args.nr_vcpus);
- timer_set_ctl(VIRTUAL, CTL_IMASK);
- timer_set_ctl(PHYSICAL, CTL_IMASK);
+ if (is_nested) {
+
+ vtimer = HVIRTUAL;
+ ptimer = HPHYSICAL;
+ vtmr_irq = hvtimer_irq;
+ ptmr_irq = hptimer_irq;
+ stage_vtimer_cval = GUEST_STAGE_HVTIMER_CVAL;
+ stage_vtimer_tval = GUEST_STAGE_HVTIMER_TVAL;
+ stage_ptimer_cval = GUEST_STAGE_HPTIMER_CVAL;
+ stage_ptimer_tval = GUEST_STAGE_HPTIMER_TVAL;
+ } else {
+ vtimer = VIRTUAL;
+ ptimer = PHYSICAL;
+ vtmr_irq = vtimer_irq;
+ ptmr_irq = ptimer_irq;
+ stage_vtimer_cval = GUEST_STAGE_VTIMER_CVAL;
+ stage_vtimer_tval = GUEST_STAGE_VTIMER_TVAL;
+ stage_ptimer_cval = GUEST_STAGE_PTIMER_CVAL;
+ stage_ptimer_tval = GUEST_STAGE_PTIMER_TVAL;
+ }
+
+ timer_set_ctl(vtimer, CTL_IMASK);
+ timer_set_ctl(ptimer, CTL_IMASK);
+ gic_irq_enable(vtmr_irq);
+ gic_irq_enable(ptmr_irq);
- gic_irq_enable(vtimer_irq);
- gic_irq_enable(ptimer_irq);
local_irq_enable();
- guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
- guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
- guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
- guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);
+ guest_run_stage(shared_data, stage_vtimer_cval);
+ guest_run_stage(shared_data, stage_vtimer_tval);
+ guest_run_stage(shared_data, stage_ptimer_cval);
+ guest_run_stage(shared_data, stage_ptimer_tval);
GUEST_DONE();
}
static void test_init_timer_irq(struct kvm_vm *vm)
{
- /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
- sync_global_to_guest(vm, ptimer_irq);
- sync_global_to_guest(vm, vtimer_irq);
-
- pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
+ /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
+ if (is_vcpu_nested(vcpus[0])) {
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_HPTIMER, &hptimer_irq);
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_HVTIMER, &hvtimer_irq);
+
+ sync_global_to_guest(vm, hptimer_irq);
+ sync_global_to_guest(vm, hvtimer_irq);
+
+ pr_debug("hptimer_irq: %d; hvtimer_irq: %d\n", hptimer_irq, hvtimer_irq);
+ } else {
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
+
+ sync_global_to_guest(vm, ptimer_irq);
+ sync_global_to_guest(vm, vtimer_irq);
+
+ pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
+ }
}
static int gic_fd;
@@ -184,7 +261,10 @@ struct kvm_vm *test_vm_create(void)
unsigned int i;
int nr_vcpus = test_args.nr_vcpus;
- vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
+ if (test_args.is_nested)
+ vm = nv_vm_create_with_vcpus_gic(nr_vcpus, vcpus, NULL, guest_code);
+ else
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
index bf461de34785..82ba908fba8b 100644
--- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
@@ -11,6 +11,8 @@
enum arch_timer {
VIRTUAL,
PHYSICAL,
+ HVIRTUAL,
+ HPHYSICAL,
};
#define CTL_ENABLE (1 << 0)
@@ -37,8 +39,10 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
return read_sysreg(cntvct_el0);
case PHYSICAL:
+ case HPHYSICAL:
return read_sysreg(cntpct_el0);
default:
GUEST_FAIL("Unexpected timer type = %u", timer);
@@ -52,9 +56,11 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
{
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
write_sysreg(cval, cntv_cval_el0);
break;
case PHYSICAL:
+ case HPHYSICAL:
write_sysreg(cval, cntp_cval_el0);
break;
default:
@@ -68,8 +74,10 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
return read_sysreg(cntv_cval_el0);
case PHYSICAL:
+ case HPHYSICAL:
return read_sysreg(cntp_cval_el0);
default:
GUEST_FAIL("Unexpected timer type = %u", timer);
@@ -83,9 +91,11 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
{
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
write_sysreg(tval, cntv_tval_el0);
break;
case PHYSICAL:
+ case HPHYSICAL:
write_sysreg(tval, cntp_tval_el0);
break;
default:
@@ -100,8 +110,10 @@ static inline int32_t timer_get_tval(enum arch_timer timer)
isb();
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
return read_sysreg(cntv_tval_el0);
case PHYSICAL:
+ case HPHYSICAL:
return read_sysreg(cntp_tval_el0);
default:
GUEST_FAIL("Could not get timer %d\n", timer);
@@ -115,9 +127,11 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
{
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
write_sysreg(ctl, cntv_ctl_el0);
break;
case PHYSICAL:
+ case HPHYSICAL:
write_sysreg(ctl, cntp_ctl_el0);
break;
default:
@@ -131,8 +145,10 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
{
switch (timer) {
case VIRTUAL:
+ case HVIRTUAL:
return read_sysreg(cntv_ctl_el0);
case PHYSICAL:
+ case HPHYSICAL:
return read_sysreg(cntp_ctl_el0);
default:
GUEST_FAIL("Unexpected timer type = %u", timer);
diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h
index 9b6edaafe6d4..95f61c4a8a80 100644
--- a/tools/testing/selftests/kvm/include/timer_test.h
+++ b/tools/testing/selftests/kvm/include/timer_test.h
@@ -26,6 +26,7 @@ struct test_args {
/* Members of struct kvm_arm_counter_offset */
uint64_t counter_offset;
uint64_t reserved;
+ bool is_nested;
};
/* Shared variables between host and guest */
--
2.48.1