[PATCH 3/5] RISC-V: KVM: Support lazy enabling of siselect and aia bits
Atish Patra
atishp at rivosinc.com
Mon May 5 14:39:28 PDT 2025
The Smstateen extension gates the AIA CSRs behind hstateen0 bits:
SISELECT is controlled by the AIA_ISEL bit (60), while SIPH/SIEH and
STOPI are controlled by the AIA bit (59). Add lazy enabling support
for those bits.
Signed-off-by: Atish Patra <atishp at rivosinc.com>
---
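Note: this patch relies on kvm_riscv_vcpu_hstateen_lazy_enable(), which is
introduced earlier in this series and is not shown here. For reviewers
reading this patch on its own, here is a minimal sketch of the assumed flow;
the shadow-state location (vcpu->arch.cfg.hstateen0) and the exact
bookkeeping are assumptions, not the real helper:

/*
 * Hypothetical sketch of the lazy-enable helper, not the actual
 * implementation from the earlier patch in this series. Assumes the
 * usual <asm/kvm_host.h> / <asm/csr.h> context.
 */
static int hstateen_lazy_enable_sketch(struct kvm_vcpu *vcpu,
                                       unsigned int csr_num, u64 bit)
{
        u64 *hstateen0 = &vcpu->arch.cfg.hstateen0;	/* assumed field */

        /*
         * If the bit is already set, the guest access trapped for some
         * other reason, so report a genuine illegal-instruction trap.
         */
        if (*hstateen0 & bit)
                return KVM_INSN_ILLEGAL_TRAP;

        /* First touch: set the bit and re-execute the same instruction. */
        *hstateen0 |= bit;
        csr_write(CSR_HSTATEEN0, *hstateen0);

        return KVM_INSN_CONTINUE_SAME_SEPC;
}

With such a helper in place, the handlers added below only have to pick the
hstateen0 bit that guards the CSR which trapped.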
 arch/riscv/include/asm/kvm_aia.h | 13 ++++++++++++-
 arch/riscv/kvm/aia.c             | 34 ++++++++++++++++++++++++++++++++++
 arch/riscv/kvm/vcpu_insn.c       |  3 +++
 3 files changed, 49 insertions(+), 1 deletion(-)
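For context on how these land: vcpu_insn.c routes a trapped CSR access by
scanning csr_funcs[] for the entry whose [base, base + count) range covers
the trapped CSR number, then calling its func callback. Roughly (simplified
from csr_insn(); local names here are illustrative):

        const struct csr_func *tcfn;
        int i, rc = KVM_INSN_ILLEGAL_TRAP;

        for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
                tcfn = &csr_funcs[i];
                if (tcfn->base <= csr_num &&
                    csr_num < tcfn->base + tcfn->count) {
                        rc = tcfn->func(vcpu, csr_num, val, new_val, wr_mask);
                        break;
                }
        }

So the new SISELECT/STOPI (and, on RV32, SIPH/SIEH) entries are all that is
needed to steer those guest accesses into the lazy-enable handlers.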
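SIEH and SIPH only exist on RV32, where they hold bits 63:32 of the
AIA-widened sie/sip, which is why the vcpu_insn.c hunk at the end guards
them with CONFIG_32BIT. An illustrative guest-side access that would take
the lazy path (0x154 is the SIPH CSR number, written numerically in case
the assembler lacks the AIA CSR names):

        /* RV32 guest: read the high half of the AIA sip. */
        unsigned long siph;

        asm volatile("csrr %0, 0x154" : "=r" (siph));
        /*
         * The first such read traps to KVM while hstateen0.AIA is still
         * clear; KVM sets the bit and re-executes the csrr transparently.
         */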
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 760a1aef09f7..9e39b0e15169 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -142,12 +142,23 @@ int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
                                 unsigned long *val,
                                 unsigned long new_val,
                                 unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_hstateen_enable(struct kvm_vcpu *vcpu,
+                                       unsigned int csr_num, unsigned long *val,
+                                       unsigned long new_val, unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_rmw_isel(struct kvm_vcpu *vcpu, unsigned int csr_num, unsigned long *val,
+                                unsigned long new_val, unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
                                unsigned long *val, unsigned long new_val,
                                unsigned long wr_mask);
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
-{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
+{ .base = CSR_SISELECT, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_isel }, \
+{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei }, \
+{ .base = CSR_STOPI, .count = 1, .func = kvm_riscv_vcpu_aia_hstateen_enable }, \
+
+#define KVM_RISCV_VCPU_AIA_CSR_32BIT_FUNCS \
+{ .base = CSR_SIPH, .count = 1, .func = kvm_riscv_vcpu_aia_hstateen_enable }, \
+{ .base = CSR_SIEH, .count = 1, .func = kvm_riscv_vcpu_aia_hstateen_enable }, \

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 1e0d2217ade7..3dfabf51a4d2 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -235,6 +235,40 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
        return 0;
}

+int kvm_riscv_vcpu_aia_hstateen_enable(struct kvm_vcpu *vcpu,
+                                       unsigned int csr_num,
+                                       unsigned long *val,
+                                       unsigned long new_val,
+                                       unsigned long wr_mask)
+{
+        /* If AIA not available then redirect trap */
+        if (!kvm_riscv_aia_available())
+                return KVM_INSN_ILLEGAL_TRAP;
+
+        /* If AIA not initialized then forward to user space */
+        if (!kvm_riscv_aia_initialized(vcpu->kvm))
+                return KVM_INSN_EXIT_TO_USER_SPACE;
+
+        return kvm_riscv_vcpu_hstateen_lazy_enable(vcpu, csr_num, SMSTATEEN0_AIA);
+}
+
+int kvm_riscv_vcpu_aia_rmw_isel(struct kvm_vcpu *vcpu,
+                                unsigned int csr_num,
+                                unsigned long *val,
+                                unsigned long new_val,
+                                unsigned long wr_mask)
+{
+        /* If AIA not available then redirect trap */
+        if (!kvm_riscv_aia_available())
+                return KVM_INSN_ILLEGAL_TRAP;
+
+        /* If AIA not initialized then forward to user space */
+        if (!kvm_riscv_aia_initialized(vcpu->kvm))
+                return KVM_INSN_EXIT_TO_USER_SPACE;
+
+        return kvm_riscv_vcpu_hstateen_lazy_enable(vcpu, csr_num, SMSTATEEN0_AIA_ISEL);
+}
+
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
                                 unsigned int csr_num,
                                 unsigned long *val,
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 3bc39572b79d..c46907bfe42f 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -260,6 +260,9 @@ static const struct csr_func csr_funcs[] = {
        KVM_RISCV_VCPU_AIA_CSR_FUNCS
        KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
        { .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
+#ifdef CONFIG_32BIT
+        KVM_RISCV_VCPU_AIA_CSR_32BIT_FUNCS
+#endif
};

/**
--
2.43.0