[PATCH 2/5] KVM: RISC-V: refactor sbi reset request

Anup Patel anup at brainfault.org
Wed May 7 05:01:33 PDT 2025


On Thu, Apr 3, 2025 at 5:02 PM Radim Krčmář <rkrcmar at ventanamicro.com> wrote:
>
> The same code is used twice and SBI reset sets only two variables.
>
> Signed-off-by: Radim Krčmář <rkrcmar at ventanamicro.com>
> ---
>  arch/riscv/include/asm/kvm_vcpu_sbi.h |  2 ++
>  arch/riscv/kvm/vcpu_sbi.c             | 12 ++++++++++++
>  arch/riscv/kvm/vcpu_sbi_hsm.c         | 13 +------------
>  arch/riscv/kvm/vcpu_sbi_system.c      | 10 +---------
>  4 files changed, 16 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
> index 4ed6203cdd30..aaaa81355276 100644
> --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
> +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
> @@ -55,6 +55,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
>  void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
>                                      struct kvm_run *run,
>                                      u32 type, u64 flags);
> +void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
> +                                      unsigned long pc, unsigned long a1);

Use tabs for alignment instead of spaces.

>  int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
>  int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
>                                    const struct kvm_one_reg *reg);
> diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
> index d1c83a77735e..f58368f7df1d 100644
> --- a/arch/riscv/kvm/vcpu_sbi.c
> +++ b/arch/riscv/kvm/vcpu_sbi.c
> @@ -156,6 +156,18 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
>         run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
>  }
>
> +void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
> +                                      unsigned long pc, unsigned long a1)
> +{
> +       spin_lock(&vcpu->arch.reset_cntx_lock);
> +       vcpu->arch.guest_reset_context.sepc = pc;
> +       vcpu->arch.guest_reset_context.a0 = vcpu->vcpu_id;
> +       vcpu->arch.guest_reset_context.a1 = a1;
> +       spin_unlock(&vcpu->arch.reset_cntx_lock);
> +
> +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> +}
> +
>  int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  {
>         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
> index 3070bb31745d..f26207f84bab 100644
> --- a/arch/riscv/kvm/vcpu_sbi_hsm.c
> +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
> @@ -15,7 +15,6 @@
>
>  static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
>  {
> -       struct kvm_cpu_context *reset_cntx;
>         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
>         struct kvm_vcpu *target_vcpu;
>         unsigned long target_vcpuid = cp->a0;
> @@ -32,17 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
>                 goto out;
>         }
>
> -       spin_lock(&target_vcpu->arch.reset_cntx_lock);
> -       reset_cntx = &target_vcpu->arch.guest_reset_context;
> -       /* start address */
> -       reset_cntx->sepc = cp->a1;
> -       /* target vcpu id to start */
> -       reset_cntx->a0 = target_vcpuid;
> -       /* private data passed from kernel */
> -       reset_cntx->a1 = cp->a2;
> -       spin_unlock(&target_vcpu->arch.reset_cntx_lock);
> -
> -       kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
> +       kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
>
>         __kvm_riscv_vcpu_power_on(target_vcpu);
>
> diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
> index bc0ebba89003..359be90b0fc5 100644
> --- a/arch/riscv/kvm/vcpu_sbi_system.c
> +++ b/arch/riscv/kvm/vcpu_sbi_system.c
> @@ -13,7 +13,6 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
>                                     struct kvm_vcpu_sbi_return *retdata)
>  {
>         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> -       struct kvm_cpu_context *reset_cntx;
>         unsigned long funcid = cp->a6;
>         unsigned long hva, i;
>         struct kvm_vcpu *tmp;
> @@ -45,14 +44,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
>                         }
>                 }
>
> -               spin_lock(&vcpu->arch.reset_cntx_lock);
> -               reset_cntx = &vcpu->arch.guest_reset_context;
> -               reset_cntx->sepc = cp->a1;
> -               reset_cntx->a0 = vcpu->vcpu_id;
> -               reset_cntx->a1 = cp->a2;
> -               spin_unlock(&vcpu->arch.reset_cntx_lock);
> -
> -               kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> +               kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
>
>                 /* userspace provides the suspend implementation */
>                 kvm_riscv_vcpu_sbi_forward(vcpu, run);
> --
> 2.48.1
>

Otherwise, it looks good to me.
I took care of the above comment when merging this patch.

Queued this patch for Linux 6.16.

Thanks,
Anup



More information about the kvm-riscv mailing list