[PATCH v6 23/43] KVM: arm64: Handle Realm PSCI requests
Gavin Shan
gshan at redhat.com
Sat Feb 1 18:06:44 PST 2025
On 12/13/24 1:55 AM, Steven Price wrote:
> The RMM needs to be informed of the target REC when a PSCI call is made
> with an MPIDR argument. Expose an ioctl to the userspace in case the PSCI
> is handled by it.
>
> Co-developed-by: Suzuki K Poulose <suzuki.poulose at arm.com>
> Signed-off-by: Suzuki K Poulose <suzuki.poulose at arm.com>
> Signed-off-by: Steven Price <steven.price at arm.com>
> ---
> arch/arm64/include/asm/kvm_rme.h | 3 +++
> arch/arm64/kvm/arm.c | 25 +++++++++++++++++++++++++
> arch/arm64/kvm/psci.c | 29 +++++++++++++++++++++++++++++
> arch/arm64/kvm/rme.c | 15 +++++++++++++++
> 4 files changed, 72 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
> index 158f77e24a26..90a4537ad38d 100644
> --- a/arch/arm64/include/asm/kvm_rme.h
> +++ b/arch/arm64/include/asm/kvm_rme.h
> @@ -113,6 +113,9 @@ int realm_set_ipa_state(struct kvm_vcpu *vcpu,
> unsigned long addr, unsigned long end,
> unsigned long ripas,
> unsigned long *top_ipa);
> +int realm_psci_complete(struct kvm_vcpu *calling,
> + struct kvm_vcpu *target,
> + unsigned long status);
>
The parameter 'calling' could be renamed to 'source', consistent with the naming used in arch/arm64/kvm/psci.c.
A few other nitpicks below.
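Something like the following, just to illustrate the rename (untested):

    int realm_psci_complete(struct kvm_vcpu *source,
                            struct kvm_vcpu *target,
                            unsigned long status);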
> #define RMM_RTT_BLOCK_LEVEL 2
> #define RMM_RTT_MAX_LEVEL 3
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index f588b528c3f9..eff1a4ec892b 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1745,6 +1745,22 @@ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
> return __kvm_arm_vcpu_set_events(vcpu, events);
> }
>
> +static int kvm_arm_vcpu_rmm_psci_complete(struct kvm_vcpu *vcpu,
> + struct kvm_arm_rmm_psci_complete *arg)
> +{
> + struct kvm_vcpu *target = kvm_mpidr_to_vcpu(vcpu->kvm, arg->target_mpidr);
> +
> + if (!target)
> + return -EINVAL;
> +
> + /*
> + * RMM v1.0 only supports PSCI_RET_SUCCESS or PSCI_RET_DENIED
> + * for the status. But, let us leave it to the RMM to filter
> + * for making this future proof.
> + */
> + return realm_psci_complete(vcpu, target, arg->psci_status);
> +}
> +
> long kvm_arch_vcpu_ioctl(struct file *filp,
> unsigned int ioctl, unsigned long arg)
> {
> @@ -1867,6 +1883,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>
> return kvm_arm_vcpu_finalize(vcpu, what);
> }
> + case KVM_ARM_VCPU_RMM_PSCI_COMPLETE: {
> + struct kvm_arm_rmm_psci_complete req;
> +
> + if (!kvm_is_realm(vcpu->kvm))
> + return -EINVAL;
We probably need to check vcpu_is_rec() here instead of kvm_is_realm(). Also, -EPERM
seems more precise than -EINVAL.
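i.e. something like (untested):

    /* Only meaningful for a vCPU backed by a REC */
    if (!vcpu_is_rec(vcpu))
        return -EPERM;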
> + if (copy_from_user(&req, argp, sizeof(req)))
> + return -EFAULT;
> + return kvm_arm_vcpu_rmm_psci_complete(vcpu, &req);
> + }
> default:
> r = -EINVAL;
> }
> diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> index 3b5dbe9a0a0e..9dc161abc30c 100644
> --- a/arch/arm64/kvm/psci.c
> +++ b/arch/arm64/kvm/psci.c
> @@ -103,6 +103,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
>
> reset_state->reset = true;
> kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> + /*
> + * Make sure we issue PSCI_COMPLETE before the VCPU can be
> + * scheduled.
> + */
> + if (vcpu_is_rec(vcpu))
> + realm_psci_complete(source_vcpu, vcpu, PSCI_RET_SUCCESS);
>
> /*
> * Make sure the reset request is observed if the RUNNABLE mp_state is
> @@ -115,6 +121,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
>
> out_unlock:
> spin_unlock(&vcpu->arch.mp_state_lock);
> + if (vcpu_is_rec(vcpu) && ret != PSCI_RET_SUCCESS)
> + realm_psci_complete(source_vcpu, vcpu,
> + ret == PSCI_RET_ALREADY_ON ?
> + PSCI_RET_SUCCESS : PSCI_RET_DENIED);
Curly braces ({}) are needed here since the statement body spans multiple lines.
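i.e.:

    if (vcpu_is_rec(vcpu) && ret != PSCI_RET_SUCCESS) {
        realm_psci_complete(source_vcpu, vcpu,
                            ret == PSCI_RET_ALREADY_ON ?
                            PSCI_RET_SUCCESS : PSCI_RET_DENIED);
    }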
> return ret;
> }
>
> @@ -142,6 +152,25 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
> /* Ignore other bits of target affinity */
> target_affinity &= target_affinity_mask;
>
> + if (vcpu_is_rec(vcpu)) {
> + struct kvm_vcpu *target_vcpu;
> +
> + /* RMM supports only zero affinity level */
> + if (lowest_affinity_level != 0)
> + return PSCI_RET_INVALID_PARAMS;
> +
> + target_vcpu = kvm_mpidr_to_vcpu(kvm, target_affinity);
> + if (!target_vcpu)
> + return PSCI_RET_INVALID_PARAMS;
> +
> + /*
> + * Provide the references of running and target RECs to the RMM
s/running/the source/ here, so that it reads "the source and target RECs".
> + * so that the RMM can complete the PSCI request.
> + */
> + realm_psci_complete(vcpu, target_vcpu, PSCI_RET_SUCCESS);
> + return PSCI_RET_SUCCESS;
> + }
> +
> /*
> * If one or more VCPU matching target affinity are running
> * then ON else OFF
> diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
> index 146ef598a581..5831d379760a 100644
> --- a/arch/arm64/kvm/rme.c
> +++ b/arch/arm64/kvm/rme.c
> @@ -118,6 +118,21 @@ static void free_delegated_granule(phys_addr_t phys)
> free_page((unsigned long)phys_to_virt(phys));
> }
>
> +int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target,
> + unsigned long status)
> +{
> + int ret;
> +
> + ret = rmi_psci_complete(virt_to_phys(calling->arch.rec.rec_page),
> + virt_to_phys(target->arch.rec.rec_page),
> + status);
> +
Unnecessary blank line between the rmi_psci_complete() call and the check on 'ret'.
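i.e. with that blank line dropped:

    ret = rmi_psci_complete(virt_to_phys(calling->arch.rec.rec_page),
                            virt_to_phys(target->arch.rec.rec_page),
                            status);
    if (ret)
        return -EINVAL;

    return 0;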
> + if (ret)
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> static int realm_rtt_create(struct realm *realm,
> unsigned long addr,
> int level,
Thanks,
Gavin