[PATCH v5 58/69] KVM: arm64: nv: Add handling of ARMv8.4-TTL TLB invalidation

Ganapatrao Kulkarni gankulkarni at os.amperecomputing.com
Tue Jan 18 03:35:48 PST 2022



On 30-11-2021 01:31 am, Marc Zyngier wrote:
> Support guest-provided information information to find out about

Typo: "information" is written twice.

> the range of required invalidation.
> 
> Signed-off-by: Marc Zyngier <maz at kernel.org>
> ---
>   arch/arm64/include/asm/kvm_nested.h |  1 +
>   arch/arm64/kvm/nested.c             | 57 +++++++++++++++++++++
>   arch/arm64/kvm/sys_regs.c           | 78 ++++++++++++++++++-----------
>   3 files changed, 108 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
> index 5fa3c634c8e1..7c47ad655e2e 100644
> --- a/arch/arm64/include/asm/kvm_nested.h
> +++ b/arch/arm64/include/asm/kvm_nested.h
> @@ -123,6 +123,7 @@ extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
>   			    u64 control_bit);
>   extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
>   extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
> +unsigned int ttl_to_size(u8 ttl);
>   
>   struct sys_reg_params;
>   struct sys_reg_desc;
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 198169648c3c..6f738b5f57dd 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -363,6 +363,63 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
>   	return ret;
>   }
>   
> +
> +unsigned int ttl_to_size(u8 ttl)
> +{
> +	int level = ttl & 3;
> +	int gran = (ttl >> 2) & 3;
> +	unsigned int max_size = 0;
> +
> +	switch (gran) {
> +	case TLBI_TTL_TG_4K:
> +		switch (level) {
> +		case 0:
> +			/* No 52bit IPA support */
> +			break;
> +		case 1:
> +			max_size = SZ_1G;
> +			break;
> +		case 2:
> +			max_size = SZ_2M;
> +			break;
> +		case 3:
> +			max_size = SZ_4K;
> +			break;
> +		}
> +		break;
> +	case TLBI_TTL_TG_16K:
> +		switch (level) {
> +		case 0:
> +		case 1:
> +			/* No 52bit IPA support */
> +			break;
> +		case 2:
> +			max_size = SZ_32M;
> +			break;
> +		case 3:
> +			max_size = SZ_16K;
> +			break;
> +		}
> +		break;
> +	case TLBI_TTL_TG_64K:
> +		switch (level) {
> +		case 0:
> +		case 1:
> +			/* No 52bit IPA support */
> +			break;
> +		case 2:
> +			max_size = SZ_512M;
> +			break;
> +		case 3:
> +			max_size = SZ_64K;
> +			break;
> +		}
> +		break;
> +	default:			/* No size information */
> +		break;
> +	}
> +
> +	return max_size;
> +}
> +
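
Just as a sanity check on the mapping, here is a throwaway userspace sketch
(not kernel code) that mirrors the table above. It assumes the TLBI_TTL_TG_*
encodings follow the architectural TTL.TG values (1 = 4K, 2 = 16K, 3 = 64K,
0 = no information):

#include <assert.h>

#define SZ_4K	0x00001000UL
#define SZ_16K	0x00004000UL
#define SZ_64K	0x00010000UL
#define SZ_2M	0x00200000UL
#define SZ_32M	0x02000000UL
#define SZ_512M	0x20000000UL
#define SZ_1G	0x40000000UL

/* Same TTL -> max invalidation size mapping as ttl_to_size() above */
static unsigned long ttl_to_size_sketch(unsigned char ttl)
{
	static const unsigned long sizes[4][4] = {
		{ 0, 0, 0, 0 },			/* TG=0: no information */
		{ 0, SZ_1G, SZ_2M, SZ_4K },	/* TG=1: 4K granule, no level 0 (52bit IPA) */
		{ 0, 0, SZ_32M, SZ_16K },	/* TG=2: 16K granule, levels 0/1 unsupported */
		{ 0, 0, SZ_512M, SZ_64K },	/* TG=3: 64K granule, levels 0/1 unsupported */
	};

	return sizes[(ttl >> 2) & 3][ttl & 3];
}

int main(void)
{
	assert(ttl_to_size_sketch((1 << 2) | 3) == SZ_4K);	/* 4K granule, level 3 */
	assert(ttl_to_size_sketch((3 << 2) | 2) == SZ_512M);	/* 64K granule, level 2 */
	assert(ttl_to_size_sketch(0) == 0);			/* no hint, caller must fall back */
	return 0;
}
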
>   /* Must be called with kvm->lock held */
>   struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr)
>   {
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 55f3e94c24f1..e0f088de2cad 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -2699,59 +2699,81 @@ static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>   	return true;
>   }
>   
> +static unsigned long compute_tlb_inval_range(struct kvm_vcpu *vcpu,
> +					     struct kvm_s2_mmu *mmu,
> +					     u64 val)
> +{
> +	unsigned long max_size;
> +	u8 ttl = 0;
> +
> +	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL)) {
> +		ttl = FIELD_GET(GENMASK_ULL(47, 44), val);
> +	}
> +
> +	max_size = ttl_to_size(ttl);
> +
> +	if (!max_size) {
> +		u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
> +
> +		/* Compute the maximum extent of the invalidation */
> +		switch ((vtcr & VTCR_EL2_TG0_MASK)) {
> +		case VTCR_EL2_TG0_4K:
> +			max_size = SZ_1G;
> +			break;
> +		case VTCR_EL2_TG0_16K:
> +			max_size = SZ_32M;
> +			break;
> +		case VTCR_EL2_TG0_64K:
> +			/*
> +			 * No, we do not support 52bit IPA in nested yet. Once
> +			 * we do, this should be 4TB.
> +			 */
> +			/* FIXME: remove the 52bit PA support from the IDregs */
> +			max_size = SZ_512M;
> +			break;
> +		default:
> +			BUG();
> +		}
> +	}
> +
> +	WARN_ON(!max_size);
> +	return max_size;
> +}
> +
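
To make the "no hint" fallback concrete: the TTL hint sits in bits [47:44] of
the TLBI payload, and a value of 0 means the guest gave no information, so the
code above falls back to the largest block size implied by VTCR_EL2.TG0. A
tiny userspace illustration with a made-up payload (not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x7ULL << 44;	/* TTL = 0b0111: 4K granule, level 3 */
	uint8_t ttl;

	/* What FIELD_GET(GENMASK_ULL(47, 44), val) extracts */
	ttl = (val >> 44) & 0xf;
	assert(ttl == 0x7);		/* ttl_to_size() gives SZ_4K */

	val = 0;			/* guest provided no TTL hint */
	ttl = (val >> 44) & 0xf;
	assert(ttl == 0);		/* ttl_to_size() gives 0: use the VTCR_EL2.TG0 fallback */
	return 0;
}
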
>   static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>   			     const struct sys_reg_desc *r)
>   {
>   	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
> -	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
>   	struct kvm_s2_mmu *mmu;
>   	u64 base_addr;
> -	int max_size;
> +	unsigned long max_size;
>   
>   	/*
>   	 * We drop a number of things from the supplied value:
>   	 *
>   	 * - NS bit: we're non-secure only.
>   	 *
> -	 * - TTL field: We already have the granule size from the
> -	 *   VTCR_EL2.TG0 field, and the level is only relevant to the
> -	 *   guest's S2PT.
> -	 *
>   	 * - IPA[51:48]: We don't support 52bit IPA just yet...
>   	 *
>   	 * And of course, adjust the IPA to be on an actual address.
>   	 */
>   	base_addr = (p->regval & GENMASK_ULL(35, 0)) << 12;
>   
> -	/* Compute the maximum extent of the invalidation */
> -	switch ((vtcr & VTCR_EL2_TG0_MASK)) {
> -	case VTCR_EL2_TG0_4K:
> -		max_size = SZ_1G;
> -		break;
> -	case VTCR_EL2_TG0_16K:
> -		max_size = SZ_32M;
> -		break;
> -	case VTCR_EL2_TG0_64K:
> -		/*
> -		 * No, we do not support 52bit IPA in nested yet. Once
> -		 * we do, this should be 4TB.
> -		 */
> -		/* FIXME: remove the 52bit PA support from the IDregs */
> -		max_size = SZ_512M;
> -		break;
> -	default:
> -		BUG();
> -	}
> -
>   	spin_lock(&vcpu->kvm->mmu_lock);
>   
>   	mmu = lookup_s2_mmu(vcpu->kvm, vttbr, HCR_VM);
> -	if (mmu)
> +	if (mmu) {
> +		max_size = compute_tlb_inval_range(vcpu, mmu, p->regval);
> +		base_addr &= ~(max_size - 1);
>   		kvm_unmap_stage2_range(mmu, base_addr, max_size);
> +	}
>   
>   	mmu = lookup_s2_mmu(vcpu->kvm, vttbr, 0);
> -	if (mmu)
> +	if (mmu) {
> +		max_size = compute_tlb_inval_range(vcpu, mmu, p->regval);
> +		base_addr &= ~(max_size - 1);
>   		kvm_unmap_stage2_range(mmu, base_addr, max_size);
> +	}
>   
>   	spin_unlock(&vcpu->kvm->mmu_lock);
>   
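
One more worked example of the new alignment step in handle_ipas2e1is(), with
made-up values: the IPA taken from bits [35:0] of the payload is rounded down
to the start of the block being invalidated, so the unmap covers the whole
block rather than starting in the middle of it.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x40203ULL;	/* IPA[47:12] from the TLBI payload */
	uint64_t base_addr = (val & ((1ULL << 36) - 1)) << 12;	/* 0x40203000 */
	uint64_t max_size = 0x200000;	/* SZ_2M, e.g. a 4K granule, level 2 hint */

	base_addr &= ~(max_size - 1);
	assert(base_addr == 0x40200000);	/* unmap [0x40200000, 0x40400000) */
	return 0;
}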

This looks good to me; please feel free to add:
Reviewed-by: Ganapatrao Kulkarni <gankulkarni at os.amperecomputing.com>

Thanks,
Ganapat


