[PATCH v4 04/12] KVM: arm64: Add ARM64_HAS_LPA2 CPU capability

Ryan Roberts ryan.roberts at arm.com
Mon Nov 13 03:57:45 PST 2023


On 20/10/2023 09:16, Marc Zyngier wrote:
> On Mon, 09 Oct 2023 19:50:00 +0100,
> Ryan Roberts <ryan.roberts at arm.com> wrote:
>>
>> Expose FEAT_LPA2 as a capability so that we can take advantage of
>> alternatives patching in both the kernel and hypervisor.
>>
>> Although FEAT_LPA2 presence is advertised separately for stage1 and
>> stage2, the expectation is that in practice both stages will either
>> support or not support it. Therefore, for the case where KVM is present,
>> we combine both into a single capability, allowing us to simplify the
>> implementation. For the case where KVM is not present, we only care
>> about stage1.
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts at arm.com>
>> ---
>>  arch/arm64/include/asm/cpufeature.h |  5 ++++
>>  arch/arm64/kernel/cpufeature.c      | 46 +++++++++++++++++++++++++++++
>>  arch/arm64/tools/cpucaps            |  1 +
>>  3 files changed, 52 insertions(+)
>>
>> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
>> index 5bba39376055..b1292ec88538 100644
>> --- a/arch/arm64/include/asm/cpufeature.h
>> +++ b/arch/arm64/include/asm/cpufeature.h
>> @@ -831,6 +831,11 @@ static inline bool system_supports_tlb_range(void)
>>  		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
>>  }
>>  
>> +static inline bool system_supports_lpa2(void)
>> +{
>> +	return cpus_have_const_cap(ARM64_HAS_LPA2);
> 
> cpus_have_const_cap() is going away. You may want to look at Mark's
> series to see how to replace this one.
> 
>> +}
>> +
>>  int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
>>  bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
>>  
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index 444a73c2e638..1ccb1fe0e310 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -1746,6 +1746,46 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>>  	return !meltdown_safe;
>>  }
>>  
>> +static inline bool has_lpa2_at_stage1(u64 mmfr0)
> 
> Why inline? It isn't like this has any performance implication...
> 
>> +{
>> +#if defined(CONFIG_ARM64_4K_PAGES) || defined(CONFIG_ARM64_16K_PAGES)
>> +	unsigned int tgran;
>> +
>> +	tgran = cpuid_feature_extract_unsigned_field(mmfr0,
>> +						ID_AA64MMFR0_EL1_TGRAN_SHIFT);
>> +	return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
>> +#else
>> +	return false;
>> +#endif
> 
> Writing this using IS_ENABLED() would be slightly more pleasing to my
> tired eyes... ;-)

Unfortunately this doesn't work because ID_AA64MMFR0_EL1_TGRAN_LPA2 is only
defined for 4K and 16K configs (there is no field for 64K). So I propose to do
it this way instead. Please shout if you have a better idea:

#if defined(CONFIG_ARM64_4K_PAGES) || defined(CONFIG_ARM64_16K_PAGES)
/*
 * ID_AA64MMFR0_EL1_TGRAN_LPA2 is only defined for the 4K and 16K granule
 * configs (there is no LPA2 field for 64K), so the real detection helpers
 * only exist under those configs; the 64K build gets a stub below.
 */

/* True if stage-1 translation supports LPA2 for the active page size. */
static bool has_lpa2_at_stage1(u64 mmfr0)
{
	unsigned int tgran;

	/* Extract the stage-1 TGRAN field for the active granule. */
	tgran = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_TGRAN_SHIFT);
	return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
}

/* True if stage-2 translation supports LPA2 for the active page size. */
static bool has_lpa2_at_stage2(u64 mmfr0)
{
	unsigned int tgran;

	/* Extract the stage-2 TGRAN_2 field for the active granule. */
	tgran = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_TGRAN_2_SHIFT);
	return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2;
}

/*
 * Capability match function for ARM64_HAS_LPA2.
 *
 * Reads the system-wide sanitised ID_AA64MMFR0_EL1 value and reports LPA2
 * only when both stage-1 and stage-2 advertise support, so a single
 * capability covers both the kernel and KVM uses.
 *
 * @entry and @scope are required by the arm64_cpu_capabilities match
 * prototype but are unused here: the sanitised register already reflects
 * the system-wide view.
 */
static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0);
}
#else
/* 64K pages: FEAT_LPA2 does not apply, so the capability is never set. */
static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
{
	return false;
}
#endif




More information about the linux-arm-kernel mailing list