[PATCH v3] mm,kfence: decouple kfence from page granularity mapping judgement

Kefeng Wang wangkefeng.wang at huawei.com
Thu Mar 9 18:56:16 PST 2023


Hi Zhenhua,

On 2023/3/10 10:02, Zhenhua Huang wrote:
> Kfence only needs its pool to be mapped as page granularity, previous
> judgement was a bit over protected. Decouple it from judgement and do
> page granularity mapping for kfence pool only [1].
> 
> To implement this, also relocate the kfence pool allocation before the
> linear mapping setting up, arm64_kfence_alloc_pool is to allocate phys
> addr, __kfence_pool is to be set after linear mapping set up.
> 
We do the same thing in our 5.10 kernel. A few minor comments below,

> LINK: [1] https://lore.kernel.org/linux-arm-kernel/1675750519-1064-1-git-send-email-quic_zhenhuah@quicinc.com/T/
> Suggested-by: Mark Rutland <mark.rutland at arm.com>
> Signed-off-by: Zhenhua Huang <quic_zhenhuah at quicinc.com>
> ---
>   arch/arm64/mm/mmu.c      | 44 ++++++++++++++++++++++++++++++++++++++++++++
>   arch/arm64/mm/pageattr.c |  5 ++---
>   include/linux/kfence.h   |  8 ++++++++
>   mm/kfence/core.c         |  9 +++++++++
>   4 files changed, 63 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 6f9d889..9f06a29e 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -24,6 +24,7 @@
>   #include <linux/mm.h>
>   #include <linux/vmalloc.h>
>   #include <linux/set_memory.h>
> +#include <linux/kfence.h>
>   
>   #include <asm/barrier.h>
>   #include <asm/cputype.h>
> @@ -525,6 +526,33 @@ static int __init enable_crash_mem_map(char *arg)
>   }
>   early_param("crashkernel", enable_crash_mem_map);
>   
> +#ifdef CONFIG_KFENCE
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> +	phys_addr_t kfence_pool = 0;

There is no need to initialize kfence_pool here.

> +
> +	if (!kfence_sample_interval)
> +		return (phys_addr_t)NULL;

And one more case is missed: kfence supports late init, see commit
b33f778bba5e ("kfence: alloc kfence_pool after system startup"). With
this change, when kfence_sample_interval is 0 at boot, no pool is
reserved and the linear map keeps its block mappings, so enabling
kfence later through its sample_interval parameter will break. We
added a new cmdline option to allocate the kfence pool regardless of
the kfence_sample_interval value; maybe there is some other way to
deal with this issue.
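
Just to illustrate the idea (an untested sketch; the
"kfence_pool_on_boot" name below is only an example, not necessarily
what such an option should be called):

/*
 * Untested sketch: force the boot-time pool reservation even when
 * kfence_sample_interval is 0, so that kfence can still be enabled
 * later through its sample_interval module parameter.
 */
static bool kfence_pool_early;

static int __init parse_kfence_pool_early(char *arg)
{
	kfence_pool_early = true;
	return 0;
}
early_param("kfence_pool_on_boot", parse_kfence_pool_early);

The check in arm64_kfence_alloc_pool() would then become
"if (!kfence_sample_interval && !kfence_pool_early)".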

> +
> +	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
> +	if (!kfence_pool) {
> +		pr_err("failed to allocate kfence pool\n");
> +		return (phys_addr_t)NULL;

There is no need for this early return; memblock_phys_alloc() returns 0
on failure, so falling through to "return kfence_pool" gives the same
result.

> +	}

> +
> +	return kfence_pool;
> +}
> +
> +#else
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> +	return (phys_addr_t)NULL;
> +}
> +
> +#endif
> +

I'd prefer changing all of the '(phys_addr_t)NULL' to plain 0.
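
With the init/return/NULL nits above folded in, the helper could look
like (untested):

static phys_addr_t arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_sample_interval)
		return 0;

	/* memblock_phys_alloc() returns 0 on failure, no early return needed */
	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool)
		pr_err("failed to allocate kfence pool\n");

	return kfence_pool;
}

and the !CONFIG_KFENCE stub can simply return 0.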

>   static void __init map_mem(pgd_t *pgdp)
>   {
>   	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
> @@ -532,6 +560,7 @@ static void __init map_mem(pgd_t *pgdp)
>   	phys_addr_t kernel_end = __pa_symbol(__init_begin);
>   	phys_addr_t start, end;
>   	int flags = NO_EXEC_MAPPINGS;
> +	phys_addr_t kfence_pool = 0;

No need to initialize this one either.

>   	u64 i;
>   
>   	/*
> @@ -564,6 +593,10 @@ static void __init map_mem(pgd_t *pgdp)
>   	}
>   #endif
>   
> +	kfence_pool = arm64_kfence_alloc_pool();
> +	if (kfence_pool)
> +		memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
> +
>   	/* map all the memory banks */
>   	for_each_mem_range(i, &start, &end) {
>   		if (start >= end)
> @@ -608,6 +641,17 @@ static void __init map_mem(pgd_t *pgdp)
>   		}
>   	}
>   #endif
> +
> +	/* Kfence pool needs page-level mapping */
> +	if (kfence_pool) {
> +		__map_memblock(pgdp, kfence_pool,
> +			kfence_pool + KFENCE_POOL_SIZE,
> +			pgprot_tagged(PAGE_KERNEL),
> +			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +		memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
> +		/* kfence_pool really mapped now */
> +		kfence_set_pool(kfence_pool);
> +	}
>   }
>   



