[PATCH v2 3/3] ARM: Support KFENCE for ARM

Alexander Potapenko glider at google.com
Wed Nov 3 09:22:10 PDT 2021


On Wed, Nov 3, 2021 at 2:26 PM Kefeng Wang <wangkefeng.wang at huawei.com> wrote:
>
> Add architecture-specific implementation details for KFENCE and enable
> KFENCE on ARM. In particular, this implements the required interface in
>  <asm/kfence.h>.
>
> KFENCE requires that attributes for pages from its memory pool can
> individually be set. Therefore, force the kfence pool to be mapped
> at page granularity.
>
> This patch was tested with the test cases in kfence_test.c, and they all
> pass both with and without ARM_LPAE.
>
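(For reference, and not part of this patch: the KUnit suite in
mm/kfence/kfence_test.c can be enabled with a config fragment along these
lines; option names are from mm/kfence/Kconfig, so treat this as a sketch:

  CONFIG_KUNIT=y
  CONFIG_KFENCE=y
  CONFIG_KFENCE_KUNIT_TEST=y

The tests then run at boot and report their results through KUnit/dmesg.)
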
> Signed-off-by: Kefeng Wang <wangkefeng.wang at huawei.com>
> ---
>  arch/arm/Kconfig              |  1 +
>  arch/arm/include/asm/kfence.h | 53 +++++++++++++++++++++++++++++++++++
>  arch/arm/mm/fault.c           | 19 ++++++++-----
>  3 files changed, 66 insertions(+), 7 deletions(-)
>  create mode 100644 arch/arm/include/asm/kfence.h
>
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index b9f72337224c..6d1f6f48995c 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -69,6 +69,7 @@ config ARM
>         select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
>         select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
>         select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
> +       select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
>         select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
>         select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
>         select HAVE_ARCH_MMAP_RND_BITS if MMU
> diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
> new file mode 100644
> index 000000000000..7980d0f2271f
> --- /dev/null
> +++ b/arch/arm/include/asm/kfence.h
> @@ -0,0 +1,53 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __ASM_ARM_KFENCE_H
> +#define __ASM_ARM_KFENCE_H
> +
> +#include <linux/kfence.h>
> +
> +#include <asm/pgalloc.h>
> +#include <asm/set_memory.h>
> +
> +static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
> +{
> +       int i;
> +       unsigned long pfn = PFN_DOWN(__pa(addr));
> +       pte_t *pte = pte_alloc_one_kernel(&init_mm);
> +
> +       if (!pte)
> +               return -ENOMEM;
> +
> +       for (i = 0; i < PTRS_PER_PTE; i++)
> +               set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
> +       pmd_populate_kernel(&init_mm, pmd, pte);
> +
> +       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
> +       return 0;
> +}
> +
> +static inline bool arch_kfence_init_pool(void)
> +{
> +       unsigned long addr;
> +       pmd_t *pmd;
> +
> +       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
> +            addr += PAGE_SIZE) {
> +               pmd = pmd_off_k(addr);
> +
> +               if (pmd_leaf(*pmd)) {
> +                       if (split_pmd_page(pmd, addr & PMD_MASK))
> +                               return false;
> +               }
> +       }
> +
> +       return true;
> +}
> +
> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +{
> +       set_memory_valid(addr, 1, !protect);
> +
> +       return true;
> +}
> +
> +#endif /* __ASM_ARM_KFENCE_H */
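
For context, kfence_protect_page() is the only hook above that the KFENCE core
flips at runtime: set_memory_valid(addr, 1, !protect) drops or restores the
valid bit so a protected page faults on any access. A minimal sketch of how the
core wraps this hook, from my recollection of mm/kfence/core.c and shown only
to illustrate the contract (not part of this patch):

  /* Protect/unprotect a single page of the KFENCE pool. */
  static bool kfence_protect(unsigned long addr)
  {
          return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
  }

  static bool kfence_unprotect(unsigned long addr)
  {
          return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
  }

The core only consumes the boolean result, so the unconditional "return true"
above relies on set_memory_valid() always succeeding for pool addresses.
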
> diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
> index f7ab6dabe89f..49148b675b43 100644
> --- a/arch/arm/mm/fault.c
> +++ b/arch/arm/mm/fault.c
> @@ -17,6 +17,7 @@
>  #include <linux/sched/debug.h>
>  #include <linux/highmem.h>
>  #include <linux/perf_event.h>
> +#include <linux/kfence.h>
>
>  #include <asm/system_misc.h>
>  #include <asm/system_info.h>
> @@ -99,6 +100,11 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
>  { }
>  #endif                                 /* CONFIG_MMU */
>
> +static inline bool is_write_fault(unsigned int fsr)
> +{
> +       return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
> +}
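
(Side note: the FSR_CM check matters because cache maintenance operations
report the write/WnR bit in the FSR even though no data write is involved.
For context, here is a minimal sketch of how such a predicate typically feeds
the KFENCE hook in the kernel fault path, mirroring what arm64 does; the exact
placement in this patch's fault.c hunk is not quoted above:

  /*
   * Before treating a kernel-mode fault on a non-present page as a
   * kernel bug, let KFENCE claim faults that hit its pool.
   */
  if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
          return;

kfence_handle_page_fault() returns true when the address lies in the KFENCE
pool and the fault has been reported and handled.)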

Please don't increase the diff by moving the code around. Consider
putting is_write_fault() in the right place in "ARM: mm: Provide
is_write_fault()" instead.


