[PATCH mm v3 25/38] kasan, vmalloc, arm64: mark vmalloc mappings as pgprot_tagged
Catalin Marinas
catalin.marinas at arm.com
Tue Dec 14 09:11:17 PST 2021
On Mon, Dec 13, 2021 at 10:54:21PM +0100, andrey.konovalov at linux.dev wrote:
> diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
> index b9185503feae..3d35adf365bf 100644
> --- a/arch/arm64/include/asm/vmalloc.h
> +++ b/arch/arm64/include/asm/vmalloc.h
> @@ -25,4 +25,14 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
>
> #endif
>
> +#define arch_vmalloc_pgprot_modify arch_vmalloc_pgprot_modify
> +static inline pgprot_t arch_vmalloc_pgprot_modify(pgprot_t prot)
> +{
> +	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&
> +	    (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)))
> +		prot = pgprot_tagged(prot);
> +
> +	return prot;
> +}
> +
> #endif /* _ASM_ARM64_VMALLOC_H */
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index 28becb10d013..760caeedd749 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -115,6 +115,13 @@ static inline int arch_vmap_pte_supported_shift(unsigned long size)
> }
> #endif
>
> +#ifndef arch_vmalloc_pgprot_modify
> +static inline pgprot_t arch_vmalloc_pgprot_modify(pgprot_t prot)
> +{
> +	return prot;
> +}
> +#endif
> +
> /*
> * Highlevel APIs for driver use
> */
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 837ed355bfc6..58bd2f7f86d7 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3060,6 +3060,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
> 		return NULL;
> 	}
>
> +	prot = arch_vmalloc_pgprot_modify(prot);
> +
> 	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
> 		unsigned long size_per_node;
I wonder whether we could fix the prot bits in the caller instead, so we
wouldn't need to worry about the exec or the module_alloc() case. Something
like:
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d2a00ad4e1dd..4e8c61255b92 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3112,7 +3112,7 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
			gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-				gfp_mask, PAGE_KERNEL, 0, node, caller);
+				gfp_mask, pgprot_hwasan(PAGE_KERNEL), 0, node, caller);
}
/*
* This is only for performance analysis of vmalloc and stress purpose.
@@ -3161,7 +3161,7 @@ EXPORT_SYMBOL(vmalloc);
void *vmalloc_no_huge(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+			GFP_KERNEL, pgprot_hwasan(PAGE_KERNEL), VM_NO_HUGE_VMAP,
			NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_no_huge);
with pgprot_hwasan() defined to pgprot_tagged() only if KASAN_HW_TAGS is
enabled.
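For reference, a minimal sketch of what such a helper could look like (the
name comes from the suggestion above; the exact form and placement are an
assumption, not a committed implementation):

	/*
	 * Sketch only: when in-kernel MTE (CONFIG_KASAN_HW_TAGS) is enabled,
	 * switch the protection bits to Tagged Normal memory so that tag
	 * checks apply to the mapping; otherwise leave the prot untouched.
	 */
	#ifdef CONFIG_KASAN_HW_TAGS
	#define pgprot_hwasan(prot)	pgprot_tagged(prot)
	#else
	#define pgprot_hwasan(prot)	(prot)
	#endif

Callers that want tagged memory (like the generic vmalloc() paths above)
would then ask for it explicitly, and any other prot passed to
__vmalloc_node_range() would be left alone.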
--
Catalin