[PATCH v3 4/4] arch, mm: make kernel_page_present() always available
David Hildenbrand
david at redhat.com
Mon Nov 2 04:28:14 EST 2020
On 01.11.20 18:08, Mike Rapoport wrote:
> From: Mike Rapoport <rppt at linux.ibm.com>
>
> For architectures that enable ARCH_HAS_SET_MEMORY, having the ability to
> verify that a page is mapped in the kernel direct map can be useful
> regardless of hibernation.
>
> Add RISC-V implementation of kernel_page_present(), update its forward
> declarations and stubs to be a part of the set_memory API and remove the ugly
> ifdefery in include/linux/mm.h around current declarations of
> kernel_page_present().
>
> Signed-off-by: Mike Rapoport <rppt at linux.ibm.com>
> ---
>  arch/arm64/include/asm/cacheflush.h |  1 +
>  arch/arm64/mm/pageattr.c            |  4 +---
>  arch/riscv/include/asm/set_memory.h |  1 +
>  arch/riscv/mm/pageattr.c            | 29 +++++++++++++++++++++++++++++
>  arch/x86/include/asm/set_memory.h   |  1 +
>  arch/x86/mm/pat/set_memory.c        |  4 +---
>  include/linux/mm.h                  |  7 -------
>  include/linux/set_memory.h          |  5 +++++
>  8 files changed, 39 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
> index 9384fd8fc13c..45217f21f1fe 100644
> --- a/arch/arm64/include/asm/cacheflush.h
> +++ b/arch/arm64/include/asm/cacheflush.h
> @@ -140,6 +140,7 @@ int set_memory_valid(unsigned long addr, int numpages, int enable);
>
> int set_direct_map_invalid_noflush(struct page *page);
> int set_direct_map_default_noflush(struct page *page);
> +bool kernel_page_present(struct page *page);
>
> #include <asm-generic/cacheflush.h>
>
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 439325532be1..92eccaf595c8 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -186,8 +186,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
>
> 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
> }
> +#endif /* CONFIG_DEBUG_PAGEALLOC */
>
> -#ifdef CONFIG_HIBERNATION
> /*
> * This function is used to determine if a linear map page has been marked as
> * not-valid. Walk the page table and check the PTE_VALID bit. This is based
> @@ -234,5 +234,3 @@ bool kernel_page_present(struct page *page)
> 	ptep = pte_offset_kernel(pmdp, addr);
> 	return pte_valid(READ_ONCE(*ptep));
> }
> -#endif /* CONFIG_HIBERNATION */
> -#endif /* CONFIG_DEBUG_PAGEALLOC */
> diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
> index 4c5bae7ca01c..d690b08dff2a 100644
> --- a/arch/riscv/include/asm/set_memory.h
> +++ b/arch/riscv/include/asm/set_memory.h
> @@ -24,6 +24,7 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
>
> int set_direct_map_invalid_noflush(struct page *page);
> int set_direct_map_default_noflush(struct page *page);
> +bool kernel_page_present(struct page *page);
>
> #endif /* __ASSEMBLY__ */
>
> diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
> index 321b09d2e2ea..87ba5a68bbb8 100644
> --- a/arch/riscv/mm/pageattr.c
> +++ b/arch/riscv/mm/pageattr.c
> @@ -198,3 +198,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
> 			     __pgprot(0), __pgprot(_PAGE_PRESENT));
> }
> #endif
> +
> +bool kernel_page_present(struct page *page)
> +{
> +	unsigned long addr = (unsigned long)page_address(page);
> +	pgd_t *pgd;
> +	pud_t *pud;
> +	p4d_t *p4d;
> +	pmd_t *pmd;
> +	pte_t *pte;
> +
> +	pgd = pgd_offset_k(addr);
> +	if (!pgd_present(*pgd))
> +		return false;
> +
> +	p4d = p4d_offset(pgd, addr);
> +	if (!p4d_present(*p4d))
> +		return false;
> +
> +	pud = pud_offset(p4d, addr);
> +	if (!pud_present(*pud))
> +		return false;
> +
> +	pmd = pmd_offset(pud, addr);
> +	if (!pmd_present(*pmd))
> +		return false;
> +
> +	pte = pte_offset_kernel(pmd, addr);
> +	return pte_present(*pte);
> +}
> diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
> index 5948218f35c5..4352f08bfbb5 100644
> --- a/arch/x86/include/asm/set_memory.h
> +++ b/arch/x86/include/asm/set_memory.h
> @@ -82,6 +82,7 @@ int set_pages_rw(struct page *page, int numpages);
>
> int set_direct_map_invalid_noflush(struct page *page);
> int set_direct_map_default_noflush(struct page *page);
> +bool kernel_page_present(struct page *page);
>
> extern int kernel_set_to_readonly;
>
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index bc9be96b777f..16f878c26667 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -2226,8 +2226,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
>
> 	arch_flush_lazy_mmu_mode();
> }
> +#endif /* CONFIG_DEBUG_PAGEALLOC */
>
> -#ifdef CONFIG_HIBERNATION
> bool kernel_page_present(struct page *page)
> {
> 	unsigned int level;
> @@ -2239,8 +2239,6 @@ bool kernel_page_present(struct page *page)
> 	pte = lookup_address((unsigned long)page_address(page), &level);
> 	return (pte_val(*pte) & _PAGE_PRESENT);
> }
> -#endif /* CONFIG_HIBERNATION */
> -#endif /* CONFIG_DEBUG_PAGEALLOC */
>
> int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
> unsigned numpages, unsigned long page_flags)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index ab0ef6bd351d..44b82f22e76a 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2937,16 +2937,9 @@ static inline void debug_pagealloc_map_pages(struct page *page,
> 	if (debug_pagealloc_enabled_static())
> 		__kernel_map_pages(page, numpages, enable);
> }
> -
> -#ifdef CONFIG_HIBERNATION
> -extern bool kernel_page_present(struct page *page);
> -#endif /* CONFIG_HIBERNATION */
> #else /* CONFIG_DEBUG_PAGEALLOC */
> static inline void debug_pagealloc_map_pages(struct page *page,
> 					     int numpages, int enable) {}
> -#ifdef CONFIG_HIBERNATION
> -static inline bool kernel_page_present(struct page *page) { return true; }
> -#endif /* CONFIG_HIBERNATION */
> #endif /* CONFIG_DEBUG_PAGEALLOC */
>
> #ifdef __HAVE_ARCH_GATE_AREA
> diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
> index 860e0f843c12..fe1aa4e54680 100644
> --- a/include/linux/set_memory.h
> +++ b/include/linux/set_memory.h
> @@ -23,6 +23,11 @@ static inline int set_direct_map_default_noflush(struct page *page)
> {
> 	return 0;
> }
> +
> +static inline bool kernel_page_present(struct page *page)
> +{
> +	return true;
> +}
> #endif
>
> #ifndef set_mce_nospec
>
It's somewhat weird to move this to set_memory.h - set_memory is only one
possible user of kernel_page_present(). I think include/linux/mm.h is a better
fit. Ack to making it independent of CONFIG_HIBERNATION.
In include/linux/mm.h, I'd prefer:
#if defined(CONFIG_DEBUG_PAGEALLOC) || \
    defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
bool kernel_page_present(struct page *page);
#else
static inline bool kernel_page_present(struct page *page)
{
	return true;
}
#endif
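
For reference, the hibernation code in kernel/power/snapshot.c is the caller
this helper originally served, which is why it used to hide behind
CONFIG_HIBERNATION. Below is a simplified, illustrative sketch (not the actual
snapshot.c code; copy_saveable_page() is a made-up name) of how such a caller
benefits from kernel_page_present() together with the set_direct_map_*()
helpers:

#include <linux/mm.h>
#include <linux/set_memory.h>

/* Illustrative only - the real logic lives in kernel/power/snapshot.c. */
static void copy_saveable_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		/* The page is mapped in the direct map - copy it directly. */
		copy_page(dst, page_address(s_page));
	} else {
		/*
		 * The page was removed from the direct map, e.g. via
		 * set_direct_map_invalid_noflush(). Map it back, copy it,
		 * then restore the previous state.
		 */
		set_direct_map_default_noflush(s_page);
		copy_page(dst, page_address(s_page));
		set_direct_map_invalid_noflush(s_page);
	}
}
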
--
Thanks,
David / dhildenb