[PATCH RFC v7 12/24] mm: kpkeys: Protect regular page tables
Kevin Brodsky
kevin.brodsky at arm.com
Tue May 5 09:06:01 PDT 2026
If the kpkeys_hardened_pgtables feature is enabled, page table pages
(PTPs) are protected by modifying the linear mapping to map
them with a privileged pkey (KPKEYS_PKEY_PGTABLES). This patch
introduces a new page allocator for that purpose:
* kpkeys_pgtable_alloc() allocates a new PTP and sets the linear
mapping to KPKEYS_PKEY_PGTABLES for that page
* kpkeys_pgtable_free() frees such a PTP and restores the linear
mapping to the default pkey
This interface is then hooked into pagetable_alloc() and
pagetable_free(), protecting all page tables created once the buddy
allocator is available. Early page tables are allocated in other
ways and will be protected in subsequent patches.
This implementation of kpkeys_pgtable_{alloc,free}() is minimal and
relies on the linear map being fully PTE-mapped; otherwise,
calling set_memory_pkey() on a single page may result in splitting a
block mapping, which in turn requires allocating a new PTP. A more
elaborate implementation could be added later to handle this
situation.
Signed-off-by: Kevin Brodsky <kevin.brodsky at arm.com>
---
include/linux/kpkeys.h | 10 +++++++++
include/linux/mm.h | 14 +++++++++++--
mm/kpkeys_hardened_pgtables.c | 47 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 69 insertions(+), 2 deletions(-)
diff --git a/include/linux/kpkeys.h b/include/linux/kpkeys.h
index 1ed0299ad5ac..c9f63415162b 100644
--- a/include/linux/kpkeys.h
+++ b/include/linux/kpkeys.h
@@ -131,6 +131,9 @@ static inline bool kpkeys_hardened_pgtables_early_enabled(void)
return arch_supports_kpkeys_early();
}
+struct page *kpkeys_pgtable_alloc(gfp_t gfp, unsigned int order);
+void kpkeys_pgtable_free(struct page *page, unsigned int order);
+
/*
* Should be called from mem_init(): as soon as the buddy allocator becomes
* available and before any call to pagetable_alloc().
@@ -149,6 +152,13 @@ static inline bool kpkeys_hardened_pgtables_early_enabled(void)
return false;
}
+static inline struct page *kpkeys_pgtable_alloc(gfp_t gfp, unsigned int order)
+{
+ return NULL;
+}
+
+static inline void kpkeys_pgtable_free(struct page *page, unsigned int order) {}
+
static inline void kpkeys_hardened_pgtables_init(void) {}
#endif /* CONFIG_KPKEYS_HARDENED_PGTABLES */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index af23453e9dbd..7b95b2351763 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -37,6 +37,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/iommu-debug-pagealloc.h>
+#include <linux/kpkeys.h>
struct mempolicy;
struct anon_vma;
@@ -3648,7 +3649,12 @@ static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
*/
static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
{
- struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
+ struct page *page;
+
+ if (kpkeys_hardened_pgtables_enabled())
+ page = kpkeys_pgtable_alloc(gfp | __GFP_COMP, order);
+ else
+ page = alloc_pages_noprof(gfp | __GFP_COMP, order);
return page_ptdesc(page);
}
@@ -3657,8 +3663,12 @@ static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int orde
static inline void __pagetable_free(struct ptdesc *pt)
{
struct page *page = ptdesc_page(pt);
+ unsigned int order = compound_order(page);
- __free_pages(page, compound_order(page));
+ if (kpkeys_hardened_pgtables_enabled())
+ kpkeys_pgtable_free(page, order);
+ else
+ __free_pages(page, order);
}
#ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
diff --git a/mm/kpkeys_hardened_pgtables.c b/mm/kpkeys_hardened_pgtables.c
index 763f267bbfe4..fff7e2a64b64 100644
--- a/mm/kpkeys_hardened_pgtables.c
+++ b/mm/kpkeys_hardened_pgtables.c
@@ -1,12 +1,59 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kpkeys.h>
#include <linux/mm.h>
+#include <linux/set_memory.h>
#include <kunit/visibility.h>
__ro_after_init DEFINE_STATIC_KEY_FALSE(kpkeys_hardened_pgtables_key);
EXPORT_SYMBOL_IF_KUNIT(kpkeys_hardened_pgtables_key);
+static int set_pkey_pgtable(struct page *page, unsigned int nr_pages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ int ret;
+
+ ret = set_memory_pkey(addr, nr_pages, KPKEYS_PKEY_PGTABLES);
+
+ WARN_ON(ret);
+ return ret;
+}
+
+static int set_pkey_default(struct page *page, unsigned int nr_pages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ int ret;
+
+ ret = set_memory_pkey(addr, nr_pages, KPKEYS_PKEY_DEFAULT);
+
+ WARN_ON(ret);
+ return ret;
+}
+
+struct page *kpkeys_pgtable_alloc(gfp_t gfp, unsigned int order)
+{
+ struct page *page;
+ int ret;
+
+ page = alloc_pages_noprof(gfp, order);
+ if (!page)
+ return page;
+
+ ret = set_pkey_pgtable(page, 1 << order);
+ if (ret) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ return page;
+}
+
+void kpkeys_pgtable_free(struct page *page, unsigned int order)
+{
+ set_pkey_default(page, 1 << order);
+ __free_pages(page, order);
+}
+
void __init kpkeys_hardened_pgtables_init(void)
{
if (!kpkeys_enabled())
--
2.51.2
More information about the linux-arm-kernel
mailing list