[PATCH v5 06/33] mm: Convert ptlock_alloc() to use ptdescs
Vishal Moola (Oracle)
vishal.moola at gmail.com
Thu Jun 22 13:57:18 PDT 2023
Convert ptlock_alloc() to take a struct ptdesc instead of a struct page. This
removes some direct accesses to struct page, working towards splitting out
struct ptdesc from struct page.
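
For illustration, a minimal sketch of the call-site pattern this establishes,
mirroring the ptlock_init() hunk below; the hypothetical example_ptlock_setup()
is only for illustration, and page_ptdesc() is assumed to convert a page to its
ptdesc view as introduced earlier in this series:

	/* Sketch only: a caller that still holds a struct page converts first. */
	static inline bool example_ptlock_setup(struct page *page)
	{
		struct ptdesc *ptdesc = page_ptdesc(page);

		/* ptlock_alloc() now stores the lock through ptdesc->ptl */
		if (!ptlock_alloc(ptdesc))
			return false;
		spin_lock_init(ptlock_ptr(page));	/* ptlock_ptr() still takes a page here */
		return true;
	}

The remaining helpers (ptlock_ptr(), ptlock_free()) are left taking a struct
page in this patch.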
Signed-off-by: Vishal Moola (Oracle) <vishal.moola at gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt at kernel.org>
---
include/linux/mm.h | 6 +++---
mm/memory.c | 4 ++--
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1511faf0263c..39b0a4661e44 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2798,7 +2798,7 @@ static inline void pagetable_free(struct ptdesc *pt)
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
extern void ptlock_free(struct page *page);
static inline spinlock_t *ptlock_ptr(struct page *page)
@@ -2810,7 +2810,7 @@ static inline void ptlock_cache_init(void)
{
}
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
@@ -2840,7 +2840,7 @@ static inline bool ptlock_init(struct page *page)
* slab code uses page->slab_cache, which share storage with page->ptl.
*/
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
- if (!ptlock_alloc(page))
+ if (!ptlock_alloc(page_ptdesc(page)))
return false;
spin_lock_init(ptlock_ptr(page));
return true;
diff --git a/mm/memory.c b/mm/memory.c
index 80faf3e76232..2ff14f50c7b3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5920,14 +5920,14 @@ void __init ptlock_cache_init(void)
SLAB_PANIC, NULL);
}
-bool ptlock_alloc(struct page *page)
+bool ptlock_alloc(struct ptdesc *ptdesc)
{
spinlock_t *ptl;
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl)
return false;
- page->ptl = ptl;
+ ptdesc->ptl = ptl;
return true;
}
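
Why the one-line change from page->ptl to ptdesc->ptl is sufficient: struct
ptdesc overlays struct page (established earlier in the series), so the pointer
written through ptdesc->ptl lands in the same storage the old page->ptl access
used. A rough sketch of that assumption, with a hypothetical helper name, not
the real kernel definitions:

	/* Assumed: page_ptdesc() is a typed cast between the overlaid views. */
	static inline spinlock_t *example_ptl_of(struct page *page)
	{
		struct ptdesc *ptdesc = page_ptdesc(page);

		return ptdesc->ptl;	/* same memory the old page->ptl touched */
	}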
--
2.40.1