[RFC PATCH 05/18] mm/thp: use ptdesc in do_huge_pmd_anonymous_page

alexs at kernel.org alexs at kernel.org
Mon Jul 29 23:46:59 PDT 2024


From: Alex Shi <alexs at kernel.org>

Since we have the ptdesc struct now, it is better to use it in place of
pgtable_t, aka 'struct page *'. This is also a preparation for returning
a ptdesc pointer from the pte_alloc_one series of functions.
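
For context, the page_ptdesc()/ptdesc_page() conversions used below are
type-punning casts between the two views of the same memory. The sketch
below is only an illustration of that pattern with simplified stand-in
types, not the kernel's actual definitions:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's struct page / struct ptdesc;
	 * in the kernel, struct ptdesc overlays struct page, so converting
	 * between the two is a pointer cast, not a copy. */
	struct page { unsigned long flags; };
	struct ptdesc { unsigned long pt_flags; };

	/* Sketch of page_ptdesc()/ptdesc_page(): pure pointer casts. */
	static inline struct ptdesc *page_ptdesc(struct page *page)
	{
		return (struct ptdesc *)page;
	}

	static inline struct page *ptdesc_page(struct ptdesc *pt)
	{
		return (struct page *)pt;
	}

	int main(void)
	{
		struct page pg = { .flags = 0 };
		struct ptdesc *pt = page_ptdesc(&pg);

		/* Round-trip: the same object, viewed through either type. */
		printf("same object: %d\n", ptdesc_page(pt) == &pg);
		return 0;
	}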

Signed-off-by: Alex Shi <alexs at kernel.org>
Cc: linux-kernel at vger.kernel.org
Cc: linux-mm at kvack.org
Cc: Andrew Morton <akpm at linux-foundation.org>
---
 mm/huge_memory.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0ee104093121..d86108d81a99 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1087,16 +1087,16 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm) &&
 			transparent_hugepage_use_zero_page()) {
-		pgtable_t pgtable;
+		struct ptdesc *ptdesc;
 		struct folio *zero_folio;
 		vm_fault_t ret;
 
-		pgtable = pte_alloc_one(vma->vm_mm);
-		if (unlikely(!pgtable))
+		ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+		if (unlikely(!ptdesc))
 			return VM_FAULT_OOM;
 		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
 		if (unlikely(!zero_folio)) {
-			pte_free(vma->vm_mm, pgtable);
+			pte_free(vma->vm_mm, ptdesc_page(ptdesc));
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
 		}
@@ -1106,21 +1106,21 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			ret = check_stable_address_space(vma->vm_mm);
 			if (ret) {
 				spin_unlock(vmf->ptl);
-				pte_free(vma->vm_mm, pgtable);
+				pte_free(vma->vm_mm, ptdesc_page(ptdesc));
 			} else if (userfaultfd_missing(vma)) {
 				spin_unlock(vmf->ptl);
-				pte_free(vma->vm_mm, pgtable);
+				pte_free(vma->vm_mm, ptdesc_page(ptdesc));
 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			} else {
-				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+				set_huge_zero_folio(ptdesc_page(ptdesc), vma->vm_mm, vma,
 						   haddr, vmf->pmd, zero_folio);
 				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 				spin_unlock(vmf->ptl);
 			}
 		} else {
 			spin_unlock(vmf->ptl);
-			pte_free(vma->vm_mm, pgtable);
+			pte_free(vma->vm_mm, ptdesc_page(ptdesc));
 		}
 		return ret;
 	}
-- 
2.43.0
