[PATCH 1/2] mm: Abstract THP allocation
Dev Jain
dev.jain@arm.com
Fri Aug 30 01:41:16 PDT 2024
In preparation for the second patch, abstract away the THP allocation
logic present in the create_huge_pmd() path, which corresponds to the
faulting case when no page is present.
No functional change intended.
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
mm/huge_memory.c | 113 +++++++++++++++++++++++++++++------------------
1 file changed, 69 insertions(+), 44 deletions(-)
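For review, a condensed map of the resulting fault path. This is not
part of the diff; error paths, the userfaultfd checks, and the
huge-zero-page shortcut are elided, and all names are as introduced
below:

	/*
	 * do_huge_pmd_anonymous_page()
	 *   __do_huge_pmd_anonymous_page()
	 *     pte_alloc_one()     - page table to deposit under the PMD
	 *     thp_fault_alloc()   - vma_alloc_folio() + memcg charge +
	 *                           folio_zero_user() + mark uptodate;
	 *                           counts the FALLBACK events itself
	 *                           and returns VM_FAULT_FALLBACK on
	 *                           failure
	 *     pmd_lock()
	 *     map_pmd_thp()       - mk_huge_pmd(), rmap/LRU insertion,
	 *                           pgtable_trans_huge_deposit(),
	 *                           set_pmd_at(), counter updates, PTL
	 *                           unlock, __thp_fault_success_stats()
	 */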
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67c86a5d64a6..e5b568e2bb34 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,47 +943,92 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
-static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
- struct page *page, gfp_t gfp)
+static vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long haddr, struct folio **foliop,
+ unsigned long addr)
{
- struct vm_area_struct *vma = vmf->vma;
- struct folio *folio = page_folio(page);
- pgtable_t pgtable;
- unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- vm_fault_t ret = 0;
+ struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
- VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ *foliop = folio;
+ if (unlikely(!folio)) {
+ count_vm_event(THP_FAULT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+ }
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
folio_put(folio);
count_vm_event(THP_FAULT_FALLBACK);
count_vm_event(THP_FAULT_FALLBACK_CHARGE);
- count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
- count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
return VM_FAULT_FALLBACK;
}
folio_throttle_swaprate(folio, gfp);
- pgtable = pte_alloc_one(vma->vm_mm);
- if (unlikely(!pgtable)) {
- ret = VM_FAULT_OOM;
- goto release;
- }
-
- folio_zero_user(folio, vmf->address);
+ folio_zero_user(folio, addr);
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
* folio_zero_user writes become visible before the set_pmd_at()
* write.
*/
__folio_mark_uptodate(folio);
+ return 0;
+}
+
+static void __thp_fault_success_stats(struct vm_area_struct *vma, int order)
+{
+ count_vm_event(THP_FAULT_ALLOC);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
+ count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+}
+
+static void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long haddr,
+ pgtable_t pgtable)
+ __releases(vmf->ptl)
+{
+ pmd_t entry;
+
+ entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+ set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ mm_inc_nr_ptes(vma->vm_mm);
+ spin_unlock(vmf->ptl);
+ __thp_fault_success_stats(vma, HPAGE_PMD_ORDER);
+}
+
+static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio = NULL;
+ pgtable_t pgtable;
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ vm_fault_t ret = 0;
+ gfp_t gfp = vma_thp_gfp_mask(vma);
+
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (unlikely(!pgtable)) {
+ ret = VM_FAULT_OOM;
+ goto release;
+ }
+
+ ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
+ vmf->address);
+ if (ret)
+ goto release;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+
if (unlikely(!pmd_none(*vmf->pmd))) {
goto unlock_release;
} else {
- pmd_t entry;
-
ret = check_stable_address_space(vma->vm_mm);
if (ret)
goto unlock_release;
@@ -997,20 +1042,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
return ret;
}
-
- entry = mk_huge_pmd(page, vma->vm_page_prot);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
- folio_add_lru_vma(folio, vma);
- pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
- set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
- update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
- add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
- mm_inc_nr_ptes(vma->vm_mm);
- spin_unlock(vmf->ptl);
- count_vm_event(THP_FAULT_ALLOC);
- count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
- count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+ map_pmd_thp(folio, vmf, vma, haddr, pgtable);
}
return 0;
@@ -1019,7 +1051,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
release:
if (pgtable)
pte_free(vma->vm_mm, pgtable);
- folio_put(folio);
+ if (folio)
+ folio_put(folio);
return ret;
}
@@ -1077,8 +1110,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- gfp_t gfp;
- struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
vm_fault_t ret;
@@ -1129,14 +1160,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
}
return ret;
}
- gfp = vma_thp_gfp_mask(vma);
- folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
- if (unlikely(!folio)) {
- count_vm_event(THP_FAULT_FALLBACK);
- count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
- return VM_FAULT_FALLBACK;
- }
- return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
+
+ return __do_huge_pmd_anonymous_page(vmf);
}
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
--
2.30.2