[PATCH v1 7/9] mm: Introduce unmap_desc struct to reduce function arguments

Liam R. Howlett Liam.Howlett at oracle.com
Tue Sep 9 12:09:43 PDT 2025


The unmap_region code takes a number of arguments that could use better
documentation.  Introducing a descriptor for unmapping (called
unmap_desc) makes the arguments self-documenting and moves their
descriptions into the structure declaration.

No functional change intended.

Signed-off-by: Liam R. Howlett <Liam.Howlett at oracle.com>
---
 mm/mmap.c | 12 ++++++++----
 mm/vma.c  | 27 ++++++++++++---------------
 mm/vma.h  | 35 ++++++++++++++++++++++++++++++++---
 3 files changed, 52 insertions(+), 22 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index aa4770b8d7f1e..5c9bd3f20e53f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1883,11 +1883,15 @@ __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (max) {
 			vma_iter_set(&vmi, 0);
 			tmp = vma_next(&vmi);
+			UNMAP_REGION(unmap, &vmi, /* first vma = */ tmp,
+				     /* min vma addr = */ 0,
+				     /* max vma addr = */ max,
+				     /* prev = */ NULL, /* next = */ NULL);
+
+			/* Don't free page tables above the point of failure */
+			unmap.tree_max = max;
 			flush_cache_mm(mm);
-			unmap_region(&vmi.mas, /* vma = */ tmp,
-				     /*vma_min = */ 0, /* vma_max = */ max,
-				     /* pg_max = */ max, /* prev = */ NULL,
-				     /* next = */ NULL);
+			unmap_region(&unmap);
 			charge = tear_down_vmas(mm, &vmi, tmp, max);
 			vm_unacct_memory(charge);
 		}
diff --git a/mm/vma.c b/mm/vma.c
index 4c850ffd83a4b..c92384975cbb2 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -473,22 +473,20 @@ void remove_vma(struct vm_area_struct *vma)
  *
  * Called with the mm semaphore held.
  */
-void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long vma_min, unsigned long vma_max, unsigned long pg_max,
-		struct vm_area_struct *prev, struct vm_area_struct *next)
+void unmap_region(struct unmap_desc *desc)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct mm_struct *mm = desc->first->vm_mm;
+	struct ma_state *mas = desc->mas;
 	struct mmu_gather tlb;
 
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, mas, vma, vma_min, vma_max, vma_max,
-		   /* mm_wr_locked = */ true);
-	mas_set(mas, vma->vm_end);
-	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-		      next ? next->vm_start : USER_PGTABLES_CEILING,
-		      pg_max,
-		      /* mm_wr_locked = */ true);
+	unmap_vmas(&tlb, mas, desc->first, desc->vma_min, desc->vma_max,
+		   desc->vma_max, desc->mm_wr_locked);
+	mas_set(mas, desc->tree_reset);
+	free_pgtables(&tlb, mas, desc->first, desc->first_pgaddr,
+		      desc->last_pgaddr, desc->tree_max,
+		      desc->mm_wr_locked);
 	tlb_finish_mmu(&tlb);
 }
 
@@ -2414,15 +2412,14 @@ static int __mmap_new_file_vma(struct mmap_state *map,
 
 	error = mmap_file(vma->vm_file, vma);
 	if (error) {
+		UNMAP_REGION(unmap, vmi, vma, vma->vm_start, vma->vm_end,
+			     map->prev, map->next);
 		fput(vma->vm_file);
 		vma->vm_file = NULL;
 
 		vma_iter_set(vmi, vma->vm_end);
 		/* Undo any partial mapping done by a device driver. */
-		unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
-			     map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
-			     map->prev, map->next);
-
+		unmap_region(&unmap);
 		return error;
 	}
 
diff --git a/mm/vma.h b/mm/vma.h
index b0ebc81d5862e..4edd5d26ffcfc 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -152,6 +152,37 @@ struct vma_merge_struct {
 
 };
 
+struct unmap_desc {
+	struct ma_state *mas;         /* The maple state pointing to the first vma */
+	struct vm_area_struct *first; /* The first vma */
+	unsigned long first_pgaddr;   /* The first pagetable address to free */
+	unsigned long last_pgaddr;    /* The last pagetable address to free */
+	unsigned long vma_min;        /* The min vma address */
+	unsigned long vma_max;        /* The max vma address */
+	unsigned long tree_max;       /* Maximum for the vma tree search */
+	unsigned long tree_reset;     /* Where to reset the vma tree walk */
+	bool mm_wr_locked;            /* If the mmap write lock is held */
+};
+
+#define UNMAP_REGION(name, _vmi, _vma, _vma_min, _vma_max, _prev, _next)      \
+	struct unmap_desc name = {                                          \
+		.mas = &(_vmi)->mas,                                          \
+		.first = _vma,                                                \
+		.first_pgaddr = _prev ?                                       \
+			((struct vm_area_struct *)_prev)->vm_end :            \
+			FIRST_USER_ADDRESS,                                   \
+		.last_pgaddr = _next ?                                        \
+			((struct vm_area_struct *)_next)->vm_start :          \
+			USER_PGTABLES_CEILING,                                \
+		.vma_min = _vma_min,                                          \
+		.vma_max = _vma_max,                                          \
+		.tree_max = _next ?                                           \
+			((struct vm_area_struct *)_next)->vm_start :          \
+			USER_PGTABLES_CEILING,                                \
+		.tree_reset = _vma->vm_end,                                   \
+		.mm_wr_locked = true,                                         \
+	}
+
 static inline bool vmg_nomem(struct vma_merge_struct *vmg)
 {
 	return vmg->state == VMA_MERGE_ERROR_NOMEM;
@@ -260,9 +291,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 
 void remove_vma(struct vm_area_struct *vma);
 
-void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long min, unsigned long max, unsigned long pg_max,
-		struct vm_area_struct *prev, struct vm_area_struct *next);
+void unmap_region(struct unmap_desc *desc);
 
 /* We are about to modify the VMA's flags. */
 __must_check struct vm_area_struct
-- 
2.47.2




More information about the maple-tree mailing list