[PATCH RFC v3 1/9] mm/slub: fix bulk alloc and free stats
Vlastimil Babka
vbabka at suse.cz
Wed Nov 29 01:53:26 PST 2023
The SLUB sysfs stats enabled by CONFIG_SLUB_STATS have two deficiencies
identified wrt bulk alloc/free operations:

- Bulk allocations from the cpu freelist are not counted. Add the
  ALLOC_FASTPATH counter there.

- Bulk fastpath freeing will count a list of multiple objects with a
  single FREE_FASTPATH increment. Add a stat_add() variant to count them
  all.
Signed-off-by: Vlastimil Babka <vbabka at suse.cz>
---
mm/slub.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
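
For reviewers' context (not part of the diff): the new stat_add() is
modeled on the existing stat() helper in mm/slub.c, which intentionally
uses a raw, not irq-safe, per-cpu operation; it looks roughly like this:

	static inline void stat(const struct kmem_cache *s, enum stat_item si)
	{
	#ifdef CONFIG_SLUB_STATS
		/*
		 * The rmw is racy on a preemptible kernel but this is
		 * acceptable, so avoid the this_cpu_add() irq-disable
		 * overhead.
		 */
		raw_cpu_inc(s->cpu_slab->stat[si]);
	#endif
	}

stat_add() makes the same tradeoff with raw_cpu_add(): a rare lost
update is accepted in exchange for avoiding this_cpu_add()'s
irq-disable overhead on the fast paths.
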
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..f0cd55bb4e11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -341,6 +341,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 #endif
 }
 
+static inline
+void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
+{
+#ifdef CONFIG_SLUB_STATS
+	raw_cpu_add(s->cpu_slab->stat[si], v);
+#endif
+}
+
 /*
  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
@@ -3784,7 +3792,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 
 		local_unlock(&s->cpu_slab->lock);
 	}
-	stat(s, FREE_FASTPATH);
+	stat_add(s, FREE_FASTPATH, cnt);
 }
 #else /* CONFIG_SLUB_TINY */
 static void do_slab_free(struct kmem_cache *s,
@@ -3986,6 +3994,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
 		maybe_wipe_obj_freeptr(s, p[i]);
+		stat(s, ALLOC_FASTPATH);
 	}
 	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
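
Illustration (not part of the patch): a hypothetical caller showing
what the two fixed counters now account for; `cache' stands for any
slab cache and error handling is omitted:

	void *objs[16];
	int n;

	/*
	 * Objects served from the per-cpu freelist during a bulk
	 * allocation are now counted, one ALLOC_FASTPATH increment
	 * per object.
	 */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);

	/*
	 * A fastpath bulk free of n objects now adds n to FREE_FASTPATH
	 * instead of incrementing it once for the whole list.
	 */
	kmem_cache_free_bulk(cache, n, objs);

With CONFIG_SLUB_STATS=y the effect is visible in the per-cache sysfs
files, e.g. /sys/kernel/slab/<cache>/alloc_fastpath and
/sys/kernel/slab/<cache>/free_fastpath.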
--
2.43.0