slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Linux-MTD Mailing List
linux-mtd at lists.infradead.org
Fri Nov 22 17:59:02 EST 2013
Gitweb: http://git.infradead.org/?p=mtd-2.6.git;a=commit;h=d56791b38e34e480d869d1b88735df16c81aa684
Commit: d56791b38e34e480d869d1b88735df16c81aa684
Parent: 6e4664525b1db28f8c4e1130957f70a94c19213e
Author: Roman Bobniev <Roman.Bobniev at sonymobile.com>
AuthorDate: Tue Oct 8 15:58:57 2013 -0700
Committer: Pekka Enberg <penberg at iki.fi>
CommitDate: Thu Oct 24 20:25:10 2013 +0300
slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make it so
that all hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG)
call the appropriate kmemleak routines. This allows for kmemleak
to be configured independently of slub debug features.
It also fixes a bug where kmemleak was only partially enabled in some
configurations.
Acked-by: Catalin Marinas <catalin.marinas at arm.com>
Acked-by: Christoph Lameter <cl at linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev at sonymobile.com>
Signed-off-by: Tim Bird <tim.bird at sonymobile.com>
Signed-off-by: Pekka Enberg <penberg at iki.fi>
---
mm/slub.c | 35 +++++++++++++++++++++++++++++++----
1 file changed, 31 insertions(+), 4 deletions(-)
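In outline, the change looks like the condensed sketch below (simplified from the diff that follows, not the verbatim kernel code): the kmemleak calls now live in small hook functions defined in both halves of the CONFIG_SLUB_DEBUG #ifdef, so callers such as kmalloc_large_node() and kfree() invoke the hooks unconditionally. Previously the !CONFIG_SLUB_DEBUG stubs were empty, so ordinary slab objects were never reported to kmemleak in those configurations; that is the partial enablement the changelog refers to.

#ifdef CONFIG_SLUB_DEBUG
/* ... hooks for the debug build (first hunk of the patch) ... */
#else
/* SLUB debugging compiled out: the hooks still report to kmemleak. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);	/* track large kmalloc allocations */
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);			/* and forget them again on kfree() */
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
						void *object)
{
	/* report ordinary slab objects; this used to be an empty stub */
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
				flags & gfp_allowed_mask);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);
}
#endif /* CONFIG_SLUB_DEBUG */

/* kmalloc_large_node() and kfree() then call kmalloc_large_node_hook() and
 * kfree_hook() instead of calling kmemleak_alloc()/kmemleak_free() directly. */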
diff --git a/mm/slub.c b/mm/slub.c
index e3ba1f2..250062c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}