[PATCH v4 04/48] mm: shrinker: add infrastructure for dynamically allocating shrinker
Qi Zheng
zhengqi.arch at bytedance.com
Mon Aug 7 04:08:52 PDT 2023
Currently, the shrinker instances can be divided into the following three
types:
a) global shrinker instance statically defined in the kernel, such as
workingset_shadow_shrinker.
b) global shrinker instance statically defined in kernel modules, such as
mmu_shrinker in x86.
c) shrinker instance embedded in other structures.
For case a, the memory of the shrinker instance is never freed. For case b,
the memory of the shrinker instance will be freed after synchronize_rcu()
when the module is unloaded. For case c, the memory of the shrinker instance
will be freed along with the structure it is embedded in.
In preparation for implementing lockless slab shrink, we need to dynamically
allocate those shrinker instances in case c, so that their memory can be
freed independently by calling kfree_rcu().
So this commit adds the following new APIs for dynamically allocating a
shrinker, and adds a private_data field to struct shrinker to record and
retrieve the original embedded structure (a usage sketch follows the list):
1. shrinker_alloc()
Used to allocate the shrinker instance itself and related memory. It returns
a pointer to the shrinker instance on success and NULL on failure.
2. shrinker_register()
Used to register the shrinker instance, which is the same as the current
register_shrinker_prepared().
3. shrinker_free()
Used to unregister (if needed) and free the shrinker instance.
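For a case c user, the intended usage is roughly as follows. This is only a
sketch: the foo_cache structure and its callbacks are hypothetical and exist
only to illustrate the new API; private_data is what lets the callbacks reach
the embedding structure once the shrinker is no longer embedded in it.
```
#include <linux/shrinker.h>
#include <linux/atomic.h>

/* Hypothetical structure that embeds a shrinker today (case c). */
struct foo_cache {
	struct shrinker *shrinker;
	atomic_long_t nr_cached;
};

static unsigned long foo_cache_count(struct shrinker *shrinker,
				     struct shrink_control *sc)
{
	struct foo_cache *cache = shrinker->private_data;

	return atomic_long_read(&cache->nr_cached);
}

static unsigned long foo_cache_scan(struct shrinker *shrinker,
				    struct shrink_control *sc)
{
	/* Reclaim up to sc->nr_to_scan objects here; return the number freed. */
	return 0;
}

static int foo_cache_init_shrinker(struct foo_cache *cache)
{
	cache->shrinker = shrinker_alloc(0, "foo-cache");
	if (!cache->shrinker)
		return -ENOMEM;

	cache->shrinker->count_objects = foo_cache_count;
	cache->shrinker->scan_objects = foo_cache_scan;
	cache->shrinker->seeks = DEFAULT_SEEKS;
	cache->shrinker->private_data = cache;

	/* Make it visible to shrink_slab() only after it is fully set up. */
	shrinker_register(cache->shrinker);
	return 0;
}

static void foo_cache_destroy_shrinker(struct foo_cache *cache)
{
	/* Unregisters (if registered) and frees the shrinker instance. */
	shrinker_free(cache->shrinker);
}
```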
In order to simplify the shrinker-related APIs and make the shrinker more
independent of other kernel mechanisms, subsequent patches will use the above
APIs to convert all shrinkers (including cases a and b) to being dynamically
allocated, and then remove all the existing APIs.
This also has another advantage, mentioned by Dave Chinner:
```
The other advantage of this is that it will break all the existing
out of tree code and third party modules using the old API and will
no longer work with a kernel using lockless slab shrinkers. They
need to break (both at the source and binary levels) to stop bad
things from happening due to using unconverted shrinkers in the new
setup.
```
Signed-off-by: Qi Zheng <zhengqi.arch at bytedance.com>
---
include/linux/shrinker.h | 7 +++
mm/internal.h | 11 +++++
mm/shrinker.c | 101 +++++++++++++++++++++++++++++++++++++++
mm/shrinker_debug.c | 17 ++++++-
4 files changed, 134 insertions(+), 2 deletions(-)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 8dc15aa37410..cc23ff0aee20 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -70,6 +70,8 @@ struct shrinker {
int seeks; /* seeks to recreate an obj */
unsigned flags;
+ void *private_data;
+
/* These are for internal use */
struct list_head list;
#ifdef CONFIG_MEMCG
@@ -95,6 +97,11 @@ struct shrinker {
* non-MEMCG_AWARE shrinker should not have this flag set.
*/
#define SHRINKER_NONSLAB (1 << 3)
+#define SHRINKER_ALLOCATED (1 << 4)
+
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
const char *fmt, ...);
diff --git a/mm/internal.h b/mm/internal.h
index b98c29f0a471..7b882b903b82 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1152,6 +1152,9 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
#ifdef CONFIG_SHRINKER_DEBUG
extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
+ const char *fmt, va_list ap);
+extern void shrinker_debugfs_name_free(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
@@ -1161,6 +1164,14 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
+static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
+ const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
+{
+}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id)
{
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 043c87ccfab4..43a375f954f3 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -550,6 +550,107 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
return freed;
}
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...)
+{
+ struct shrinker *shrinker;
+ unsigned int size;
+ va_list ap;
+ int err;
+
+ shrinker = kzalloc(sizeof(struct shrinker), GFP_KERNEL);
+ if (!shrinker)
+ return NULL;
+
+ va_start(ap, fmt);
+ err = shrinker_debugfs_name_alloc(shrinker, fmt, ap);
+ va_end(ap);
+ if (err)
+ goto err_name;
+
+ shrinker->flags = flags | SHRINKER_ALLOCATED;
+
+ if (flags & SHRINKER_MEMCG_AWARE) {
+ err = prealloc_memcg_shrinker(shrinker);
+ if (err == -ENOSYS)
+ shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
+ else if (err == 0)
+ goto done;
+ else
+ goto err_flags;
+ }
+
+ /*
+ * The nr_deferred is available on per memcg level for memcg aware
+ * shrinkers, so only allocate nr_deferred in the following cases:
+ * - non memcg aware shrinkers
+ * - !CONFIG_MEMCG
+ * - memcg is disabled by kernel command line
+ */
+ size = sizeof(*shrinker->nr_deferred);
+ if (flags & SHRINKER_NUMA_AWARE)
+ size *= nr_node_ids;
+
+ shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
+ if (!shrinker->nr_deferred)
+ goto err_flags;
+
+done:
+ return shrinker;
+
+err_flags:
+ shrinker_debugfs_name_free(shrinker);
+err_name:
+ kfree(shrinker);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(shrinker_alloc);
+
+void shrinker_register(struct shrinker *shrinker)
+{
+ if (unlikely(!(shrinker->flags & SHRINKER_ALLOCATED))) {
+ pr_warn("Must use shrinker_alloc() to dynamically allocate the shrinker\n");
+ return;
+ }
+
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ shrinker->flags |= SHRINKER_REGISTERED;
+ shrinker_debugfs_add(shrinker);
+ up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL_GPL(shrinker_register);
+
+void shrinker_free(struct shrinker *shrinker)
+{
+ struct dentry *debugfs_entry = NULL;
+ int debugfs_id;
+
+ if (!shrinker)
+ return;
+
+ down_write(&shrinker_rwsem);
+ if (shrinker->flags & SHRINKER_REGISTERED) {
+ list_del(&shrinker->list);
+ debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
+ shrinker->flags &= ~SHRINKER_REGISTERED;
+ } else {
+ shrinker_debugfs_name_free(shrinker);
+ }
+
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+ unregister_memcg_shrinker(shrinker);
+ up_write(&shrinker_rwsem);
+
+ if (debugfs_entry)
+ shrinker_debugfs_remove(debugfs_entry, debugfs_id);
+
+ kfree(shrinker->nr_deferred);
+ shrinker->nr_deferred = NULL;
+
+ kfree(shrinker);
+}
+EXPORT_SYMBOL_GPL(shrinker_free);
+
/*
* Add a shrinker callback to be called from the vm.
*/
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 61702bdc1af4..aa2027075ed9 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -191,6 +191,20 @@ int shrinker_debugfs_add(struct shrinker *shrinker)
return 0;
}
+int shrinker_debugfs_name_alloc(struct shrinker *shrinker, const char *fmt,
+ va_list ap)
+{
+ shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+
+ return shrinker->name ? 0 : -ENOMEM;
+}
+
+void shrinker_debugfs_name_free(struct shrinker *shrinker)
+{
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+}
+
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
struct dentry *entry;
@@ -239,8 +253,7 @@ struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
lockdep_assert_held(&shrinker_rwsem);
- kfree_const(shrinker->name);
- shrinker->name = NULL;
+ shrinker_debugfs_name_free(shrinker);
*debugfs_id = entry ? shrinker->debugfs_id : -1;
shrinker->debugfs_entry = NULL;
--
2.30.2