[PATCH RFC 6/6] mm, slub: sheaf prefilling for guaranteed allocations
Hyeonggon Yoo
42.hyeyoo at gmail.com
Mon Nov 18 05:13:49 PST 2024
On Wed, Nov 13, 2024 at 1:39 AM Vlastimil Babka <vbabka at suse.cz> wrote:
>
> Add three functions for efficient guaranteed allocations in a critical
> section (that cannot sleep) when the exact number of allocations is not
> known beforehand, but an upper limit can be calculated.
>
> kmem_cache_prefill_sheaf() returns a sheaf containing at least given
> number of objects.
>
> kmem_cache_alloc_from_sheaf() will allocate an object from the sheaf
> and is guaranteed not to fail until depleted.
>
> kmem_cache_return_sheaf() is for giving the sheaf back to the slab
> allocator after the critical section. This will also attempt to refill
> it to cache's sheaf capacity for better efficiency of sheaves handling,
> but it's not strictly necessary to succeed.
>
> TODO: the current implementation is limited to cache's sheaf_capacity
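
For other readers of the series: if I understand the intended flow, usage
would look roughly like the sketch below (my_cache, my_lock and the count
of 5 are made up for illustration, not part of this patch):

	/* sleepable context: prefill up to the computed upper bound */
	sheaf = kmem_cache_prefill_sheaf(my_cache, GFP_KERNEL, 5);
	if (!sheaf)
		return -ENOMEM;

	spin_lock(&my_lock);
	/* atomic context: guaranteed to succeed up to 5 times */
	obj = kmem_cache_alloc_from_sheaf(my_cache, GFP_KERNEL, sheaf);
	...
	spin_unlock(&my_lock);

	/* sleepable again: return the sheaf, possibly refilled */
	kmem_cache_return_sheaf(my_cache, GFP_KERNEL, sheaf);
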
>
> Signed-off-by: Vlastimil Babka <vbabka at suse.cz>
> ---
> include/linux/slab.h | 11 ++++
> mm/slub.c | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 160 insertions(+)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 23904321992ad2eeb9389d0883cf4d5d5d71d896..a87dc3c6392fe235de2eabe1792df86d40c3bbf9 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -820,6 +820,17 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
> int node) __assume_slab_alignment __malloc;
> #define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
>
> +struct slab_sheaf *
> +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
> +
> +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
> + struct slab_sheaf *sheaf);
> +
> +void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
> + struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
> +#define kmem_cache_alloc_from_sheaf(...) \
> + alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
> +
> /*
> * These macros allow declaring a kmem_buckets * parameter alongside size, which
> * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
> diff --git a/mm/slub.c b/mm/slub.c
> index 1900afa6153ca6d88f9df7db3ce84d98629489e7..a0e2cb7dfb5173f39f36bea1eb9760c3c1b99dd7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -444,6 +444,7 @@ struct slab_sheaf {
> union {
> struct rcu_head rcu_head;
> struct list_head barn_list;
> + bool oversize;
> };
> struct kmem_cache *cache;
> unsigned int size;
> @@ -2819,6 +2820,30 @@ static int barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf,
> return ret;
> }
>
> +static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
> +{
> + struct slab_sheaf *sheaf = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&barn->lock, flags);
> +
> + if (barn->nr_empty) {
> + sheaf = list_first_entry(&barn->sheaves_empty,
> + struct slab_sheaf, barn_list);
> + list_del(&sheaf->barn_list);
> + barn->nr_empty--;
> + } else if (barn->nr_full) {
> + sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
> + barn_list);
> + list_del(&sheaf->barn_list);
> + barn->nr_full--;
> + }
> +
> + spin_unlock_irqrestore(&barn->lock, flags);
> +
> + return sheaf;
> +}
> +
> /*
> * If a full sheaf is available, return it and put the supplied empty one to
> * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
> @@ -4893,6 +4918,130 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int nod
> }
> EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
>
> +
> +/*
> + * returns a sheaf that has at least the given count of objects
> + * when prefilling is needed, do so with the given gfp flags
> + *
> + * return NULL if prefilling failed, or when the requested count is
> + * above cache's sheaf_capacity (TODO: lift this limitation)
> + */
> +struct slab_sheaf *
> +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
> +{
> + struct slub_percpu_sheaves *pcs;
> + struct slab_sheaf *sheaf = NULL;
> +
> + //TODO: handle via oversize sheaf
> + if (count > s->sheaf_capacity)
> + return NULL;
> +
> + pcs = cpu_sheaves_lock(s->cpu_sheaves);
> +
> + if (pcs->spare && pcs->spare->size > 0) {
> + sheaf = pcs->spare;
> + pcs->spare = NULL;
> + }
> +
> + if (!sheaf)
> + sheaf = barn_get_full_or_empty_sheaf(pcs->barn);
> +
> + cpu_sheaves_unlock(s->cpu_sheaves);
> +
> + if (!sheaf)
> + sheaf = alloc_empty_sheaf(s, gfp);
> +
> + if (sheaf && sheaf->size < count) {
> + if (refill_sheaf(s, sheaf, gfp)) {
> + sheaf_flush(s, sheaf);
> + free_empty_sheaf(s, sheaf);
> + sheaf = NULL;
> + }
> + }
> +
> + return sheaf;
> +}
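
By the way, for the oversize TODO above, I imagine something like the
untested sketch below could work? It assumes struct slab_sheaf keeps its
flexible objects[] array and reuses the new oversize flag; the helper
name alloc_oversize_sheaf() is made up:

	static struct slab_sheaf *alloc_oversize_sheaf(struct kmem_cache *s,
					gfp_t gfp, unsigned int count)
	{
		struct slab_sheaf *sheaf;

		/* a one-off sheaf sized for the request, never put in a barn */
		sheaf = kzalloc(struct_size(sheaf, objects, count), gfp);
		if (!sheaf)
			return NULL;

		sheaf->cache = s;
		sheaf->oversize = true;

		if (kmem_cache_alloc_bulk(s, gfp, count, sheaf->objects) != count) {
			/* bulk alloc is all-or-nothing, nothing to clean up */
			kfree(sheaf);
			return NULL;
		}

		sheaf->size = count;
		return sheaf;
	}
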
> +
> +/*
> + * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
> + * It tries to refill the sheaf back to the cache's sheaf_capacity
> + * to avoid handling partially full sheaves.
> + *
> + * If the refill fails because gfp is e.g. GFP_NOWAIT, the sheaf is
> + * instead dissolved.
> + */
> +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
> + struct slab_sheaf *sheaf)
> +{
> + struct slub_percpu_sheaves *pcs;
> + bool refill = false;
> + struct node_barn *barn;
> +
> + //TODO: handle oversize sheaf
> +
> + pcs = cpu_sheaves_lock(s->cpu_sheaves);
> +
> + if (!pcs->spare) {
> + pcs->spare = sheaf;
> + sheaf = NULL;
> + }
> +
> + /* racy check */
> + if (!sheaf && pcs->barn->nr_full >= MAX_FULL_SHEAVES) {
> + barn = pcs->barn;
> + refill = true;
> + }
> +
> + cpu_sheaves_unlock(s->cpu_sheaves);
> +
> + if (!sheaf)
> + return;
> +
> + /*
> + * if the barn is full of full sheaves or we fail to refill the sheaf,
> + * simply flush and free it
> + */
> + if (!refill || refill_sheaf(s, sheaf, gfp)) {
> + sheaf_flush(s, sheaf);
> + free_empty_sheaf(s, sheaf);
> + return;
> + }
> +
> + /* we racily determined the sheaf would fit, so now force it */
> + barn_put_full_sheaf(barn, sheaf, true);
> +}
> +
> +/*
> + * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
> + *
> + * Guaranteed not to fail for as many allocations as the requested count.
> + * Once the sheaf is depleted it fails; no fallback to the slab cache itself.
> + *
> + * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT.
> + * The memcg charging is forced over limit if necessary, to avoid failure.
> + */
> +void *
> +kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
> + struct slab_sheaf *sheaf)
> +{
> + void *ret = NULL;
> + bool init;
> +
> + if (sheaf->size == 0)
> + goto out;
> +
> + ret = sheaf->objects[--sheaf->size];
> +
> + init = slab_want_init_on_alloc(gfp, s);
> +
> + /* add __GFP_NOFAIL to force successful memcg charging */
> + slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
Maybe I'm missing something, but how can this be used in non-sleepable
contexts if __GFP_NOFAIL is used? I think we have to charge the objects
when the sheaf is handed out by kmem_cache_prefill_sheaf(), just like
users of bulk alloc/free do?
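
Just to illustrate (completely untested), the charging could perhaps be
done in one batch while still sleepable, reusing the bool return of
slab_post_alloc_hook() the way kmem_cache_alloc_bulk_noprof() does; the
helper name below is made up:

	static int prefill_charge_objects(struct kmem_cache *s,
					  struct slab_sheaf *sheaf, gfp_t gfp)
	{
		/*
		 * Charge/init all prefilled objects up front so that
		 * kmem_cache_alloc_from_sheaf() only has to pop them.
		 */
		if (!slab_post_alloc_hook(s, NULL, gfp, sheaf->size,
					  sheaf->objects,
					  slab_want_init_on_alloc(gfp, s),
					  s->object_size))
			return -ENOMEM;

		return 0;
	}
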
Best,
Hyeonggon
> +out:
> + trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
> +
> + return ret;
> +}
> +
> /*
> * To avoid unnecessary overhead, we pass through large allocation requests
> * directly to the page allocator. We use __GFP_COMP, because we will need to
>
> --
> 2.47.0
>