[PATCH v2 6/7] mm, slab: call kvfree_rcu_barrier() from kmem_cache_destroy()
Keith Busch
kbusch at kernel.org
Wed Feb 26 07:51:37 PST 2025
On Tue, Feb 25, 2025 at 07:21:19PM +0100, Uladzislau Rezki wrote:
> WQ_MEM_RECLAIM-patch fixes this for me:
This is successful with the new kunit test for me as well. I can't
readily test this in production, where I first encountered this issue
(at least not in the near term), but for what it's worth, this looks
like a good change to me.
Reviewed-by: Keith Busch <kbusch at kernel.org>
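
For anyone hitting this thread later, my understanding of why
WQ_MEM_RECLAIM matters here: a workqueue created with that flag gets a
dedicated rescuer thread, so its work items can still make forward
progress when the system is too short on memory to spawn new workers.
A minimal sketch of the allocation pattern the patch uses (the names
below are hypothetical, not from the patch itself):

#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread, so work queued
	 * here can run even under memory pressure; WQ_UNBOUND lets
	 * items run on any CPU; a max_active of 0 picks the default
	 * concurrency limit.
	 */
	example_wq = alloc_workqueue("example_wq",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}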
> <snip>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 4030907b6b7d..1b5ed5512782 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1304,6 +1304,8 @@ module_param(rcu_min_cached_objs, int, 0444);
> static int rcu_delay_page_cache_fill_msec = 5000;
> module_param(rcu_delay_page_cache_fill_msec, int, 0444);
>
> +static struct workqueue_struct *rcu_reclaim_wq;
> +
> /* Maximum number of jiffies to wait before draining a batch. */
> #define KFREE_DRAIN_JIFFIES (5 * HZ)
> #define KFREE_N_BATCHES 2
> @@ -1632,10 +1634,10 @@ __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
> if (delayed_work_pending(&krcp->monitor_work)) {
> delay_left = krcp->monitor_work.timer.expires - jiffies;
> if (delay < delay_left)
> - mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
> + mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
> return;
> }
> - queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
> + queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
> }
>
> static void
> @@ -1733,7 +1735,7 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
> // "free channels", the batch can handle. Break
> // the loop since it is done with this CPU thus
> // queuing an RCU work is _always_ success here.
> - queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
> + queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work);
> WARN_ON_ONCE(!queued);
> break;
> }
> @@ -1883,7 +1885,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
> if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> !atomic_xchg(&krcp->work_in_progress, 1)) {
> if (atomic_read(&krcp->backoff_page_cache_fill)) {
> - queue_delayed_work(system_unbound_wq,
> + queue_delayed_work(rcu_reclaim_wq,
> &krcp->page_cache_work,
> msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
> } else {
> @@ -2120,6 +2122,10 @@ void __init kvfree_rcu_init(void)
> int i, j;
> struct shrinker *kfree_rcu_shrinker;
>
> + rcu_reclaim_wq = alloc_workqueue("rcu_reclaim",
> + WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
> + WARN_ON(!rcu_reclaim_wq);
> +
> /* Clamp it to [0:100] seconds interval. */
> if (rcu_delay_page_cache_fill_msec < 0 ||
> rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
> <snip>
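
And for context on why the barrier needs a rescuer-backed queue: this
series makes kmem_cache_destroy() call kvfree_rcu_barrier(), which
flushes the kfree_rcu work items queued above. If a cache is torn down
from a path that is itself part of memory reclaim, and those items sit
on a queue without WQ_MEM_RECLAIM, the flush can wait forever for a
worker that reclaim is blocking. Roughly (hypothetical driver code,
just to illustrate the call chain):

#include <linux/slab.h>

/* Hypothetical teardown path that may run during memory reclaim. */
static void example_destroy_cache(struct kmem_cache *cache)
{
	/*
	 * kmem_cache_destroy() now calls kvfree_rcu_barrier(), which
	 * blocks until every pending kvfree_rcu() batch has been
	 * processed - so those work items must live on a
	 * WQ_MEM_RECLAIM queue to guarantee forward progress.
	 */
	kmem_cache_destroy(cache);
}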