[RFC PATCH v2 7/7] futex: Use runtime constants for __futex_hash() hot path

Samuel Holland samuel.holland at sifive.com
Mon Mar 16 20:06:40 PDT 2026


Hi Prateek,

On 2026-03-16 12:24 AM, K Prateek Nayak wrote:
> From: Peter Zijlstra <peterz at infradead.org>
> 
> Runtime constify the read-only after init data __futex_shift(shift_32),
> __futex_mask(mask_32), and __futex_queues(ptr) used in the __futex_hash()
> hot path to avoid referencing global variables.
> 
> This also allows __futex_queues to be allocated dynamically to
> "nr_node_ids" slots instead of reserving config dependent MAX_NUMNODES
> (1 << CONFIG_NODES_SHIFT) worth of slots upfront.
> 
> No functional changes intended.
> 
>   [ prateek: Dynamically allocate __futex_queues, mark the global data
>     __ro_after_init since they are constified after futex_init(). ]
> 
> Link: https://patch.msgid.link/20260227161841.GH606826@noisy.programming.kicks-ass.net
> Reported-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de> # MAX_NUMNODES bloat
> Not-yet-signed-off-by: Peter Zijlstra <peterz at infradead.org>
> Signed-off-by: K Prateek Nayak <kprateek.nayak at amd.com>
> ---
>  include/asm-generic/vmlinux.lds.h |  5 +++-
>  kernel/futex/core.c               | 42 +++++++++++++++++--------------
>  2 files changed, 27 insertions(+), 20 deletions(-)
> 
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index 1e1580febe4b..86f99fa6ae24 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -975,7 +975,10 @@
>  		RUNTIME_CONST(shift, d_hash_shift)			\
>  		RUNTIME_CONST(ptr, dentry_hashtable)			\
>  		RUNTIME_CONST(ptr, __dentry_cache)			\
> -		RUNTIME_CONST(ptr, __names_cache)
> +		RUNTIME_CONST(ptr, __names_cache)			\
> +		RUNTIME_CONST(shift, __futex_shift)			\
> +		RUNTIME_CONST(mask,  __futex_mask)			\
> +		RUNTIME_CONST(ptr,   __futex_queues)
>  
>  /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
>  #define KUNIT_TABLE()							\
> diff --git a/kernel/futex/core.c b/kernel/futex/core.c
> index cf7e610eac42..6b5c5a1596a5 100644
> --- a/kernel/futex/core.c
> +++ b/kernel/futex/core.c
> @@ -45,23 +45,19 @@
>  #include <linux/mempolicy.h>
>  #include <linux/mmap_lock.h>
>  
> +#include <asm/runtime-const.h>
> +
>  #include "futex.h"
>  #include "../locking/rtmutex_common.h"
>  
> -/*
> - * The base of the bucket array and its size are always used together
> - * (after initialization only in futex_hash()), so ensure that they
> - * reside in the same cacheline.
> - */
> -static struct {
> -	unsigned long            hashmask;
> -	unsigned int		 hashshift;
> -	struct futex_hash_bucket *queues[MAX_NUMNODES];
> -} __futex_data __read_mostly __aligned(2*sizeof(long));
> +static u32 __futex_mask __ro_after_init;
> +static u32 __futex_shift __ro_after_init;
> +static struct futex_hash_bucket **__futex_queues __ro_after_init;
>  
> -#define futex_hashmask	(__futex_data.hashmask)
> -#define futex_hashshift	(__futex_data.hashshift)
> -#define futex_queues	(__futex_data.queues)
> +static __always_inline struct futex_hash_bucket **futex_queues(void)
> +{
> +	return runtime_const_ptr(__futex_queues);
> +}
>  
>  struct futex_private_hash {
>  	int		state;
> @@ -439,14 +435,14 @@ __futex_hash(union futex_key *key, struct futex_private_hash *fph)
>  		 * NOTE: this isn't perfectly uniform, but it is fast and
>  		 * handles sparse node masks.
>  		 */
> -		node = (hash >> futex_hashshift) % nr_node_ids;
> +		node = runtime_const_shift_right_32(hash, __futex_shift) % nr_node_ids;
>  		if (!node_possible(node)) {
>  			node = find_next_bit_wrap(node_possible_map.bits,
>  						  nr_node_ids, node);
>  		}
>  	}
>  
> -	return &futex_queues[node][hash & futex_hashmask];
> +	return &futex_queues()[node][runtime_const_mask_32(hash, __futex_mask)];
>  }
>  
>  /**
> @@ -1913,7 +1909,7 @@ int futex_hash_allocate_default(void)
>  	 *   16 <= threads * 4 <= global hash size
>  	 */
>  	buckets = roundup_pow_of_two(4 * threads);
> -	buckets = clamp(buckets, 16, futex_hashmask + 1);
> +	buckets = clamp(buckets, 16, __futex_mask + 1);
>  
>  	if (current_buckets >= buckets)
>  		return 0;
> @@ -1983,10 +1979,19 @@ static int __init futex_init(void)
>  	hashsize = max(4, hashsize);
>  	hashsize = roundup_pow_of_two(hashsize);
>  #endif
> -	futex_hashshift = ilog2(hashsize);
> +	__futex_mask = hashsize - 1;
> +	__futex_shift = ilog2(hashsize);

__futex_mask is always a power of two minus 1, in other words all low bits set.
Would it be worth using an n-bit zero extension operation instead of an
arbitrary 32-bit mask? This would use fewer instructions on some architectures:
for example a single ubfx on arm64 and slli+srli on riscv.

Regards,
Samuel




More information about the linux-riscv mailing list