[PATCH v2] NUMA: Early use of cpu_to_node() returns 0 instead of the correct node id
Huang Shijie
shijie at os.amperecomputing.com
Mon Jan 22 20:58:43 PST 2024
During kernel boot, the generic cpu_to_node() is called too early on
arm64, powerpc and riscv when CONFIG_NUMA is enabled.

On arm64/powerpc/riscv, there are at least four places in the common code
where the generic cpu_to_node() is called before the per-cpu numa_node
variables are initialized (the generic fallback is sketched after this list):
1.) early_trace_init() in kernel/trace/trace.c
2.) sched_init() in kernel/sched/core.c
3.) init_sched_fair_class() in kernel/sched/fair.c
4.) workqueue_init_early() in kernel/workqueue.c
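
For reference, here is a minimal sketch of the pre-patch generic fallback
(the inline removed from include/linux/topology.h by this patch) that those
four callers hit: the per-cpu numa_node variable still holds its default
value 0 until the architecture calls set_cpu_numa_node(), so every early
caller is told node 0 regardless of the real topology.

	/* Generic fallback before this patch (sketch). */
	static inline int cpu_to_node(int cpu)
	{
		/* Reads 0 until set_cpu_numa_node() has run for @cpu. */
		return per_cpu(numa_node, cpu);
	}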
In order to fix the bug, this patch changes the generic cpu_to_node()
into a function pointer and exports it for kernel modules.

Introduce smp_prepare_boot_cpu_start() to wrap the original
smp_prepare_boot_cpu(), and set cpu_to_node to early_cpu_to_node().

Introduce smp_prepare_cpus_done() to wrap the original smp_prepare_cpus(),
and switch cpu_to_node to the formal _cpu_to_node().
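
Since EXPORT_SYMBOL(cpu_to_node) makes the pointer visible to modules, a
loadable module can keep calling cpu_to_node() unchanged. The snippet below
is a hypothetical example (alloc_on_cpu_node() is not part of this patch)
of node-aware allocation through the exported pointer:

	#include <linux/topology.h>
	#include <linux/slab.h>

	/* Hypothetical helper: place an allocation on @cpu's node. */
	static void *alloc_on_cpu_node(int cpu, size_t size)
	{
		/* Calls through the exported cpu_to_node pointer. */
		return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	}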
Signed-off-by: Huang Shijie <shijie at os.amperecomputing.com>
---
v1 --> v2:
In order to fix the x86 compile error, move cpu_to_node()
from drivers/base/arch_numa.c to drivers/base/node.c.
v1: http://lists.infradead.org/pipermail/linux-arm-kernel/2024-January/896160.html
An older version of this patch, under a different title:
http://lists.infradead.org/pipermail/linux-arm-kernel/2024-January/895963.html
---
 drivers/base/node.c      | 11 +++++++++++
 include/linux/topology.h |  6 ++----
 init/main.c              | 29 +++++++++++++++++++++++++++--
 3 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1c05640461dd..477d58c12886 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -976,3 +976,14 @@ void __init node_dev_init(void)
panic("%s() failed to add node: %d\n", __func__, ret);
}
}
+
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+#ifndef cpu_to_node
+int _cpu_to_node(int cpu)
+{
+ return per_cpu(numa_node, cpu);
+}
+int (*cpu_to_node)(int cpu);
+EXPORT_SYMBOL(cpu_to_node);
+#endif
+#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 52f5850730b3..e7ce2bae11dd 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -91,10 +91,8 @@ static inline int numa_node_id(void)
#endif
#ifndef cpu_to_node
-static inline int cpu_to_node(int cpu)
-{
- return per_cpu(numa_node, cpu);
-}
+extern int (*cpu_to_node)(int cpu);
+extern int _cpu_to_node(int cpu);
#endif
#ifndef set_numa_node
diff --git a/init/main.c b/init/main.c
index e24b0780fdff..b142e9c51161 100644
--- a/init/main.c
+++ b/init/main.c
@@ -870,6 +870,18 @@ static void __init print_unknown_bootoptions(void)
memblock_free(unknown_options, len);
}
+static void __init smp_prepare_boot_cpu_start(void)
+{
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+#ifndef cpu_to_node
+ /* early_cpu_to_node() should be ready by now. */
+ cpu_to_node = early_cpu_to_node;
+#endif
+#endif
+}
+
asmlinkage __visible __init __no_sanitize_address __noreturn __no_stack_protector
void start_kernel(void)
{
@@ -899,7 +911,7 @@ void start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
- smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+ smp_prepare_boot_cpu_start();
boot_cpu_hotplug_init();
pr_notice("Kernel command line: %s\n", saved_command_line);
@@ -1519,6 +1531,19 @@ void __init console_on_rootfs(void)
fput(file);
}
+static void __init smp_prepare_cpus_done(unsigned int setup_max_cpus)
+{
+ /* Different ARCHs may override smp_prepare_cpus() */
+ smp_prepare_cpus(setup_max_cpus);
+
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+#ifndef cpu_to_node
+ /* Change to the formal function. */
+ cpu_to_node = _cpu_to_node;
+#endif
+#endif
+}
+
static noinline void __init kernel_init_freeable(void)
{
/* Now the scheduler is fully set up and can do blocking allocations */
@@ -1531,7 +1556,7 @@ static noinline void __init kernel_init_freeable(void)
cad_pid = get_pid(task_pid(current));
- smp_prepare_cpus(setup_max_cpus);
+ smp_prepare_cpus_done(setup_max_cpus);
workqueue_init();
--
2.40.1