From d02e3244c436234d0d07500be6d4df64feb2052a Mon Sep 17 00:00:00 2001
From: William Cohen
Date: Mon, 10 Nov 2014 14:26:44 -0500
Subject: [PATCH] Correct the race condition in aarch64_insn_patch_text_sync()

When experimenting with patches to provide kprobes support for aarch64,
SMP machines would hang when inserting breakpoints into kernel code.
The hangs were caused by a race condition in the code called by
aarch64_insn_patch_text_sync().  The first processor in the
aarch64_insn_patch_text_cb() function would patch the code while other
processors were still entering the function and incrementing the
cpu_count field.  As a result, some processors never observed the exit
condition and never left the function, hanging the system.

The patching function now waits for all processors to enter
aarch64_insn_patch_text_cb() before changing the code, ensuring that
none of them is executing instructions that are about to be patched.
Once all the processors have entered the function, the last one to
arrive performs the patching and signals completion with one final
increment of the cpu_count field, making it num_online_cpus() + 1.

Signed-off-by: William Cohen
---
 arch/arm64/kernel/insn.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e007714..4fdddf1 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -151,10 +151,13 @@ struct aarch64_insn_patch {
 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 {
 	int i, ret = 0;
+	int count = num_online_cpus();
 	struct aarch64_insn_patch *pp = arg;
 
-	/* The first CPU becomes master */
-	if (atomic_inc_return(&pp->cpu_count) == 1) {
+	/* Make sure all the processors are in this function
+	   before patching the code. The last CPU to enter this
+	   function does the update. */
+	if (atomic_inc_return(&pp->cpu_count) == count) {
 		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
 			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
 							     pp->new_insns[i]);
@@ -163,9 +166,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 		 * which ends with "dsb; isb" pair guaranteeing global
 		 * visibility.
 		 */
-		atomic_set(&pp->cpu_count, -1);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
 	} else {
-		while (atomic_read(&pp->cpu_count) != -1)
+		while (atomic_read(&pp->cpu_count) <= count)
 			cpu_relax();
 		isb();
 	}
--
1.8.3.1
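
For readers who want to experiment with the rendezvous pattern this patch
implements, here is a minimal userspace sketch using C11 atomics and
pthreads.  It is an analogue, not kernel code: patch_cb, patched_value,
and NTHREADS are hypothetical stand-ins, atomic_fetch_add(&cpu_count, 1)
plus one plays the role of atomic_inc_return(), and the empty busy-wait
loop stands in for cpu_relax()/isb().

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int cpu_count;
static int patched_value;	/* stands in for the patched instruction */

static void *patch_cb(void *arg)
{
	int count = NTHREADS;	/* analogous to num_online_cpus() */

	/*
	 * Every thread increments the counter on entry.  The last one
	 * to arrive (increment result == count) performs the update,
	 * then makes one extra increment to release the spinners.
	 */
	if (atomic_fetch_add(&cpu_count, 1) + 1 == count) {
		patched_value = 42;	/* the "patch" itself */
		/* Notify the other threads with an additional increment. */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		/* Spin until the updater bumps the counter past count. */
		while (atomic_load(&cpu_count) <= count)
			;	/* userspace stand-in for cpu_relax() */
	}

	printf("thread %ld sees patched_value = %d\n",
	       (long)(intptr_t)arg, patched_value);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	intptr_t i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, patch_cb, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Because C11 atomics default to sequentially consistent ordering, the
store to patched_value by the last-arriving thread is visible to the
spinners once they observe the counter exceed count; in the kernel patch
the analogous visibility guarantee comes from flush_icache_range()'s
"dsb; isb" pair plus the waiters' isb().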