[PATCH 1/4] locking/atomic/x86: Silence intentional wrapping addition
Kees Cook
keescook at chromium.org
Wed Apr 24 12:17:34 PDT 2024
Use wrapping_add() to annotate the additions in arch_atomic_add_return(),
arch_atomic64_fetch_add(), and arch_atomic64_add_return() as expecting to
wrap around.
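
For reference, a minimal userspace sketch of the wrapping_add()
semantics (assuming, as in <linux/overflow.h>, a statement-expression
macro built on __builtin_add_overflow(); details of the in-tree helper
may differ):

	#include <limits.h>
	#include <stdio.h>

	/* Sketch of wrapping_add(): addition with defined wraparound. */
	#define wrapping_add(type, a, b)			\
	({							\
		type __val;					\
		__builtin_add_overflow(a, b, &__val);		\
		__val;						\
	})

	int main(void)
	{
		/* Wraps to INT_MIN without tripping the signed-overflow sanitizer. */
		printf("%d\n", wrapping_add(int, INT_MAX, 1));
		return 0;
	}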
Signed-off-by: Kees Cook <keescook at chromium.org>
---
Cc: Will Deacon <will at kernel.org>
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Boqun Feng <boqun.feng at gmail.com>
Cc: Mark Rutland <mark.rutland at arm.com>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: Ingo Molnar <mingo at redhat.com>
Cc: Borislav Petkov <bp at alien8.de>
Cc: Dave Hansen <dave.hansen at linux.intel.com>
Cc: x86 at kernel.org
Cc: "H. Peter Anvin" <hpa at zytor.com>
---
arch/x86/include/asm/atomic.h | 3 ++-
arch/x86/include/asm/atomic64_32.h | 2 +-
arch/x86/include/asm/atomic64_64.h | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
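
For context, a hedged userspace analogue of the add_return() pattern
being annotated below (GCC's __atomic_fetch_add() stands in for the
x86 xadd(); the my_* names are illustrative, not kernel API):

	#include <limits.h>
	#include <stdio.h>

	#define wrapping_add(type, a, b)			\
	({							\
		type __val;					\
		__builtin_add_overflow(a, b, &__val);		\
		__val;						\
	})

	/* Illustrative stand-in for atomic_t. */
	typedef struct { int counter; } my_atomic_t;

	static int my_add_return(int i, my_atomic_t *v)
	{
		/* Like xadd(), __atomic_fetch_add() returns the old value. */
		int old = __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);

		/* Reconstructing the new value is the addition that may wrap. */
		return wrapping_add(int, old, i);
	}

	int main(void)
	{
		my_atomic_t v = { .counter = INT_MAX };

		printf("%d\n", my_add_return(1, &v));	/* prints INT_MIN */
		return 0;
	}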
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 55a55ec04350..a5862a258760 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,6 +3,7 @@
#define _ASM_X86_ATOMIC_H
#include <linux/compiler.h>
+#include <linux/overflow.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
@@ -82,7 +83,7 @@ static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
- return i + xadd(&v->counter, i);
+ return wrapping_add(int, i, xadd(&v->counter, i));
}
#define arch_atomic_add_return arch_atomic_add_return
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 3486d91b8595..608b100e8ffe 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -254,7 +254,7 @@ static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 old, c = 0;
- while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
+ while ((old = arch_atomic64_cmpxchg(v, c, wrapping_add(s64, c, i))) != c)
c = old;
return old;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3165c0feedf7..f1dc8aa54b52 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -76,7 +76,7 @@ static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
- return i + xadd(&v->counter, i);
+ return wrapping_add(s64, i, xadd(&v->counter, i));
}
#define arch_atomic64_add_return arch_atomic64_add_return
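
For completeness, a similar userspace sketch of the cmpxchg-style
fetch_add loop touched in atomic64_32.h above (GCC's
__atomic_compare_exchange_n() stands in for arch_atomic64_cmpxchg();
again, the my_* names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define wrapping_add(type, a, b)			\
	({							\
		type __val;					\
		__builtin_add_overflow(a, b, &__val);		\
		__val;						\
	})

	/* Illustrative stand-in for atomic64_t on a 32-bit target. */
	typedef struct { int64_t counter; } my_atomic64_t;

	static int64_t my_fetch_add(int64_t i, my_atomic64_t *v)
	{
		int64_t old = 0;

		/*
		 * Retry until the compare-and-swap succeeds; on failure,
		 * "old" is updated to the current value. The new value
		 * is computed with defined wraparound.
		 */
		while (!__atomic_compare_exchange_n(&v->counter, &old,
						    wrapping_add(int64_t, old, i),
						    0, __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
		return old;
	}

	int main(void)
	{
		my_atomic64_t v = { .counter = INT64_MAX };

		printf("%lld\n", (long long)my_fetch_add(1, &v));
		return 0;
	}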
--
2.34.1