[RFC PATCH v5 3/5] riscv/atomic.h: Deduplicate arch_atomic.*
Leonardo Bras
leobras at redhat.com
Wed Aug 9 21:03:45 PDT 2023
Some functions use mostly the same asm for their 32-bit and 64-bit versions.
Introduce a macro that is generic enough to cover both, avoiding the code
duplication.

(This causes no change in the generated asm.)
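For illustration only (not part of the diff): because the sfx parameter is a
string literal, adjacent-literal concatenation at preprocessing time makes the
32-bit expansion reproduce the asm being removed, e.g.:

    /* _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w") expands to: */
    __asm__ __volatile__ (
            "0:     lr.w      %[p],  %[c]\n"
            "       beq       %[p],  %[u], 1f\n"
            "       add       %[rc], %[p], %[a]\n"
            "       sc.w.rl   %[rc], %[rc], %[c]\n"
            "       bnez      %[rc], 0b\n"
            "       fence rw, rw\n"
            "1:\n"
            : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
            : [a]"r" (a), [u]"r" (u)
            : "memory");

Passing "d" instead yields the identical lr.d/sc.d.rl sequence of the 64-bit
variant, which is why the generated asm does not change.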
Signed-off-by: Leonardo Bras <leobras at redhat.com>
Reviewed-by: Guo Ren <guoren at kernel.org>
Reviewed-by: Andrea Parri <parri.andrea at gmail.com>
---
arch/riscv/include/asm/atomic.h | 164 +++++++++++++++-----------------
1 file changed, 76 insertions(+), 88 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c2153..80cca7ac16fd 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " beq %[p], %[u], 1f\n" \
+ " add %[rc], %[p], %[a]\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : [a]"r" (_a), [u]"r" (_u) \
+ : "memory"); \
+})
+
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -222,77 +228,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bltz %[p], 1f\n" \
+ " addi %[rc], %[p], 1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bgtz %[p], 1f\n" \
+ " addi %[rc], %[p], -1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " addi %[rc], %[p], -1\n" \
+ " bltz %[rc], 1f\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
return prev - 1;
}
@@ -304,17 +319,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
return !(prev < 0);
}
@@ -325,17 +331,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
return !(prev > 0);
}
@@ -346,17 +343,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
return prev - 1;
}
--
2.41.0