[RFC PATCH 1/1] riscv/atomic.h: Deduplicate arch_atomic.*

Leonardo Bras Soares Passos leobras at redhat.com
Thu May 25 02:31:16 PDT 2023


Friendly ping?

On Wed, Apr 19, 2023 at 3:25 AM Leonardo Bras <leobras at redhat.com> wrote:
>
> Several atomic functions use nearly identical asm for their 32-bit and
> 64-bit versions, differing only in the width suffix of the lr/sc
> instructions ("w" vs "d").
>
> Introduce generic macros that take the width suffix as a parameter,
> and use them to remove the duplicated asm blocks.
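>
> The resulting callers then differ only in the suffix they pass down,
> e.g. for the two fetch_add_unless variants:
>
>        _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w"); /* 32-bit */
>        _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d"); /* 64-bit */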
>
> Signed-off-by: Leonardo Bras <leobras at redhat.com>
> ---
>  arch/riscv/include/asm/atomic.h | 164 +++++++++++++++-----------------
>  1 file changed, 76 insertions(+), 88 deletions(-)
>
> diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
> index 0dfe9d857a762..85eb2edbc8219 100644
> --- a/arch/riscv/include/asm/atomic.h
> +++ b/arch/riscv/include/asm/atomic.h
> @@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
>  #undef ATOMIC_FETCH_OP
>  #undef ATOMIC_OP_RETURN
>
> +#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx)        \
> +({                                                                     \
> +       __asm__ __volatile__ (                                          \
> +               "0:     lr." sfx "     %[p],  %[c]\n"                   \
> +               "       beq            %[p],  %[u], 1f\n"               \
> +               "       add            %[rc], %[p], %[a]\n"             \
> +               "       sc." sfx ".rl  %[rc], %[rc], %[c]\n"            \
> +               "       bnez           %[rc], 0b\n"                     \
> +               "       fence          rw, rw\n"                        \
> +               "1:\n"                                                  \
> +               : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)  \
> +               : [a]"r" (_a), [u]"r" (_u)                              \
> +               : "memory");                                            \
> +})
> +
>  /* This is required to provide a full barrier on success. */
>  static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
>  {
>         int prev, rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.w     %[p],  %[c]\n"
> -               "       beq      %[p],  %[u], 1f\n"
> -               "       add      %[rc], %[p], %[a]\n"
> -               "       sc.w.rl  %[rc], %[rc], %[c]\n"
> -               "       bnez     %[rc], 0b\n"
> -               "       fence    rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               : [a]"r" (a), [u]"r" (u)
> -               : "memory");
> +       _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
> +
>         return prev;
>  }
>  #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
> @@ -222,17 +228,8 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
>         s64 prev;
>         long rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.d     %[p],  %[c]\n"
> -               "       beq      %[p],  %[u], 1f\n"
> -               "       add      %[rc], %[p], %[a]\n"
> -               "       sc.d.rl  %[rc], %[rc], %[c]\n"
> -               "       bnez     %[rc], 0b\n"
> -               "       fence    rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               : [a]"r" (a), [u]"r" (u)
> -               : "memory");
> +       _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
> +
>         return prev;
>  }
>  #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
> @@ -310,61 +307,79 @@ ATOMIC_OPS()
>  #undef ATOMIC_OPS
>  #undef ATOMIC_OP
>
> +#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx)     \
> +({                                                                     \
> +       __asm__ __volatile__ (                                          \
> +               "0:     lr." sfx "      %[p],  %[c]\n"                  \
> +               "       bltz            %[p],  1f\n"                    \
> +               "       addi            %[rc], %[p], 1\n"               \
> +               "       sc." sfx ".rl   %[rc], %[rc], %[c]\n"           \
> +               "       bnez            %[rc], 0b\n"                    \
> +               "       fence           rw, rw\n"                       \
> +               "1:\n"                                                  \
> +               : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)  \
> +               :                                                       \
> +               : "memory");                                            \
> +})
> +
>  static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
>  {
>         int prev, rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.w      %[p],  %[c]\n"
> -               "       bltz      %[p],  1f\n"
> -               "       addi      %[rc], %[p], 1\n"
> -               "       sc.w.rl   %[rc], %[rc], %[c]\n"
> -               "       bnez      %[rc], 0b\n"
> -               "       fence     rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
> +
>         return !(prev < 0);
>  }
>
>  #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
>
> +#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx)     \
> +({                                                                     \
> +       __asm__ __volatile__ (                                          \
> +               "0:     lr." sfx "      %[p],  %[c]\n"                  \
> +               "       bgtz            %[p],  1f\n"                    \
> +               "       addi            %[rc], %[p], -1\n"              \
> +               "       sc." sfx ".rl   %[rc], %[rc], %[c]\n"           \
> +               "       bnez            %[rc], 0b\n"                    \
> +               "       fence           rw, rw\n"                       \
> +               "1:\n"                                                  \
> +               : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)  \
> +               :                                                       \
> +               : "memory");                                            \
> +})
> +
>  static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
>  {
>         int prev, rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.w      %[p],  %[c]\n"
> -               "       bgtz      %[p],  1f\n"
> -               "       addi      %[rc], %[p], -1\n"
> -               "       sc.w.rl   %[rc], %[rc], %[c]\n"
> -               "       bnez      %[rc], 0b\n"
> -               "       fence     rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
> +
>         return !(prev > 0);
>  }
>
>  #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
>
> +#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx)         \
> +({                                                                     \
> +       __asm__ __volatile__ (                                          \
> +               "0:     lr." sfx "     %[p],  %[c]\n"                   \
> +               "       addi           %[rc], %[p], -1\n"               \
> +               "       bltz           %[rc], 1f\n"                     \
> +               "       sc." sfx ".rl  %[rc], %[rc], %[c]\n"            \
> +               "       bnez           %[rc], 0b\n"                     \
> +               "       fence          rw, rw\n"                        \
> +               "1:\n"                                                  \
> +               : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)  \
> +               :                                                       \
> +               : "memory");                                            \
> +})
> +
>  static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
>  {
>         int prev, rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.w     %[p],  %[c]\n"
> -               "       addi     %[rc], %[p], -1\n"
> -               "       bltz     %[rc], 1f\n"
> -               "       sc.w.rl  %[rc], %[rc], %[c]\n"
> -               "       bnez     %[rc], 0b\n"
> -               "       fence    rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
> +
>         return prev - 1;
>  }
>
> @@ -376,17 +391,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
>         s64 prev;
>         long rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.d      %[p],  %[c]\n"
> -               "       bltz      %[p],  1f\n"
> -               "       addi      %[rc], %[p], 1\n"
> -               "       sc.d.rl   %[rc], %[rc], %[c]\n"
> -               "       bnez      %[rc], 0b\n"
> -               "       fence     rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
> +
>         return !(prev < 0);
>  }
>
> @@ -397,17 +403,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
>         s64 prev;
>         long rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.d      %[p],  %[c]\n"
> -               "       bgtz      %[p],  1f\n"
> -               "       addi      %[rc], %[p], -1\n"
> -               "       sc.d.rl   %[rc], %[rc], %[c]\n"
> -               "       bnez      %[rc], 0b\n"
> -               "       fence     rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
> +
>         return !(prev > 0);
>  }
>
> @@ -418,17 +415,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
>         s64 prev;
>         long rc;
>
> -       __asm__ __volatile__ (
> -               "0:     lr.d     %[p],  %[c]\n"
> -               "       addi      %[rc], %[p], -1\n"
> -               "       bltz     %[rc], 1f\n"
> -               "       sc.d.rl  %[rc], %[rc], %[c]\n"
> -               "       bnez     %[rc], 0b\n"
> -               "       fence    rw, rw\n"
> -               "1:\n"
> -               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> -               :
> -               : "memory");
> +       _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
> +
>         return prev - 1;
>  }
>
> --
> 2.40.0
>
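
In case it helps review: every one of these lr/sc loops has the same
shape, so the macros only abstract the operand width. As a rough C
sketch of what e.g. the fetch_add_unless loop implements (illustrative
only -- the lr/sc pair is what makes the read-modify-write atomic, and
the trailing "fence rw, rw" is what makes a successful update a full
barrier; fetch_add_unless_sketch is a made-up name):

	static inline int fetch_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int prev = v->counter;          /* lr.{w,d}                 */

		if (prev != u)                  /* beq %[p], %[u], 1f       */
			v->counter = prev + a;  /* add + sc.{w,d}.rl,       */
						/* retried on sc failure    */
		return prev;
	}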



