[RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations

guoren at kernel.org guoren at kernel.org
Mon Aug 8 01:05:59 PDT 2022


From: Guo Ren <guoren at linux.alibaba.com>

This patch provides RISC-V-specific implementations of the this_cpu
operations, using atomic (AMO) instructions at the appropriate widths (32- and 64-bit).

Use the AMO instructions listed below for the percpu operations; all other operations fall back to the generic implementations:
 - amoadd.w/d
 - amoand.w/d
 - amoor.w/d
 - amoswap.w/d

Signed-off-by: Guo Ren <guoren at linux.alibaba.com>
Signed-off-by: Guo Ren <guoren at kernel.org>
---
 arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 arch/riscv/include/asm/percpu.h

diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
new file mode 100644
index 000000000000..f41d339c41f3
--- /dev/null
+++ b/arch/riscv/include/asm/percpu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_RISCV_PERCPU_H
+#define _ASM_RISCV_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+#define __PERCPU_OP_CASE(asm_type, name, sz, asm_op) /* *ptr op= val; old value discarded (rd = zero) */ \
+static inline void							\
+__percpu_##name##_case_##sz(void *ptr, ulong val)			\
+{									\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " zero, %1, (%0)"	/* relaxed AMO: no .aq/.rl */ \
+		:							\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						/* compiler barrier only */ \
+}
+
+#define __PERCPU_RET_OP_CASE(asm_type, name, sz, asm_op, c_op) /* returns the NEW value of *ptr */ \
+static inline u##sz							\
+__percpu_##name##_return_case_##sz(void *ptr, ulong val)		\
+{									\
+	u##sz ret;							\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " %0, %2, (%1)"	/* ret = old *ptr */ \
+		: "=r" (ret)						\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						\
+									\
+	return ret c_op val;	/* old value c_op val == new value */	\
+}
+
+#ifdef CONFIG_64BIT	/* RV64: instantiate both .w (32-bit) and .d (64-bit) cases */
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)				\
+	__PERCPU_OP_CASE(d, name, 64, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)			\
+	__PERCPU_RET_OP_CASE(d, name, 64, asm_op, c_op)
+#else  /* CONFIG_32BIT: only the 32-bit (.w) case exists */
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)
+#endif /* CONFIG_64BIT */
+
+PERCPU_OP(add, add)		/* emit __percpu_add_case_* (amoadd) */
+PERCPU_OP(and, and)		/* emit __percpu_and_case_* (amoand) */
+PERCPU_OP(or, or)		/* emit __percpu_or_case_* (amoor) */
+PERCPU_RET_OP(add, add, +)	/* emit __percpu_add_return_case_* */
+
+#undef __PERCPU_OP_CASE		/* generator macros are local to this header */
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+
+#define _pcp_protect(op, pcp, ...)	/* run op on this CPU's copy with preemption off */ \
+({									\
+	preempt_disable_notrace();					\
+	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
+	preempt_enable_notrace();					\
+})
+
+#define _pcp_protect_return(op, pcp, args...)	/* like _pcp_protect, but yields op's result */ \
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable_notrace();					\
+	if (__native_word(pcp)) 					\
+		__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);\
+	else								\
+		BUILD_BUG();	/* reject non-native-word pcp at compile time */ \
+	preempt_enable_notrace();					\
+	__retval;							\
+})
+
+#define this_cpu_add_4(pcp, val)	/* 32-bit add: amoadd.w */	\
+	_pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_return_4(pcp, val)	/* returns new value */	\
+	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_and_4(pcp, val)	/* 32-bit and: amoand.w */	\
+	_pcp_protect(__percpu_and_case_32, pcp, val)
+#define this_cpu_or_4(pcp, val)	/* 32-bit or: amoor.w */	\
+	_pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_xchg_4(pcp, val)	/* via xchg_relaxed from <asm/cmpxchg.h> */ \
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+
+#ifdef CONFIG_64BIT	/* 8-byte ops only exist on RV64 */
+#define this_cpu_add_8(pcp, val)	/* 64-bit add: amoadd.d */	\
+	_pcp_protect(__percpu_add_case_64, pcp, val)
+#define this_cpu_add_return_8(pcp, val)	/* returns new value */	\
+	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+#define this_cpu_and_8(pcp, val)	/* 64-bit and: amoand.d */	\
+	_pcp_protect(__percpu_and_case_64, pcp, val)
+#define this_cpu_or_8(pcp, val)	/* 64-bit or: amoor.d */	\
+	_pcp_protect(__percpu_or_case_64, pcp, val)
+#define this_cpu_xchg_8(pcp, val)	/* via xchg_relaxed from <asm/cmpxchg.h> */ \
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_RISCV_PERCPU_H */
-- 
2.36.1




More information about the linux-riscv mailing list