[PATCH V8 10/10] csky: Add qspinlock support

guoren at kernel.org
Sun Jul 24 05:25:17 PDT 2022


From: Guo Ren <guoren at linux.alibaba.com>

Enable qspinlock, satisfying the requirements described in commit
a8ad07e5240c9 ("asm-generic: qspinlock: Indicate the use of mixed-size
atomics").

C-SKY only has "ldex/stex" for all atomic operations, so csky provides a
strong forward progress guarantee for "ldex/stex": once ldex has brought
the cache line into the L1 cache, other cores are blocked from snooping
that address for several cycles.

Signed-off-by: Guo Ren <guoren at linux.alibaba.com>
Signed-off-by: Guo Ren <guoren at kernel.org>
---
 arch/csky/Kconfig               | 16 ++++++++++++++++
 arch/csky/include/asm/Kbuild    |  2 ++
 arch/csky/include/asm/cmpxchg.h | 20 ++++++++++++++++++++
 3 files changed, 38 insertions(+)

diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index dfdb436b6078..09f7d1f06bca 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -354,6 +354,22 @@ config HAVE_EFFICIENT_UNALIGNED_STRING_OPS
 	  Say Y here to enable EFFICIENT_UNALIGNED_STRING_OPS. Some CPU models could
 	  deal with unaligned access by hardware.
 
+choice
+	prompt "C-SKY spinlock type"
+	default CSKY_TICKET_SPINLOCKS
+
+config CSKY_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config CSKY_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Make sure your microarchitecture's LL/SC has a strong forward progress
+	  guarantee. Otherwise, stay with ticket-lock/combo-lock.
+endchoice
+
 endmenu
 
 source "arch/csky/Kconfig.platforms"
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..b70b14de904f 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -3,10 +3,12 @@ generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += gpio.h
 generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
 generic-y += spinlock.h
 generic-y += spinlock_types.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..5f693fadb56c 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;				\
 	unsigned long tmp;					\
 	switch (size) {						\
+	case 2: {						\
+		u32 ret;					\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;		\
+		u32 mask = 0xffff << shif;			\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2);	\
+		__asm__ __volatile__ (				\
+			"1:	ldex.w %0, (%4)\n"		\
+			"	and    %1, %0, %2\n"		\
+			"	or     %1, %1, %3\n"		\
+			"	stex.w %1, (%4)\n"		\
+			"	bez    %1, 1b\n"		\
+			: "=&r" (ret), "=&r" (tmp)		\
+			: "r" (~mask),				\
+			  "r" ((u32)__new << shif),		\
+			  "r" (__ptr)				\
+			: "memory");				\
+		__ret = (__typeof__(*(ptr)))			\
+			((ret & mask) >> shif);			\
+		break;						\
+	}							\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
-- 
2.36.1
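
For illustration (not part of the patch), a defconfig fragment that opts in
to the new Kconfig choice would select the queued implementation roughly
like this, assuming SMP is already enabled:

CONFIG_SMP=y
CONFIG_CSKY_QUEUED_SPINLOCKS=y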



