[PATCH V9 14/15] openrisc: Move from ticket-lock to qspinlock
guoren at kernel.org
guoren at kernel.org
Mon Aug 8 00:13:17 PDT 2022
From: Guo Ren <guoren at linux.alibaba.com>
Enable qspinlock by the requirements mentioned in a8ad07e5240c9
("asm-generic: qspinlock: Indicate the use of mixed-size atomics").
Openrisc only has "l.lwa/l.swa" for all atomic operations. That means
its ll/sc pairs must provide a strong forward progress guarantee, otherwise
all atomic operations may cause livelock. The ticket-lock needs
atomic_fetch_add well defined forward progress guarantees under
contention, and qspinlock needs xchg16 forward progress guarantees. The
atomic_fetch_add (l.lwa + add + l.swa) & xchg16 (l.lwa + and + or +
l.swa) have similar implementations, so they have the same forward
progress guarantees.
The qspinlock is smaller and faster than ticket-lock when all is in
fast-path. There is no reason to keep openrisc on ticket-lock instead of
qspinlock. Here is
the comparison between qspinlock and ticket-lock in fast-path code
sizes (bytes):
TYPE : TICKET | QUEUED
arch_spin_lock : 128 | 96
arch_spin_unlock : 56 | 44
arch_spin_trylock : 108 | 80
arch_spin_is_locked : 36 | 36
arch_spin_is_contended : 36 | 36
arch_spin_value_unlocked: 28 | 28
Signed-off-by: Guo Ren <guoren at linux.alibaba.com>
Signed-off-by: Guo Ren <guoren at kernel.org>
Cc: Stafford Horne <shorne at gmail.com>
Cc: Jonas Bonn <jonas at southpole.se>
Cc: Stefan Kristiansson <stefan.kristiansson at saunalahti.fi>
---
arch/openrisc/Kconfig | 1 +
arch/openrisc/include/asm/Kbuild | 2 ++
arch/openrisc/include/asm/cmpxchg.h | 25 +++++++++++++++++++++++++
3 files changed, 28 insertions(+)
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index c7f282f60f64..1652a6aac882 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -10,6 +10,7 @@ config OPENRISC
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_USE_QUEUED_SPINLOCKS
select COMMON_CLK
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c8c99b554ca4..ad147fec50b4 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -2,6 +2,8 @@
generic-y += extable.h
generic-y += kvm_para.h
generic-y += parport.h
+generic-y += mcs_spinlock.h
+generic-y += qspinlock.h
generic-y += spinlock_types.h
generic-y += spinlock.h
generic-y += qrwlock_types.h
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index df83b33b5882..2d650b07a0f4 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -65,6 +65,27 @@ static inline u32 cmpxchg32(volatile void *ptr, u32 old, u32 new)
})
/* xchg */
+static inline u32 xchg16(volatile void *ptr, u32 val)
+{
+ u32 ret, tmp;
+ u32 shift = ((ulong)ptr & 2) ? 0 : 16; /* big-endian: lower address holds the high halfword */
+ u32 mask = 0xffff << shift;
+ u32 *__ptr = (u32 *)((ulong)ptr & ~2);
+
+ __asm__ __volatile__(
+ "1: l.lwa %0, 0(%2) \n"
+ " l.and %1, %0, %3 \n"
+ " l.or %1, %1, %4 \n"
+ " l.swa 0(%2), %1 \n"
+ " l.bnf 1b \n"
+ " l.nop \n"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r"(__ptr), "r" (~mask), "r" (val << shift)
+ : "cc", "memory");
+
+ return (ret & mask) >> shift;
+}
+
static inline u32 xchg32(volatile void *ptr, u32 val)
{
__asm__ __volatile__(
@@ -85,6 +106,10 @@ static inline u32 xchg32(volatile void *ptr, u32 val)
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 2: \
+ __ret = (__typeof__(*(ptr))) \
+ xchg16(__ptr, (u32)__new); \
+ break; \
case 4: \
__ret = (__typeof__(*(ptr))) \
xchg32(__ptr, (u32)__new); \
--
2.36.1
More information about the linux-riscv
mailing list