[PATCH 1/4] ARM: vexpress/dcscb: fix cache disabling sequences
Nicolas Pitre
nicolas.pitre at linaro.org
Mon Jul 22 13:58:12 EDT 2013
On Fri, 19 Jul 2013, Lorenzo Pieralisi wrote:
> On Fri, Jul 19, 2013 at 11:28:49AM +0100, Dave Martin wrote:
> > On Thu, Jul 18, 2013 at 02:59:06PM -0400, Nicolas Pitre wrote:
> > > On Thu, 18 Jul 2013, Dave Martin wrote:
> > >
> > > > I had other names in mind, like "coherency_exit" or "cache_exit".
> > > > Those are not very intelligible, but that might at least make people
> > > > pause and think before blindly using it.
> > >
> > > Good point. It should still embody the architecture name for which it
> > > is valid though.
> >
> > Sure, I was assuming something would be pasted on the start of the name.
>
> v7 :-) with a comment describing the assumptions (in particular, as Dave
> mentioned, those related to the SMP bit behaviour)?
OK... What about this then:
----- >8
From: Nicolas Pitre <nicolas.pitre at linaro.org>
Date: Thu, 18 Jul 2013 13:12:48 -0400
Subject: [PATCH] ARM: cacheflush: consolidate single-CPU ARMv7 cache disabling code
This code is becoming duplicated in many places. So let's consolidate
it into a handy macro that is known to be right and available for reuse.
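Call sites then reduce to a single line.  For example:

	/* Disable and flush the whole cache hierarchy for this cluster */
	v7_exit_coherency_flush(all);

or, to disable and flush the local CPU cache only:

	v7_exit_coherency_flush(louis);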
Signed-off-by: Nicolas Pitre <nico at linaro.org>
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672..8f4e2297e2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -436,4 +436,44 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and to do so without any intervening memory access between those
+ * steps, not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (load/store exclusive) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ *
+ * Further considerations:
+ *
+ * - This relies on the presence and behavior of the AUXCR (ACTLR) "SMP"
+ *   bit, which is IMPLEMENTATION DEFINED and documented in the individual
+ *   CPU TRMs rather than the ARMv7 ARM.  Vendor implementations that
+ *   deviate from that behavior will need their own procedure.
+ * - This is unlikely to work if Linux is running non-secure.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" \
+ "isb \n\t" \
+ "dsb " \
+ /* The clobber list is dictated by the call to v7_flush_dcache_* */ \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","r11","lr","memory" )
+
#endif
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 85fffa702f..14d4996887 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,32 +133,8 @@ static void dcscb_power_down(void)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush all cache levels for this cluster.
- *
- * To do so we do:
- * - Clear the SCTLR.C bit to prevent further cache allocations
- * - Flush the whole cache
- * - Clear the ACTLR "SMP" bit to disable local coherency
- *
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including to the stack.
- */
- asm volatile(
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb "
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","r11","lr","memory");
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
/*
* This is a harmless no-op. On platforms with a real
@@ -177,24 +153,8 @@ static void dcscb_power_down(void)
} else {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush the local CPU cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb "
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","r11","lr","memory");
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index dfb55d45b6..5940f1e317 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -134,26 +134,7 @@ static void tc2_pm_down(u64 residency)
: : "r" (0x400) );
}
- /*
- * We need to disable and flush the whole (L1 and L2) cache.
- * Let's do it in the safest possible way i.e. with
- * no memory access within the following sequence
- * including the stack.
- */
- asm volatile(
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_all \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb "
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","r11","lr","memory");
+ v7_exit_coherency_flush(all);
cci_disable_port_by_cpu(mpidr);
@@ -169,24 +150,7 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- /*
- * We need to disable and flush only the L1 cache.
- * Let's do it in the safest possible way as above.
- */
- asm volatile(
- "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
- "bic r0, r0, #"__stringify(CR_C)" \n\t"
- "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
- "isb \n\t"
- "bl v7_flush_dcache_louis \n\t"
- "clrex \n\t"
- "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
- "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
- "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
- "isb \n\t"
- "dsb "
- : : : "r0","r1","r2","r3","r4","r5","r6","r7",
- "r9","r10","r11","lr","memory");
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
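Note that the level argument is pasted directly into the name of the
branch target, i.e.:

	v7_exit_coherency_flush(all)    ->  bl v7_flush_dcache_all
	v7_exit_coherency_flush(louis)  ->  bl v7_flush_dcache_louis

so only values with a corresponding v7_flush_dcache_* implementation
make sense here.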