[PATCH v3] arm64: enable EDAC on arm64

Rob Herring <robherring2@gmail.com>
Mon Apr 21 09:09:16 PDT 2014


From: Rob Herring <robh@kernel.org>

Implement atomic_scrub and enable EDAC for arm64.

Signed-off-by: Rob Herring <robh@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
---
v3:
- Drop "cc" clobber annotation.
v2:
- Add a loop to retry until the exclusive store succeeds
- Fix the access size to be 32 bits at a time, since the framework
  gives no alignment guarantees.
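
For reviewers less familiar with the exclusive-monitor idiom, here is a
rough userspace C approximation of what the ldxr/stxr loop in the new
header does: read each 32-bit word and atomically write the same value
back, retrying if another writer slipped in between. The function name
atomic_scrub_sketch and the GCC __atomic builtins are illustrative only
and are not part of this patch; the real header has to use the inline
asm so that exclusive accesses are actually emitted.

#include <stdint.h>

/* Illustration only -- not kernel code. */
static inline void atomic_scrub_sketch(void *va, uint32_t size)
{
	uint32_t *p = va;
	uint32_t i;

	for (i = 0; i < size / sizeof(*p); i++, p++) {
		uint32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);

		/*
		 * Write back the value just read; if the word changed
		 * under us, 'old' is refreshed and we retry, mirroring
		 * the ldxr/stxr retry loop.
		 */
		while (!__atomic_compare_exchange_n(p, &old, old, 0,
						    __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED))
			;
	}
}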

 arch/arm64/Kconfig            |  1 +
 arch/arm64/include/asm/edac.h | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
 create mode 100644 arch/arm64/include/asm/edac.h

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e6e4d37..4c1f857 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,6 +14,7 @@ config ARM64
 	select COMMON_CLK
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
+	select EDAC_SUPPORT
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_CPU_AUTOPROBE
diff --git a/arch/arm64/include/asm/edac.h b/arch/arm64/include/asm/edac.h
new file mode 100644
index 0000000..8a3d176
--- /dev/null
+++ b/arch/arm64/include/asm/edac.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing.  It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+	unsigned int *virt_addr = va;
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+		long result;
+		unsigned long tmp;
+
+		asm volatile("/* atomic_scrub */\n"
+		"1:     ldxr    %w0, %2\n"
+		"       stxr    %w1, %w0, %2\n"
+		"       cbnz    %w1, 1b"
+			: "=&r" (result), "=&r" (tmp), "+Q" (*virt_addr) : : );
+	}
+}
+#endif
-- 
1.9.1
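
For context, the consumer of this hook is the EDAC core's software
scrub path in drivers/edac/edac_mc.c, which maps the page that reported
a corrected error and hands the virtual address to atomic_scrub(). The
sketch below is a simplified paraphrase of that path, not a verbatim
copy, and the helper name scrub_block_sketch is made up here; exact
details vary between kernel versions.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/edac.h>

/* Simplified paraphrase of the EDAC core scrub path -- illustration only. */
static void scrub_block_sketch(unsigned long pfn, unsigned long offset, u32 size)
{
	void *virt_addr;

	/* Ignore errors reported for pages we do not manage. */
	if (!pfn_valid(pfn))
		return;

	/* Map the page and rewrite the affected words in place. */
	virt_addr = kmap_atomic(pfn_to_page(pfn));
	atomic_scrub(virt_addr + offset, size);
	kunmap_atomic(virt_addr);
}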



