[PATCH 09/18] arm64: kexec: Use dcache ops macros instead of open-coding
Pavel Tatashin
pasha.tatashin at soleen.com
Thu May 27 08:05:17 PDT 2021
From: James Morse <james.morse at arm.com>
kexec does dcache maintenance when it re-writes all memory. Our
dcache_by_line_op macro depends on reading the sanitised DminLine
from memory. Kexec may have overwritten this, so it open-codes the
sequence instead.
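
For reference, a simplified sketch of the read_ctr helper this refers
to (the exact code in assembler.h differs slightly): on systems with
mismatched cache types the sanitised CTR_EL0 value is loaded from a
feature-register structure in normal memory, not from the system
register itself.

	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR_EL0 directly
	nop
alternative_else
	// sanitised copy lives in normal memory, which kexec may clobber
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSREG
alternative_endif
	.endm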
dcache_by_line_op is really a whole set of macros: it uses
dcache_line_size, which uses read_ctr to fetch the sanitised
DminLine. Reading DminLine is the first thing dcache_by_line_op
does.
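
A simplified sketch of dcache_line_size, for reference (again, the
version in assembler.h may differ in detail); everything after the
read_ctr only needs registers:

	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp			// may read the sanitised copy from memory
	ubfm	\tmp, \tmp, #16, #19		// CTR_EL0.DminLine: log2(words per line)
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// cache line size in bytes
	.endm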
Split out the body of dcache_by_line_op into a new
dcache_by_myline_op macro that takes DminLine as an argument, and
make dcache_by_line_op a wrapper around it. Kexec can now use the
slightly smaller macro. This makes upcoming changes to the dcache
maintenance easier on the eye.
Code generated by the existing callers is unchanged.
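
As an illustration only (not part of this patch), a caller that has
stashed the line size in a register before memory is overwritten can
then do its maintenance without reading the line size from memory;
the register names below are just examples:

	// while memory is still intact: read the line size once
	raw_dcache_line_size x15, x1		// x15 = dcache line size

	// later, once memory contents can no longer be trusted
	mov	x2, x13				// dest page
	mov	x1, #PAGE_SIZE			// length
	dcache_by_myline_op ivac, sy, x2, x1, x15, x20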
Signed-off-by: James Morse <james.morse at arm.com>
[Fixed merging issues]
Signed-off-by: Pavel Tatashin <pasha.tatashin at soleen.com>
---
arch/arm64/include/asm/assembler.h | 12 ++++++++----
arch/arm64/kernel/relocate_kernel.S | 13 +++----------
2 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8418c1bd8f04..f1367863d995 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -393,10 +393,9 @@ alternative_else
alternative_endif
.endm
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
- dcache_line_size \tmp1, \tmp2
+ .macro dcache_by_myline_op op, domain, kaddr, size, linesz, tmp2
add \size, \kaddr, \size
- sub \tmp2, \tmp1, #1
+ sub \tmp2, \linesz, #1
bic \kaddr, \kaddr, \tmp2
9998:
.ifc \op, cvau
@@ -416,12 +415,17 @@ alternative_endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
+ add \kaddr, \kaddr, \linesz
cmp \kaddr, \size
b.lo 9998b
dsb \domain
.endm
+ .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ dcache_line_size \tmp1, \tmp2
+ dcache_by_myline_op \op, \domain, \kaddr, \size, \tmp1, \tmp2
+ .endm
+
/*
* Macro to perform an instruction cache maintenance for the interval
* [start, end)
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 8058fabe0a76..718037bef560 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -41,16 +41,9 @@ SYM_CODE_START(arm64_relocate_new_kernel)
tbz x16, IND_SOURCE_BIT, .Ltest_indirection
/* Invalidate dest page to PoC. */
- mov x2, x13
- add x20, x2, #PAGE_SIZE
- sub x1, x15, #1
- bic x2, x2, x1
-2: dc ivac, x2
- add x2, x2, x15
- cmp x2, x20
- b.lo 2b
- dsb sy
-
+ mov x2, x13
+ mov x1, #PAGE_SIZE
+ dcache_by_myline_op ivac, sy, x2, x1, x15, x20
copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
b .Lnext
.Ltest_indirection:
--
2.25.1