[PATCH 4/6] arm64: head: clarify `compute_indices`
Anshuman Khandual
anshuman.khandual at arm.com
Tue May 17 20:17:23 PDT 2022
From: Mark Rutland <mark.rutland at arm.com>
The logic in the `compute_indices` macro can be difficult to follow, as
it transiently uses output operands for unrelated temporary values.
Let's make this clearer by using a `tmp` parameter, and splitting the
logic into commented blocks. By folding a MUL and ADD into a single MADD
we avoid the need for a second temporary.
As `ptrs` is sometimes a register and sometimes an immediate, we cannot
simplify this much further at present. If it were always a register, we
could remove redundant MOVs, and if it were always an immediate we could
use `(\ptrs - 1)` as an immediate for the ANDs when extracting index
bits (or replace the LSR; SUB; AND sequence with a single UBFX).
There should be no functional change as a result of this patch.
Cc: Ard Biesheuvel <ardb at kernel.org>
Cc: Catalin Marinas <catalin.marinas at arm.com>
Cc: Will Deacon <will at kernel.org>
Signed-off-by: Mark Rutland <mark.rutland at arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual at arm.com>
---
arch/arm64/kernel/head.S | 33 ++++++++++++++++++---------------
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b5d7dacbbb2c..01739f5ec3de 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -189,20 +189,23 @@ SYM_CODE_END(preserve_boot_args)
* Preserves: vstart, vend, shift, ptrs
* Returns: istart, iend, count
*/
- .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
+ .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count, tmp
+ // iend = (vend >> shift) & (ptrs - 1)
lsr \iend, \vend, \shift
- mov \istart, \ptrs
- sub \istart, \istart, #1
- and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
- mov \istart, \ptrs
- mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += count * ptrs
- // our entries span multiple tables
+ mov \tmp, \ptrs
+ sub \tmp, \tmp, #1
+ and \iend, \iend, \tmp
+ // iend += count * ptrs
+ // our entries span multiple tables
+ mov \tmp, \ptrs
+ madd \iend, \count, \tmp, \iend
+
+ // istart = (vstart >> shift) & (ptrs - 1)
lsr \istart, \vstart, \shift
- mov \count, \ptrs
- sub \count, \count, #1
- and \istart, \istart, \count
+ mov \tmp, \ptrs
+ sub \tmp, \tmp, #1
+ and \istart, \istart, \tmp
sub \count, \iend, \istart
.endm
@@ -229,25 +232,25 @@ SYM_CODE_END(preserve_boot_args)
add \rtbl, \tbl, #PAGE_SIZE
mov \sv, \rtbl
mov \count, #0
- compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
+ compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count, \tmp
populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
mov \tbl, \sv
mov \sv, \rtbl
#if SWAPPER_PGTABLE_LEVELS > 3
- compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
+ compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count, \tmp
populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
mov \tbl, \sv
mov \sv, \rtbl
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
- compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
+ compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count, \tmp
populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
mov \tbl, \sv
#endif
- compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
+ compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count, \tmp
bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
.endm
--
2.20.1
More information about the linux-arm-kernel
mailing list