[PATCH 3/5] riscv: __asm_to/copy_from_user: Copy until dst is aligned
Akira Tsukamoto
akira.tsukamoto at gmail.com
Sat Jun 19 04:36:49 PDT 2021
First, copy in bytes until the destination memory address reaches the
first word-aligned boundary.
To speed up the copy, the key is to avoid both unaligned memory
accesses and byte accesses. This is the preparation before the bulk
aligned word copy.
Signed-off-by: Akira Tsukamoto <akira.tsukamoto at gmail.com>
---
arch/riscv/lib/uaccess.S | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index be1810077f9a..4906b5ca91c3 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -32,6 +32,34 @@ ENTRY(__asm_copy_from_user)
add t0, a0, a2
bgtu a0, t0, 5f
+ /*
+ * Use byte copy only if the size is too small.
+ */
+ li a3, 8*SZREG /* size must be larger than size in word_copy */
+ bltu a2, a3, .Lbyte_copy_tail
+
+ /*
+ * Copy the first bytes until dst is aligned to a word boundary.
+ * a0 - start of dst
+ * t1 - start of aligned dst
+ */
+ addi t1, a0, SZREG-1
+ andi t1, t1, ~(SZREG-1)
+ /* dst is already aligned, skip */
+ beq a0, t1, .Lskip_first_bytes
+1:
+ /* a5 - one byte for copying data */
+ fixup lb a5, 0(a1), 10f
+ addi a1, a1, 1 /* src */
+ fixup sb a5, 0(a0), 10f
+ addi a0, a0, 1 /* dst */
+ bltu a0, t1, 1b /* t1 - start of aligned dst */
+
+.Lskip_first_bytes:
+
+.Lword_copy:
+.Lshift_copy:
+
.Lbyte_copy_tail:
/*
* Byte copy anything left.
--
2.17.1
More information about the linux-riscv
mailing list