[RFC 09/15] temp: remove various library optimisations

Ben Dooks ben.dooks at codethink.co.uk
Fri Dec 20 07:57:55 PST 2024


These optimisations either need fixing or checking for correct
big-endian operation, so disable them for now:

- memmove is definitely not working
- ignore the memset and memcpy optimisations for now
- disable the Zbb strlen alternative
- the uaccess copy code needs fixing

(C sketches of the byte-wise fallbacks follow the diffstat below.)
---
 arch/riscv/lib/memcpy.S  | 22 +++++++++++++++++++++-
 arch/riscv/lib/memmove.S |  2 +-
 arch/riscv/lib/memset.S  |  1 +
 arch/riscv/lib/strlen.S  |  2 +-
 arch/riscv/lib/uaccess.S |  7 +++++--
 5 files changed, 29 insertions(+), 5 deletions(-)
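
For reviewers (kept below the '---' so git am drops it): the
byte-at-a-time path that __memcpy and __memset are forced onto
here is equivalent to the C sketch below. byte_copy and byte_set
are illustrative names for this note only, not the kernel symbols:

	#include <stddef.h>

	/* Copy forward one byte per iteration and return the
	 * original destination, matching memcpy semantics. */
	static void *byte_copy(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		while (len--)
			*d++ = *s++;

		return dst;
	}

	/* Store the low byte of c to every byte of dst, matching
	 * memset semantics. */
	static void *byte_set(void *dst, int c, size_t len)
	{
		unsigned char *d = dst;

		while (len--)
			*d++ = (unsigned char)c;

		return dst;
	}

No endianness assumptions are involved, which is why the byte
loops are safe to keep while the word-oriented paths are checked.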

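Similarly, the .Lbyte_copy path that __memmove now always takes
must remain overlap-safe, choosing the copy direction from the
pointer order. A minimal C sketch of that behaviour (byte_move is
again an illustrative name only):

	#include <stddef.h>

	static void *byte_move(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (d < s) {
			/* dst is below src: copying forwards never
			 * overwrites bytes not yet read. */
			while (len--)
				*d++ = *s++;
		} else {
			/* dst overlaps the tail of src: copy
			 * backwards from the end instead. */
			d += len;
			s += len;
			while (len--)
				*--d = *--s;
		}

		return dst;
	}
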
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 44e009ec5fef..b51380f06204 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,12 +7,15 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-SYM_FUNC_START(__memcpy)
+SYM_FUNC_START(__memcpy1)
 	move t6, a0  /* Preserve return value */
 
 	/* Defer to byte-oriented copy for small sizes */
 	sltiu a3, a2, 128
+	j	4f	/* for now, always use the byte copy */
+
 	bnez a3, 4f
+
 	/* Use word-oriented copy only if low-order bits match */
 	andi a3, t6, SZREG-1
 	andi a4, a1, SZREG-1
@@ -87,6 +90,7 @@ SYM_FUNC_START(__memcpy)
 	or a5, a5, a3
 	andi a5, a5, 3
 	bnez a5, 5f
+	j	5f	/* for now, skip the word copy */
 7:
 	lw a4, 0(a1)
 	addi a1, a1, 4
@@ -104,6 +108,22 @@ SYM_FUNC_START(__memcpy)
 	bltu a1, a3, 5b
 6:
 	ret
+SYM_FUNC_END(__memcpy1)
+
+SYM_FUNC_START(__memcpy)
+	move t6, a0  /* Preserve return value */
+	beqz a2, 6f
+	add a3, a1, a2
+
+5:
+	lb a4, 0(a1)
+	addi a1, a1, 1
+	sb a4, 0(t6)
+	addi t6, t6, 1
+	bltu a1, a3, 5b
+6:
+	ret
+
 SYM_FUNC_END(__memcpy)
 SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index cb3e2e7ef0ba..c51475e4f3ce 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -60,7 +60,7 @@ SYM_FUNC_START(__memmove)
 	 */
 	andi t0, a2, -(2 * SZREG)
 	beqz t0, .Lbyte_copy
-
+	j .Lbyte_copy	/* for now, always use the byte copy */
 	/*
 	 * Now solve for t5 and t6.
 	 */
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index da23b8347e2d..a3cd79cb33b4 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -14,6 +14,7 @@ SYM_FUNC_START(__memset)
 	/* Defer to byte-oriented fill for small sizes */
 	sltiu a3, a2, 16
 	bnez a3, 4f
+	j 4f /* disable the optimised fill for now */
 
 	/*
 	 * Round to nearest XLEN-aligned address
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 962983b73251..bea650fd24af 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -8,7 +8,7 @@
 /* int strlen(const char *s) */
 SYM_FUNC_START(strlen)
 
-	ALTERNATIVE("nop", "j strlen_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)
+	/*ALTERNATIVE("nop", "j strlen_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)*/
 
 	/*
 	 * Returns
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 6a9f116bb545..3d7da86277bb 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -46,7 +46,8 @@ SYM_FUNC_START(fallback_scalar_usercopy)
 	 */
 	li	a3, 9*SZREG-1 /* size must >= (word_copy stride + SZREG-1) */
 	bltu	a2, a3, .Lbyte_copy_tail
-
+	j	.Lbyte_copy_tail	/* for now, always use the byte copy */
+
 	/*
 	 * Copy first bytes until dst is aligned to word boundary.
 	 * a0 - start of dst
@@ -73,7 +74,9 @@ SYM_FUNC_START(fallback_scalar_usercopy)
 	 */
 	/* a1 - start of src */
 	andi	a3, a1, SZREG-1
-	bnez	a3, .Lshift_copy
+	/*	bnez	a3, .Lshift_copy */
+	/* for now, ignore shift copy until fixed */
+	bnez	a3, .Lbyte_copy_tail
 
 .Lword_copy:
         /*
-- 
2.37.2.352.g3c44437643