[PATCH 1/1] riscv: provide memmove implementation

Palmer Dabbelt palmer at dabbelt.com
Thu Dec 10 20:43:39 EST 2020


On Mon, 30 Nov 2020 01:13:19 PST (-0800), nylon7 at andestech.com wrote:
> The memmove routine is used by kernel features such as KASAN.
>
> Signed-off-by: Nick Hu <nickhu at andestech.com>
> Signed-off-by: Nick Hu <nick650823 at gmail.com>
> Signed-off-by: Nylon Chen <nylon7 at andestech.com>
> ---
>  arch/riscv/include/asm/string.h |  8 ++---
>  arch/riscv/kernel/riscv_ksyms.c |  2 ++
>  arch/riscv/lib/Makefile         |  1 +
>  arch/riscv/lib/memmove.S        | 64 +++++++++++++++++++++++++++++++++
>  4 files changed, 71 insertions(+), 4 deletions(-)
>  create mode 100644 arch/riscv/lib/memmove.S
>
> diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
> index 924af13f8555..5477e7ecb6e1 100644
> --- a/arch/riscv/include/asm/string.h
> +++ b/arch/riscv/include/asm/string.h
> @@ -12,16 +12,16 @@
>  #define __HAVE_ARCH_MEMSET
>  extern asmlinkage void *memset(void *, int, size_t);
>  extern asmlinkage void *__memset(void *, int, size_t);
> -
>  #define __HAVE_ARCH_MEMCPY
>  extern asmlinkage void *memcpy(void *, const void *, size_t);
>  extern asmlinkage void *__memcpy(void *, const void *, size_t);
> -
> +#define __HAVE_ARCH_MEMMOVE
> +extern asmlinkage void *memmove(void *, const void *, size_t);
> +extern asmlinkage void *__memmove(void *, const void *, size_t);
>  /* For those files which don't want to check by kasan. */
>  #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
> -
>  #define memcpy(dst, src, len) __memcpy(dst, src, len)
>  #define memset(s, c, n) __memset(s, c, n)
> -
> +#define memmove(dst, src, len) __memmove(dst, src, len)
>  #endif
>  #endif /* _ASM_RISCV_STRING_H */
> diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
> index 450492e1cb4e..5ab1c7e1a6ed 100644
> --- a/arch/riscv/kernel/riscv_ksyms.c
> +++ b/arch/riscv/kernel/riscv_ksyms.c
> @@ -11,5 +11,7 @@
>   */
>  EXPORT_SYMBOL(memset);
>  EXPORT_SYMBOL(memcpy);
> +EXPORT_SYMBOL(memmove);
>  EXPORT_SYMBOL(__memset);
>  EXPORT_SYMBOL(__memcpy);
> +EXPORT_SYMBOL(__memmove);
> diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
> index 47e7a8204460..ac6171e9c19e 100644
> --- a/arch/riscv/lib/Makefile
> +++ b/arch/riscv/lib/Makefile
> @@ -2,5 +2,6 @@
>  lib-y			+= delay.o
>  lib-y			+= memcpy.o
>  lib-y			+= memset.o
> +lib-y			+= memmove.o
>  lib-$(CONFIG_MMU)	+= uaccess.o
>  lib-$(CONFIG_64BIT)	+= tishift.o
> diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
> new file mode 100644
> index 000000000000..07d1d2152ba5
> --- /dev/null
> +++ b/arch/riscv/lib/memmove.S
> @@ -0,0 +1,64 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#include <linux/linkage.h>
> +#include <asm/asm.h>
> +
> +ENTRY(__memmove)
> +WEAK(memmove)
> +        move    t0, a0                  /* save dest: returned in a0 on exit */
> +        move    t1, a1                  /* save src: restored into a1 on exit */
> +
> +        beq     a0, a1, exit_memcpy     /* dest == src: nothing to copy */
> +        beqz    a2, exit_memcpy         /* count == 0: nothing to copy */
> +        srli    t2, a2, 0x2             /* t2 = number of whole words */
> +
> +        slt     t3, a0, a1              /* dest below src? */
> +        beqz    t3, do_reverse          /* no: copy backwards to handle overlap */
> +
> +        andi    a2, a2, 0x3             /* a2 = leftover bytes */
> +        li      t4, 1                   /* byte_copy steps forwards */
> +        beqz    t2, byte_copy           /* no whole words to copy */
> +
> +word_copy:                              /* forward copy, a word at a time */
> +        lw      t3, 0(a1)               /* load word from src */
> +        addi    t2, t2, -1
> +        addi    a1, a1, 4
> +        sw      t3, 0(a0)               /* store word to dest */
> +        addi    a0, a0, 4
> +        bnez    t2, word_copy
> +        beqz    a2, exit_memcpy         /* done if no leftover bytes */
> +        j       byte_copy
> +
> +do_reverse:
> +        add     a0, a0, a2              /* point dest one past the end */
> +        add     a1, a1, a2              /* point src one past the end */
> +        andi    a2, a2, 0x3             /* a2 = leftover bytes */
> +        li      t4, -1                  /* byte_copy steps backwards */
> +        beqz    t2, reverse_byte_copy   /* no whole words to copy */
> +
> +reverse_word_copy:                      /* backward copy, a word at a time */
> +        addi    a1, a1, -4
> +        addi    t2, t2, -1
> +        lw      t3, 0(a1)               /* load word from src */
> +        addi    a0, a0, -4
> +        sw      t3, 0(a0)               /* store word to dest */
> +        bnez    t2, reverse_word_copy
> +        beqz    a2, exit_memcpy         /* done if no leftover bytes */
> +
> +reverse_byte_copy:                      /* step back once: byte_copy loads before it steps */
> +        addi    a0, a0, -1
> +        addi    a1, a1, -1
> +
> +byte_copy:                              /* leftover bytes, t4 = +1 or -1 */
> +        lb      t3, 0(a1)               /* load byte from src */
> +        addi    a2, a2, -1
> +        sb      t3, 0(a0)               /* store byte to dest */
> +        add     a1, a1, t4              /* step src by t4 */
> +        add     a0, a0, t4              /* step dest by t4 */
> +        bnez    a2, byte_copy
> +
> +exit_memcpy:
> +        move    a0, t0                  /* return the original dest */
> +        move    a1, t1                  /* restore the clobbered src pointer */
> +        ret
> +END(__memmove)
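
For anyone reading along, the copy-direction logic above boils down to roughly
the C sketch below. The name sketch_memmove and the 4-byte memcpy() through a
temporary are purely illustrative; this is a reading of the assembly, not code
from the patch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy forward when dest sits below src, backward otherwise, moving
 * whole 32-bit words first and finishing with the leftover bytes.
 */
static void *sketch_memmove(void *dest, const void *src, size_t count)
{
        unsigned char *d = dest;
        const unsigned char *s = src;
        size_t words = count >> 2;      /* srli t2, a2, 2 */
        size_t bytes = count & 0x3;     /* andi a2, a2, 3 */
        uint32_t w;

        if (d == s || count == 0)
                return dest;

        if (d < s) {                            /* forward copy */
                for (; words; words--, d += 4, s += 4) {
                        memcpy(&w, s, 4);       /* lw t3, 0(a1) */
                        memcpy(d, &w, 4);       /* sw t3, 0(a0) */
                }
                for (; bytes; bytes--)
                        *d++ = *s++;
        } else {                                /* backward copy for overlap */
                d += count;
                s += count;
                for (; words; words--) {
                        s -= 4;
                        d -= 4;
                        memcpy(&w, s, 4);
                        memcpy(d, &w, 4);
                }
                for (; bytes; bytes--)
                        *--d = *--s;
        }
        return dest;
}

One difference worth noting: the sketch copies each word through a temporary
with memcpy(), which leaves alignment to the compiler, whereas the word loops
above issue lw/sw directly on whatever addresses they are given.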

Thanks, this is on for-next.


