[PATCH 2/2] riscv: uaccess: do not do misaligned accesses in get/put_user()

Clément Léger cleger at rivosinc.com
Mon Jun 2 00:34:17 PDT 2025



On 31/05/2025 20:28, David Laight wrote:
> On Fri, 30 May 2025 22:56:58 +0200
> Clément Léger <cleger at rivosinc.com> wrote:
> 
>> Doing a misaligned access to userspace memory triggers a trap on
>> platforms where such accesses are emulated. Recent fixes removed the
>> kernel's ability to do misaligned accesses to userspace memory safely,
>> since interrupts are now kept disabled the whole time during such an
>> access; doing one would therefore crash the kernel.
>>
>> Such a crash was detected with GET_UNALIGN_CTL(), which was doing a
>> put_user() with an unsigned long * address that should have been an
>> unsigned int *. Re-enabling in-kernel misaligned access emulation is a
>> bit risky and would also degrade performance. Rather than doing that,
>> avoid misaligned accesses altogether by falling back to
>> copy_from/to_user(), which never does misaligned accesses. This is only
>> done for !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, so the extra code is
>> only generated for that configuration.
> 
> For get_user() you are much better off reading the two words that contain
> the value and then doing 'shift' and 'or' to get the correct value.
> 
> Even for put_user(), doing explicit byte accesses will be faster than
> going through the generic copy_to/from_user() functions.

Hi David,

Alexandre tried that approach as well, but it added a bit more code and
was more complex than simply calling copy_from/to_user(). It can still
be done in a later commit if we need more performance. As a side note,
prior to this patch these misaligned accesses went through
trap-and-emulate, so this is still a performance improvement.
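
For reference, the word-based approach boils down to something like the
untested sketch below for a 64-bit misaligned load (little-endian only;
in real uaccess code both loads would be exception-handled aligned user
accesses jumping to the error label on a fault, not plain dereferences,
and all names here are purely illustrative):

#include <stdint.h>

/*
 * Sketch only: rebuild a misaligned 64-bit value from the two
 * naturally aligned words that contain it.
 */
static uint64_t misaligned_read_u64(const void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	const uint64_t *base = (const uint64_t *)(addr & ~(uintptr_t)7);
	unsigned int shift = (addr & 7) * 8; /* non-zero: only used when misaligned */
	uint64_t lo, hi;

	lo = base[0];	/* aligned word holding the low bytes  */
	hi = base[1];	/* aligned word holding the high bytes */

	/*
	 * base[1] overlaps the tail of the value, so it lies in the
	 * same page as bytes we are reading anyway.
	 */
	return (lo >> shift) | (hi << (64 - shift));
}

The put_user() side would similarly be open-coded byte (or shifted word)
stores. Doable, but as said above it ends up being noticeably more code
than the copy_from/to_user() fallback.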

Thanks,

Clément

> 
> 	David
> 
>>
>> Signed-off-by: Clément Léger <cleger at rivosinc.com>
>> ---
>>  arch/riscv/include/asm/uaccess.h | 28 ++++++++++++++++++++++------
>>  1 file changed, 22 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
>> index 046de7ced09c..b542c05f394f 100644
>> --- a/arch/riscv/include/asm/uaccess.h
>> +++ b/arch/riscv/include/asm/uaccess.h
>> @@ -169,8 +169,21 @@ do {								\
>>  
>>  #endif /* CONFIG_64BIT */
>>  
>> +unsigned long __must_check __asm_copy_to_user(void __user *to,
>> +	const void *from, unsigned long n);
>> +unsigned long __must_check __asm_copy_from_user(void *to,
>> +	const void __user *from, unsigned long n);
>> +
>>  #define __get_user_nocheck(x, __gu_ptr, label)			\
>>  do {								\
>> +	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {			\
>> +		if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {		\
>> +			if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr)))	\
>> +				goto label;			\
>> +			else					\
>> +				break;				\
>> +		}						\
>> +	}							\
>>  	switch (sizeof(*__gu_ptr)) {				\
>>  	case 1:							\
>>  		__get_user_asm("lb", (x), __gu_ptr, label);	\
>> @@ -297,6 +310,15 @@ do {								\
>>  
>>  #define __put_user_nocheck(x, __gu_ptr, label)			\
>>  do {								\
>> +	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {			\
>> +		if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {		\
>> +			unsigned long val = (unsigned long)(x);				\
>> +			if (__asm_copy_to_user(__gu_ptr, &(val), sizeof(*__gu_ptr)))	\
>> +				goto label;			\
>> +			else					\
>> +				break;				\
>> +		}						\
>> +	}							\
>>  	switch (sizeof(*__gu_ptr)) {				\
>>  	case 1:							\
>>  		__put_user_asm("sb", (x), __gu_ptr, label);	\
>> @@ -385,12 +407,6 @@ err_label:							\
>>  		-EFAULT;					\
>>  })
>>  
>> -
>> -unsigned long __must_check __asm_copy_to_user(void __user *to,
>> -	const void *from, unsigned long n);
>> -unsigned long __must_check __asm_copy_from_user(void *to,
>> -	const void __user *from, unsigned long n);
>> -
>>  static inline unsigned long
>>  raw_copy_from_user(void *to, const void __user *from, unsigned long n)
>>  {
> 



