Doing a misaligned access to userspace memory traps on platforms where
such accesses are emulated. Recent fixes removed the kernel's ability
to perform unaligned accesses to userspace memory safely, since
interrupts are now kept disabled at all times while handling them.
Doing so would thus crash the kernel.

Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
put_user() with an unsigned long * address that should have been an
unsigned int *. Reenabling kernel misaligned access emulation is a bit
risky and would also degrade performance. Rather than doing that, avoid
misaligned accesses altogether by using copy_from/to_user(), which
never performs misaligned accesses. This is only done for
!CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
more code for this config.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
arch/riscv/include/asm/uaccess.h | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 046de7ced09c..b542c05f394f 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -169,8 +169,21 @@ do { \
#endif /* CONFIG_64BIT */
+unsigned long __must_check __asm_copy_to_user(void __user *to,
+ const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user(void *to,
+ const void __user *from, unsigned long n);
+
#define __get_user_nocheck(x, __gu_ptr, label) \
do { \
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
+ if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+ if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
+ goto label; \
+ else \
+ break; \
+ } \
+ } \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__get_user_asm("lb", (x), __gu_ptr, label); \
@@ -297,6 +310,15 @@ do { \
#define __put_user_nocheck(x, __gu_ptr, label) \
do { \
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
+ if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+ unsigned long val = (unsigned long)(x); \
+ if (__asm_copy_to_user(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+ goto label; \
+ else \
+ break; \
+ } \
+ } \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__put_user_asm("sb", (x), __gu_ptr, label); \
@@ -385,12 +407,6 @@ err_label: \
-EFAULT; \
})
-
-unsigned long __must_check __asm_copy_to_user(void __user *to,
- const void *from, unsigned long n);
-unsigned long __must_check __asm_copy_from_user(void *to,
- const void __user *from, unsigned long n);
-
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
--
2.49.0
On Fri, 30 May 2025 22:56:58 +0200
Clément Léger <cleger@rivosinc.com> wrote:
> Doing a misaligned access to userspace memory traps on platforms where
> such accesses are emulated. Recent fixes removed the kernel's ability
> to perform unaligned accesses to userspace memory safely, since
> interrupts are now kept disabled at all times while handling them.
> Doing so would thus crash the kernel.
>
> Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
> put_user() with an unsigned long * address that should have been an
> unsigned int *. Reenabling kernel misaligned access emulation is a bit
> risky and would also degrade performance. Rather than doing that, avoid
> misaligned accesses altogether by using copy_from/to_user(), which
> never performs misaligned accesses. This is only done for
> !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
> more code for this config.
For get_user() you are much better off reading the two words that contain
the value and then doing 'shift' and 'or' to get the correct value.
Even for put_user() doing the explicit byte accesses will be faster than
going through the generic copy_to/from_user() function.
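
Roughly like this (untested sketch; aligned_load32() is a hypothetical
stand-in for an exception-handled aligned userspace load, and RISC-V's
little-endian byte order is assumed):

/*
 * Untested sketch: fetch a 32-bit value that straddles an alignment
 * boundary using two aligned loads plus shift/or. This path is only
 * reached for misaligned pointers, so shift is never 0 and the
 * (32 - shift) below stays in range. aligned_load32() is assumed to
 * return 0 on success and -EFAULT on fault.
 */
static int get_user_unaligned32(unsigned int *val, const unsigned int __user *p)
{
	unsigned long base = (unsigned long)p & ~3UL;
	unsigned int shift = ((unsigned long)p & 3) * 8;
	unsigned int lo, hi;

	if (aligned_load32(&lo, (const void __user *)base) ||
	    aligned_load32(&hi, (const void __user *)(base + 4)))
		return -EFAULT;

	*val = (lo >> shift) | (hi << (32 - shift));
	return 0;
}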
David
On 31/05/2025 20:28, David Laight wrote:
> On Fri, 30 May 2025 22:56:58 +0200
> Clément Léger <cleger@rivosinc.com> wrote:
>
>> Doing a misaligned access to userspace memory traps on platforms where
>> such accesses are emulated. Recent fixes removed the kernel's ability
>> to perform unaligned accesses to userspace memory safely, since
>> interrupts are now kept disabled at all times while handling them.
>> Doing so would thus crash the kernel.
>>
>> Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
>> put_user() with an unsigned long * address that should have been an
>> unsigned int *. Reenabling kernel misaligned access emulation is a bit
>> risky and would also degrade performance. Rather than doing that, avoid
>> misaligned accesses altogether by using copy_from/to_user(), which
>> never performs misaligned accesses. This is only done for
>> !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
>> more code for this config.
>
> For get_user() you are much better off reading the two words that contain
> the value and then doing 'shift' and 'or' to get the correct value.
>
> Even for put_user() doing the explicit byte accesses will be faster than
> going through the generic copy_to/from_user() function.
Hi David,
Alexandre tried that approach as well but that added a bit more code and
it was more complex than just calling copy_from/to_user(). That can
still be done in another commit if we need more performance later. As a
side note, prior to this patch these misaligned accesses went through
trap-and-emulate, so this is still a performance improvement.
Thanks,
Clément
On Sat, 31 May 2025, David Laight wrote:

> For get_user() you are much better off reading the two words that contain
> the value and then doing 'shift' and 'or' to get the correct value.
>
> Even for put_user() doing the explicit byte accesses will be faster than
> going through the generic copy_to/from_user() function.

 FWIW I think optimising copy_to/from_user for such cases would be a more
robust approach moving forward than sprinkling open-coded implementations
across code.

  Maciej
On 01/06/2025 19:35, Maciej W. Rozycki wrote:
> On Sat, 31 May 2025, David Laight wrote:
>
>> For get_user() you are much better off reading the two words that contain
>> the value and then doing 'shift' and 'or' to get the correct value.
>>
>> Even for put_user() doing the explicit byte accesses will be faster than
>> going through the generic copy_to/from_user() function.
>
> FWIW I think optimising copy_to/from_user for such cases would be a more
> robust approach moving forward than sprinkling open-coded implementations
> across code.

Hi Maciej,

Indeed, that's a good idea, we could optimize small copies in
copy_from/to_user so that all users benefit as well (a rough sketch
follows below).

Thanks,

Clément
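
Something along these lines, perhaps (untested sketch; it reuses the
existing __asm_copy_from_user() as the fallback and __get_user() for
the byte loop):

/*
 * Untested sketch: byte-wise fast path for small copies so that a
 * misaligned user pointer never triggers a word-sized access on
 * !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. Returns the number of
 * bytes not copied, like raw_copy_from_user().
 */
static inline unsigned long
copy_from_user_small(void *to, const void __user *from, unsigned long n)
{
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    n <= sizeof(unsigned long)) {
		unsigned char *dst = to;
		unsigned long i;

		for (i = 0; i < n; i++) {
			unsigned char c;

			if (__get_user(c, (const unsigned char __user *)from + i))
				return n - i;
			dst[i] = c;
		}
		return 0;
	}
	return __asm_copy_from_user(to, from, n);
}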
On 5/30/25 22:56, Clément Léger wrote:
> Doing a misaligned access to userspace memory traps on platforms where
> such accesses are emulated. Recent fixes removed the kernel's ability
> to perform unaligned accesses to userspace memory safely, since
> interrupts are now kept disabled at all times while handling them.
> Doing so would thus crash the kernel.
>
> Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
> put_user() with an unsigned long * address that should have been an
> unsigned int *. Reenabling kernel misaligned access emulation is a bit
> risky and would also degrade performance. Rather than doing that, avoid
> misaligned accesses altogether by using copy_from/to_user(), which
> never performs misaligned accesses. This is only done for
> !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
> more code for this config.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>
> ---
> arch/riscv/include/asm/uaccess.h | 28 ++++++++++++++++++++++------
> 1 file changed, 22 insertions(+), 6 deletions(-)
>
> diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
> index 046de7ced09c..b542c05f394f 100644
> --- a/arch/riscv/include/asm/uaccess.h
> +++ b/arch/riscv/include/asm/uaccess.h
> @@ -169,8 +169,21 @@ do { \
>
> #endif /* CONFIG_64BIT */
>
> +unsigned long __must_check __asm_copy_to_user(void __user *to,
> + const void *from, unsigned long n);
> +unsigned long __must_check __asm_copy_from_user(void *to,
> + const void __user *from, unsigned long n);
> +
> #define __get_user_nocheck(x, __gu_ptr, label) \
> do { \
> + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
> + if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
Nit: I would use && instead of 2 ifs.
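
I.e. something like (illustrative):

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&	\
	    !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) {	\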
> + if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
> + goto label; \
> + else \
> + break; \
Here I would remove the else
> + } \
> + } \
> switch (sizeof(*__gu_ptr)) { \
> case 1: \
> __get_user_asm("lb", (x), __gu_ptr, label); \
> @@ -297,6 +310,15 @@ do { \
>
> #define __put_user_nocheck(x, __gu_ptr, label) \
> do { \
> + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
> + if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
> + unsigned long val = (unsigned long)(x); \
Here it seems like __inttype(*(__gu_ptr)) is more accurate than unsigned
long, even though I think unsigned long works fine too.
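
For instance (illustrative; __inttype() picks an unsigned integer type
at least as wide as the pointee, so the low bytes copied out of the
temporary still hold the value on little-endian):

	__inttype(*__gu_ptr) val = (__inttype(*__gu_ptr))(x);	\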
On 31/05/2025 14:35, Alexandre Ghiti wrote:
> On 5/30/25 22:56, Clément Léger wrote:
>> Doing a misaligned access to userspace memory traps on platforms where
>> such accesses are emulated. Recent fixes removed the kernel's ability
>> to perform unaligned accesses to userspace memory safely, since
>> interrupts are now kept disabled at all times while handling them.
>> Doing so would thus crash the kernel.
>>
>> Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
>> put_user() with an unsigned long * address that should have been an
>> unsigned int *. Reenabling kernel misaligned access emulation is a bit
>> risky and would also degrade performance. Rather than doing that, avoid
>> misaligned accesses altogether by using copy_from/to_user(), which
>> never performs misaligned accesses. This is only done for
>> !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
>> more code for this config.
>>
>> Signed-off-by: Clément Léger <cleger@rivosinc.com>
>> ---
>> arch/riscv/include/asm/uaccess.h | 28 ++++++++++++++++++++++------
>> 1 file changed, 22 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
>> index 046de7ced09c..b542c05f394f 100644
>> --- a/arch/riscv/include/asm/uaccess.h
>> +++ b/arch/riscv/include/asm/uaccess.h
>> @@ -169,8 +169,21 @@ do { \
>> #endif /* CONFIG_64BIT */
>> +unsigned long __must_check __asm_copy_to_user(void __user *to,
>> + const void *from, unsigned long n);
>> +unsigned long __must_check __asm_copy_from_user(void *to,
>> + const void __user *from, unsigned long n);
>> +
>> #define __get_user_nocheck(x, __gu_ptr, label) \
>> do { \
>> + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
>> + if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
>
>
> Nit: I would use && instead of 2 ifs.
>
>
>> + if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
>> + goto label; \
>> + else \
>> + break; \
>
>
> Here I would remove the else
Hi Alex,
The "else" is needed to break from the outer do/while loop or it will go
though the next switch case (and it will crash due to misaligned accesses).
>
>
>> + } \
>> + } \
>> switch (sizeof(*__gu_ptr)) { \
>> case 1: \
>> __get_user_asm("lb", (x), __gu_ptr, label); \
>> @@ -297,6 +310,15 @@ do { \
>> #define __put_user_nocheck(x, __gu_ptr, label) \
>> do { \
>> + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
>> + if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
>> + unsigned long val = (unsigned long)(x); \
>
>
> Here it seems like __inttype(*(__gu_ptr)) is more accurate than unsigned
> long, even though I think unsigned long works fine too.
Wasn't aware of __inttype, but it sounds good.
Thanks,
Clément
Hi Clément,
On 6/2/25 09:37, Clément Léger wrote:
>
> On 31/05/2025 14:35, Alexandre Ghiti wrote:
>> On 5/30/25 22:56, Clément Léger wrote:
>>> Doing a misaligned access to userspace memory traps on platforms where
>>> such accesses are emulated. Recent fixes removed the kernel's ability
>>> to perform unaligned accesses to userspace memory safely, since
>>> interrupts are now kept disabled at all times while handling them.
>>> Doing so would thus crash the kernel.
>>>
>>> Such behavior was detected with GET_UNALIGN_CTL(), which was doing a
>>> put_user() with an unsigned long * address that should have been an
>>> unsigned int *. Reenabling kernel misaligned access emulation is a bit
>>> risky and would also degrade performance. Rather than doing that, avoid
>>> misaligned accesses altogether by using copy_from/to_user(), which
>>> never performs misaligned accesses. This is only done for
>>> !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and thus only generates a bit
>>> more code for this config.
>>>
>>> Signed-off-by: Clément Léger <cleger@rivosinc.com>
>>> ---
>>> arch/riscv/include/asm/uaccess.h | 28 ++++++++++++++++++++++------
>>> 1 file changed, 22 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
>>> index 046de7ced09c..b542c05f394f 100644
>>> --- a/arch/riscv/include/asm/uaccess.h
>>> +++ b/arch/riscv/include/asm/uaccess.h
>>> @@ -169,8 +169,21 @@ do { \
>>> #endif /* CONFIG_64BIT */
>>> +unsigned long __must_check __asm_copy_to_user(void __user *to,
>>> + const void *from, unsigned long n);
>>> +unsigned long __must_check __asm_copy_from_user(void *to,
>>> + const void __user *from, unsigned long n);
>>> +
>>> #define __get_user_nocheck(x, __gu_ptr, label) \
>>> do { \
>>> + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { \
>>> + if (!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
>>
>> Nit: I would use && instead of 2 ifs.
>>
>>
>>> + if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
>>> + goto label; \
>>> + else \
>>> + break; \
>>
>> Here I would remove the else
> Hi Alex,
>
> The "else" is needed to break from the outer do/while loop or it will go
> though the next switch case (and it will crash due to misaligned accesses).
I meant only the "else", not the "break"!
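
I.e. (same behavior, since the goto never falls through):

	if (__asm_copy_from_user(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
		goto label; \
	break; \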
Thanks,
Alex