Message ID | 20201015150159.28933-2-cmr@codefail.de (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | Improve signal performance on PPC64 with KUAP | expand |
Context | Check | Description |
---|---|---|
snowpatch_ozlabs/apply_patch | success | Successfully applied on branch powerpc/merge (118be7377c97e35c33819bcb3bbbae5a42a4ac43) |
snowpatch_ozlabs/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 63 lines checked |
snowpatch_ozlabs/needsstable | success | Patch has no Fixes tags |
On Thu, Oct 15, 2020 at 10:01:52AM -0500, Christopher M. Riedl wrote: > Implement raw_copy_from_user_allowed() which assumes that userspace read > access is open. Use this new function to implement raw_copy_from_user(). > Finally, wrap the new function to follow the usual "unsafe_" convention > of taking a label argument. The new raw_copy_from_user_allowed() calls > __copy_tofrom_user() internally, but this is still safe to call in user > access blocks formed with user_*_access_begin()/user_*_access_end() > since asm functions are not instrumented for tracing. Please also add a fallback unsafe_copy_from_user to linux/uaccess.h so this can be used as a generic API.
Le 15/10/2020 à 17:01, Christopher M. Riedl a écrit : > Implement raw_copy_from_user_allowed() which assumes that userspace read > access is open. Use this new function to implement raw_copy_from_user(). > Finally, wrap the new function to follow the usual "unsafe_" convention > of taking a label argument. The new raw_copy_from_user_allowed() calls > __copy_tofrom_user() internally, but this is still safe to call in user > access blocks formed with user_*_access_begin()/user_*_access_end() > since asm functions are not instrumented for tracing. Would objtool accept that if it was implemented on powerpc ? __copy_tofrom_user() is a function which is optimised for larger memory copies (using dcbz, etc ...) Do we need such an optimisation for unsafe_copy_from_user() ? Or can we do a simple loop as done for unsafe_copy_to_user() instead ? Christophe > > Signed-off-by: Christopher M. Riedl <cmr@codefail.de> > --- > arch/powerpc/include/asm/uaccess.h | 28 +++++++++++++++++++--------- > 1 file changed, 19 insertions(+), 9 deletions(-) > > diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h > index 26781b044932..66940b4eb692 100644 > --- a/arch/powerpc/include/asm/uaccess.h > +++ b/arch/powerpc/include/asm/uaccess.h > @@ -418,38 +418,45 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) > } > #endif /* __powerpc64__ */ > > -static inline unsigned long raw_copy_from_user(void *to, > - const void __user *from, unsigned long n) > +static inline unsigned long > +raw_copy_from_user_allowed(void *to, const void __user *from, unsigned long n) > { > - unsigned long ret; > if (__builtin_constant_p(n) && (n <= 8)) { > - ret = 1; > + unsigned long ret = 1; > > switch (n) { > case 1: > barrier_nospec(); > - __get_user_size(*(u8 *)to, from, 1, ret); > + __get_user_size_allowed(*(u8 *)to, from, 1, ret); > break; > case 2: > barrier_nospec(); > - __get_user_size(*(u16 *)to, from, 2, ret); > + __get_user_size_allowed(*(u16 *)to, 
from, 2, ret); > break; > case 4: > barrier_nospec(); > - __get_user_size(*(u32 *)to, from, 4, ret); > + __get_user_size_allowed(*(u32 *)to, from, 4, ret); > break; > case 8: > barrier_nospec(); > - __get_user_size(*(u64 *)to, from, 8, ret); > + __get_user_size_allowed(*(u64 *)to, from, 8, ret); > break; > } > if (ret == 0) > return 0; > } > > + return __copy_tofrom_user((__force void __user *)to, from, n); > +} > + > +static inline unsigned long > +raw_copy_from_user(void *to, const void __user *from, unsigned long n) > +{ > + unsigned long ret; > + > barrier_nospec(); > allow_read_from_user(from, n); > - ret = __copy_tofrom_user((__force void __user *)to, from, n); > + ret = raw_copy_from_user_allowed(to, from, n); > prevent_read_from_user(from, n); > return ret; > } > @@ -571,6 +578,9 @@ user_write_access_begin(const void __user *ptr, size_t len) > #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) > #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) > > +#define unsafe_copy_from_user(d, s, l, e) \ > + unsafe_op_wrap(raw_copy_from_user_allowed(d, s, l), e) > + > #define unsafe_copy_to_user(d, s, l, e) \ > do { \ > u8 __user *_dst = (u8 __user *)(d); \ >
Le 16/10/2020 à 08:54, Christoph Hellwig a écrit : > On Thu, Oct 15, 2020 at 10:01:52AM -0500, Christopher M. Riedl wrote: >> Implement raw_copy_from_user_allowed() which assumes that userspace read >> access is open. Use this new function to implement raw_copy_from_user(). >> Finally, wrap the new function to follow the usual "unsafe_" convention >> of taking a label argument. The new raw_copy_from_user_allowed() calls >> __copy_tofrom_user() internally, but this is still safe to call in user >> access blocks formed with user_*_access_begin()/user_*_access_end() >> since asm functions are not instrumented for tracing. > > Please also add a fallback unsafe_copy_from_user to linux/uaccess.h > so this can be used as a generic API. > I guess this can be done in a separate patch independent of that series ? Christophe
On Fri Oct 16, 2020 at 10:17 AM CDT, Christophe Leroy wrote: > > > Le 15/10/2020 à 17:01, Christopher M. Riedl a écrit : > > Implement raw_copy_from_user_allowed() which assumes that userspace read > > access is open. Use this new function to implement raw_copy_from_user(). > > Finally, wrap the new function to follow the usual "unsafe_" convention > > of taking a label argument. The new raw_copy_from_user_allowed() calls > > __copy_tofrom_user() internally, but this is still safe to call in user > > access blocks formed with user_*_access_begin()/user_*_access_end() > > since asm functions are not instrumented for tracing. > > Would objtool accept that if it was implemented on powerpc ? > > __copy_tofrom_user() is a function which is optimised for larger memory > copies (using dcbz, etc ...) > Do we need such an optimisation for unsafe_copy_from_user() ? Or can we > do a simple loop as done for > unsafe_copy_to_user() instead ? I tried using a simple loop based on your unsafe_copy_to_user() implementation. Similar to the copy_{vsx,fpr}_from_user() results there is a hit to signal handling performance. The results with the loop are in the 'unsafe-signal64-copy' column: | | hash | radix | | -------------------- | ------ | ------ | | linuxppc/next | 289014 | 158408 | | unsafe-signal64 | 298506 | 253053 | | unsafe-signal64-copy | 197029 | 177002 | Similar to the copy_{vsx,fpr}_from_user() patch I don't fully understand why this performs so badly yet. 
Implementation: unsafe_copy_from_user(d, s, l, e) \ do { \ u8 *_dst = (u8 *)(d); \ const u8 __user *_src = (u8 __user*)(s); \ size_t _len = (l); \ int _i; \ \ for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \ unsafe_get_user(*(long*)(_dst + _i), (long __user *)(_src + _i), e); \ if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \ unsafe_get_user(*(u32*)(_dst + _i), (u32 __user *)(_src + _i), e); \ _i += 4; \ } \ if (_len & 2) { \ unsafe_get_user(*(u16*)(_dst + _i), (u16 __user *)(_src + _i), e); \ _i += 2; \ } \ if (_len & 1) \ unsafe_get_user(*(u8*)(_dst + _i), (u8 __user *)(_src + _i), e); \ } while (0) > > Christophe > > > > > Signed-off-by: Christopher M. Riedl <cmr@codefail.de> > > --- > > arch/powerpc/include/asm/uaccess.h | 28 +++++++++++++++++++--------- > > 1 file changed, 19 insertions(+), 9 deletions(-) > > > > diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h > > index 26781b044932..66940b4eb692 100644 > > --- a/arch/powerpc/include/asm/uaccess.h > > +++ b/arch/powerpc/include/asm/uaccess.h > > @@ -418,38 +418,45 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) > > } > > #endif /* __powerpc64__ */ > > > > -static inline unsigned long raw_copy_from_user(void *to, > > - const void __user *from, unsigned long n) > > +static inline unsigned long > > +raw_copy_from_user_allowed(void *to, const void __user *from, unsigned long n) > > { > > - unsigned long ret; > > if (__builtin_constant_p(n) && (n <= 8)) { > > - ret = 1; > > + unsigned long ret = 1; > > > > switch (n) { > > case 1: > > barrier_nospec(); > > - __get_user_size(*(u8 *)to, from, 1, ret); > > + __get_user_size_allowed(*(u8 *)to, from, 1, ret); > > break; > > case 2: > > barrier_nospec(); > > - __get_user_size(*(u16 *)to, from, 2, ret); > > + __get_user_size_allowed(*(u16 *)to, from, 2, ret); > > break; > > case 4: > > barrier_nospec(); > > - __get_user_size(*(u32 *)to, from, 4, ret); > > + 
__get_user_size_allowed(*(u32 *)to, from, 4, ret); > > break; > > case 8: > > barrier_nospec(); > > - __get_user_size(*(u64 *)to, from, 8, ret); > > + __get_user_size_allowed(*(u64 *)to, from, 8, ret); > > break; > > } > > if (ret == 0) > > return 0; > > } > > > > + return __copy_tofrom_user((__force void __user *)to, from, n); > > +} > > + > > +static inline unsigned long > > +raw_copy_from_user(void *to, const void __user *from, unsigned long n) > > +{ > > + unsigned long ret; > > + > > barrier_nospec(); > > allow_read_from_user(from, n); > > - ret = __copy_tofrom_user((__force void __user *)to, from, n); > > + ret = raw_copy_from_user_allowed(to, from, n); > > prevent_read_from_user(from, n); > > return ret; > > } > > @@ -571,6 +578,9 @@ user_write_access_begin(const void __user *ptr, size_t len) > > #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) > > #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) > > > > +#define unsafe_copy_from_user(d, s, l, e) \ > > + unsafe_op_wrap(raw_copy_from_user_allowed(d, s, l), e) > > + > > #define unsafe_copy_to_user(d, s, l, e) \ > > do { \ > > u8 __user *_dst = (u8 __user *)(d); \ > >
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 26781b044932..66940b4eb692 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -418,38 +418,45 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) } #endif /* __powerpc64__ */ -static inline unsigned long raw_copy_from_user(void *to, - const void __user *from, unsigned long n) +static inline unsigned long +raw_copy_from_user_allowed(void *to, const void __user *from, unsigned long n) { - unsigned long ret; if (__builtin_constant_p(n) && (n <= 8)) { - ret = 1; + unsigned long ret = 1; switch (n) { case 1: barrier_nospec(); - __get_user_size(*(u8 *)to, from, 1, ret); + __get_user_size_allowed(*(u8 *)to, from, 1, ret); break; case 2: barrier_nospec(); - __get_user_size(*(u16 *)to, from, 2, ret); + __get_user_size_allowed(*(u16 *)to, from, 2, ret); break; case 4: barrier_nospec(); - __get_user_size(*(u32 *)to, from, 4, ret); + __get_user_size_allowed(*(u32 *)to, from, 4, ret); break; case 8: barrier_nospec(); - __get_user_size(*(u64 *)to, from, 8, ret); + __get_user_size_allowed(*(u64 *)to, from, 8, ret); break; } if (ret == 0) return 0; } + return __copy_tofrom_user((__force void __user *)to, from, n); +} + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long ret; + barrier_nospec(); allow_read_from_user(from, n); - ret = __copy_tofrom_user((__force void __user *)to, from, n); + ret = raw_copy_from_user_allowed(to, from, n); prevent_read_from_user(from, n); return ret; } @@ -571,6 +578,9 @@ user_write_access_begin(const void __user *ptr, size_t len) #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) +#define unsafe_copy_from_user(d, s, l, e) \ + unsafe_op_wrap(raw_copy_from_user_allowed(d, s, l), e) + #define unsafe_copy_to_user(d, s, l, e) \ do { \ u8 __user *_dst 
= (u8 __user *)(d); \
Implement raw_copy_from_user_allowed() which assumes that userspace read access is open. Use this new function to implement raw_copy_from_user(). Finally, wrap the new function to follow the usual "unsafe_" convention of taking a label argument. The new raw_copy_from_user_allowed() calls __copy_tofrom_user() internally, but this is still safe to call in user access blocks formed with user_*_access_begin()/user_*_access_end() since asm functions are not instrumented for tracing. Signed-off-by: Christopher M. Riedl <cmr@codefail.de> --- arch/powerpc/include/asm/uaccess.h | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-)