@@ -112,8 +112,12 @@ EXPORT_SYMBOL(__get_user_8)

/* .. and the same for __get_user, just without the range checks */
SYM_FUNC_START(__get_user_nocheck_1)
- ASM_STAC
+#ifdef CONFIG_X86_64
+ check_range size=1
+#else
ASM_BARRIER_NOSPEC
+#endif
+ ASM_STAC
UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
@@ -122,8 +126,12 @@ SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)

SYM_FUNC_START(__get_user_nocheck_2)
- ASM_STAC
+#ifdef CONFIG_X86_64
+ check_range size=2
+#else
ASM_BARRIER_NOSPEC
+#endif
+ ASM_STAC
UACCESS movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
@@ -132,8 +140,12 @@ SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)

SYM_FUNC_START(__get_user_nocheck_4)
- ASM_STAC
+#ifdef CONFIG_X86_64
+ check_range size=4
+#else
ASM_BARRIER_NOSPEC
+#endif
+ ASM_STAC
UACCESS movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
@@ -142,8 +154,12 @@ SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)

SYM_FUNC_START(__get_user_nocheck_8)
- ASM_STAC
+#ifdef CONFIG_X86_64
+ check_range size=8
+#else
ASM_BARRIER_NOSPEC
+#endif
+ ASM_STAC
#ifdef CONFIG_X86_64
UACCESS movq (%_ASM_AX),%rdx
#else
The barrier_nospec() in 64-bit __get_user() is slow. Instead use pointer
masking to force the user pointer to all 1's if a previous access_ok()
mispredicted true for an invalid address.

Note that for safety on some AMD CPUs, this relies on recent commit
86e6b1547b3d ("x86: fix user address masking non-canonical speculation
issue").

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
 arch/x86/lib/getuser.S | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
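For context, the 64-bit arm of check_range relies on the cmp/sbb/or
masking sequence from commit 86e6b1547b3d rather than a branch or a
barrier. A minimal sketch of that idea, assuming %rdx has been preloaded
with the user-space limit (the actual macro materializes USER_PTR_MAX as
a runtime-patched constant instead of a plain register value):

	/* sketch only: %rdx = user-space limit (assumed preloaded) */
	cmp	%rax, %rdx	/* CF = 1 iff the pointer is above the limit */
	sbb	%rdx, %rdx	/* mask: 0 if valid, all 1's if invalid */
	or	%rdx, %rax	/* invalid pointer becomes all 1's */

The load in __get_user_nocheck_*() then executes unconditionally: on a
correct access_ok() prediction the pointer is untouched, and on a
misprediction the CPU can only dereference an all-1's address, never a
kernel one, which is why the LFENCE in ASM_BARRIER_NOSPEC can be dropped
from the 64-bit fast path.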