Commit e74deb11 authored by Peter Zijlstra, committed by Ingo Molnar

x86/uaccess: Introduce user_access_{save,restore}()

Introduce common helpers for when we need to safely suspend a
uaccess section; for instance to generate a {KA,UB}SAN report.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 88e47182
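
The intended usage pattern, as a minimal sketch (the caller example_report() and its body are hypothetical, not part of this commit): an instrumentation path that can run inside an open uaccess section saves and clears EFLAGS.AC, does its work, then restores the previous state.

/*
 * Hypothetical sketch: suspend a possibly-open uaccess window around
 * code that must not run with EFLAGS.AC set, e.g. while generating a
 * {KA,UB}SAN report.
 */
static void example_report(const void *addr)
{
	unsigned long ua_flags;

	ua_flags = user_access_save();		/* CLAC: close the window */
	pr_err("bad access at %px\n", addr);	/* runs with AC clear */
	user_access_restore(ua_flags);		/* POPF: reopen if it was open */
}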
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -58,6 +58,23 @@ static __always_inline void stac(void)
 	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
 }
 
+static __always_inline unsigned long smap_save(void)
+{
+	unsigned long flags;
+
+	asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+				  X86_FEATURE_SMAP)
+		      : "=rm" (flags) : : "memory", "cc");
+
+	return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+	asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+		      : : "g" (flags) : "memory", "cc");
+}
+
 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
 	ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
@@ -69,6 +86,9 @@ static __always_inline void stac(void)
 static inline void clac(void) { }
 static inline void stac(void) { }
 
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
 #define ASM_CLAC
 #define ASM_STAC
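
Why pushf/pop is the right save: EFLAGS.AC (bit 18) is exactly the state that STAC/CLAC toggle, so capturing the whole flags word preserves it and popf later writes it back. A standalone user-space illustration of the same idiom (assumptions: x86-64 with GCC/Clang inline asm; demonstration code, not from the kernel):

#include <stdio.h>

int main(void)
{
	unsigned long flags;

	/* The same pushf/pop idiom smap_save() uses to capture EFLAGS. */
	asm volatile ("pushf; pop %0" : "=rm" (flags) : : "memory");

	/* AC is bit 18; in the kernel, STAC sets it and CLAC clears it. */
	printf("EFLAGS = %#lx, AC = %lu\n", flags, (flags >> 18) & 1);
	return 0;
}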
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -715,6 +715,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()
 
+#define user_access_save()	smap_save()
+#define user_access_restore(x)	smap_restore(x)
+
 #define unsafe_put_user(x, ptr, label)	\
 	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
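
Unlike user_access_begin()/user_access_end(), which open and close a section, the new pair can be called from code that may already be inside one. A hedged sketch of the pairing (example_copy(), its arguments, and the label are invented for illustration):

/* Sketch only: save/restore nested inside an open uaccess section. */
static int example_copy(int __user *uptr, int val)
{
	unsigned long ua_flags;

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;

	unsafe_put_user(val, uptr, Efault);

	ua_flags = user_access_save();	/* window closed, AC clear */
	/* ... work that must not access user memory ... */
	user_access_restore(ua_flags);	/* window reopened, AC restored */

	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}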
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
 #endif
 
 #ifdef CONFIG_HARDENED_USERCOPY