Commit 30306f61 authored by Linus Torvalds

Merge tag 'hardening-v5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardening fixes from Kees Cook:

 - Correctly handle vm_map areas in hardened usercopy (Matthew Wilcox)

 - Adjust CFI RCU usage to avoid boot splats with cpuidle (Sami Tolvanen)

* tag 'hardening-v5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: Make usercopy resilient against ridiculously large copies
  usercopy: Cast pointer to an integer once
  usercopy: Handle vm_map_ram() areas
  cfi: Fix __cfi_slowpath_diag RCU usage with cpuidle
parents afe9eb14 1dfbe9fc
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
 
 static inline bool is_vm_area_hugepages(const void *addr)
 {
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -281,6 +281,8 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
 static inline cfi_check_fn find_check_fn(unsigned long ptr)
 {
 	cfi_check_fn fn = NULL;
+	unsigned long flags;
+	bool rcu_idle;
 
 	if (is_kernel_text(ptr))
 		return __cfi_check;
@@ -290,13 +292,21 @@ static inline cfi_check_fn find_check_fn(unsigned long ptr)
 	 * the shadow and __module_address use RCU, so we need to wake it
 	 * up if necessary.
 	 */
-	RCU_NONIDLE({
-		if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
-			fn = find_shadow_check_fn(ptr);
-
-		if (!fn)
-			fn = find_module_check_fn(ptr);
-	});
+	rcu_idle = !rcu_is_watching();
+	if (rcu_idle) {
+		local_irq_save(flags);
+		rcu_irq_enter();
+	}
+
+	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
+		fn = find_shadow_check_fn(ptr);
+	if (!fn)
+		fn = find_module_check_fn(ptr);
+
+	if (rcu_idle) {
+		rcu_irq_exit();
+		local_irq_restore(flags);
+	}
 
 	return fn;
 }
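A note on the kernel/cfi.c change: instead of unconditionally waking RCU with RCU_NONIDLE(), the lookup now wakes RCU only when it is not already watching, which avoids the boot splats triggered from the cpuidle path. A minimal sketch of the same pattern in isolation (kernel-style C; do_rcu_protected_lookup() is a hypothetical stand-in for the shadow/module lookups):

/*
 * Sketch, not the kernel's code: enter an RCU-protected section from a
 * context that may run while RCU is idle (e.g. cpuidle).
 */
#include <linux/irqflags.h>
#include <linux/rcupdate.h>

static void *lookup_with_rcu_wakeup(unsigned long key)
{
	unsigned long flags;
	bool rcu_idle;
	void *ret;

	rcu_idle = !rcu_is_watching();	/* is this CPU RCU-idle? */
	if (rcu_idle) {
		local_irq_save(flags);	/* rcu_irq_enter() needs IRQs off */
		rcu_irq_enter();	/* make RCU watch this CPU again */
	}

	ret = do_rcu_protected_lookup(key);	/* hypothetical RCU reader */

	if (rcu_idle) {
		rcu_irq_exit();
		local_irq_restore(flags);
	}

	return ret;
}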
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
+	uintptr_t addr = (uintptr_t)ptr;
+	unsigned long offset;
 	struct folio *folio;
 
 	if (is_kmap_addr(ptr)) {
-		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
-
-		if ((unsigned long)ptr + n - 1 > page_end)
-			usercopy_abort("kmap", NULL, to_user,
-					offset_in_page(ptr), n);
+		offset = offset_in_page(ptr);
+		if (n > PAGE_SIZE - offset)
+			usercopy_abort("kmap", NULL, to_user, offset, n);
 		return;
 	}
 
 	if (is_vmalloc_addr(ptr)) {
-		struct vm_struct *area = find_vm_area(ptr);
-		unsigned long offset;
+		struct vmap_area *area = find_vmap_area(addr);
 
-		if (!area) {
+		if (!area)
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
-			return;
-		}
 
-		offset = ptr - area->addr;
-		if (offset + n > get_vm_area_size(area))
+		if (n > area->va_end - addr) {
+			offset = addr - area->va_start;
 			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		}
 		return;
 	}
 
@@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else if (folio_test_large(folio)) {
-		unsigned long offset = ptr - folio_address(folio);
-		if (offset + n > folio_size(folio))
+		offset = ptr - folio_address(folio);
+		if (n > folio_size(folio) - offset)
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
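Worth noting about the mm/usercopy.c hunks: the bounds checks were rewritten from the form "offset + n > size" to "n > size - offset". With unsigned arithmetic the first form can wrap around for a very large n and wrongly pass, while the second cannot, because offset is already known to be within size. A standalone userspace C demonstration with made-up sizes:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long size = 4096;		/* e.g. one page */
	unsigned long offset = 16;		/* copy starts 16 bytes in */
	unsigned long n = ULONG_MAX - 8;	/* a "ridiculously large" copy */

	/* Old-style check: offset + n wraps, so the overflow goes unseen. */
	if (offset + n > size)
		printf("old check: caught\n");
	else
		printf("old check: MISSED (offset + n wrapped to %lu)\n",
		       offset + n);

	/* New-style check: size - offset cannot wrap, so this catches it. */
	if (n > size - offset)
		printf("new check: caught\n");
	else
		printf("new check: missed\n");

	return 0;
}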
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1798,7 +1798,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	free_vmap_area_noflush(va);
 }
 
-static struct vmap_area *find_vmap_area(unsigned long addr)
+struct vmap_area *find_vmap_area(unsigned long addr)
 {
 	struct vmap_area *va;
 
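Making find_vmap_area() non-static (with the matching declaration added to include/linux/vmalloc.h above) lets hardened usercopy bound a copy against the vmap_area itself. That matters because mappings created by vm_map_ram() have a vmap_area but no struct vm_struct, so the old find_vm_area()-based check aborted on valid copies from such regions. A sketch of the kind of caller this fixes (hypothetical driver code; page setup and error paths are simplified):

#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/numa.h>

static int copy_ram_mapping_to_user(void __user *ubuf,
				    struct page **pages, unsigned int npages)
{
	/* vm_map_ram() creates a vmap_area but no vm_struct. */
	void *vaddr = vm_map_ram(pages, npages, NUMA_NO_NODE);
	int ret = 0;

	if (!vaddr)
		return -ENOMEM;

	/* Hardened usercopy now bounds this copy via find_vmap_area(). */
	if (copy_to_user(ubuf, vaddr, (size_t)npages * PAGE_SIZE))
		ret = -EFAULT;

	vm_unmap_ram(vaddr, npages);
	return ret;
}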