Commit 34e4c79f authored by Heiko Carstens, committed by Vasily Gorbik

s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()

Make use of the set_direct_map() calls for module allocations.
In particular:

- All changes to read-only permissions in kernel VA mappings are also
  applied to the direct mapping. Note that execute permissions are
  intentionally not applied to the direct mapping, in order to make
  sure that all allocated pages within the direct mapping stay
  non-executable.

- module_alloc() passes the VM_FLUSH_RESET_PERMS flag to __vmalloc_node_range()
  to make sure that all implicit permission changes made to the direct
  mapping are reset when the allocated vm area is freed again (see the
  usage sketch after this list).
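
As a minimal usage sketch (not part of the patch; the one-page allocation
and the call site are illustrative only, while module_alloc(),
set_memory_ro() and vfree() are the existing kernel interfaces):

	void *p = module_alloc(PAGE_SIZE);

	if (!p)
		return -ENOMEM;
	/* Makes the vmalloc mapping read-only and, with this change,
	 * also the corresponding page in the kernel direct mapping. */
	set_memory_ro((unsigned long)p, 1);
	/* ... use the allocation ... */
	/* Because VM_FLUSH_RESET_PERMS is set, vfree() resets the
	 * permissions of both mappings before the pages are returned
	 * to the page allocator. */
	vfree(p);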

Side effect: the direct mapping will be fragmented, depending on how many
vm areas with VM_FLUSH_RESET_PERMS and/or explicit page permission changes
are allocated and freed again.

For example, just after boot of a system the direct mapping statistics look
like:

$ cat /proc/meminfo
...
DirectMap4k:      111628 kB
DirectMap1M:    16665600 kB
DirectMap2G:           0 kB
Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 7608f70a
@@ -62,9 +62,10 @@ void *module_alloc(unsigned long size)
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(), MODULES_END,
-				 gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
-				 __builtin_return_address(0));
+				 MODULES_VADDR + get_module_load_offset(),
+				 MODULES_END, gfp_mask, PAGE_KERNEL,
+				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
+				 NUMA_NO_NODE, __builtin_return_address(0));
 	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
...
@@ -323,9 +323,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 	int rc = -EINVAL;
 	pgd_t *pgdp;
 
-	if (addr == end)
-		return 0;
-	mutex_lock(&cpa_mutex);
 	pgdp = pgd_offset_k(addr);
 	do {
 		if (pgd_none(*pgdp))
@@ -336,18 +333,66 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 			break;
 		cond_resched();
 	} while (pgdp++, addr = next, addr < end && !rc);
-	mutex_unlock(&cpa_mutex);
+	return rc;
+}
+
+static int change_page_attr_alias(unsigned long addr, unsigned long end,
+				  unsigned long flags)
+{
+	unsigned long alias, offset, va_start, va_end;
+	struct vm_struct *area;
+	int rc = 0;
+
+	/*
+	 * Changes to read-only permissions on kernel VA mappings are also
+	 * applied to the kernel direct mapping. Execute permissions are
+	 * intentionally not transferred to keep all allocated pages within
+	 * the direct mapping non-executable.
+	 */
+	flags &= SET_MEMORY_RO | SET_MEMORY_RW;
+	if (!flags)
+		return 0;
+	area = NULL;
+	while (addr < end) {
+		if (!area)
+			area = find_vm_area((void *)addr);
+		if (!area || !(area->flags & VM_ALLOC))
+			return 0;
+		va_start = (unsigned long)area->addr;
+		va_end = va_start + area->nr_pages * PAGE_SIZE;
+		offset = (addr - va_start) >> PAGE_SHIFT;
+		alias = (unsigned long)page_address(area->pages[offset]);
+		rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
+		if (rc)
+			break;
+		addr += PAGE_SIZE;
+		if (addr >= va_end)
+			area = NULL;
+	}
 	return rc;
 }
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 {
+	unsigned long end;
+	int rc;
+
 	if (!MACHINE_HAS_NX)
 		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
 	if (!flags)
 		return 0;
+	if (!numpages)
+		return 0;
 	addr &= PAGE_MASK;
-	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
+	end = addr + numpages * PAGE_SIZE;
+	mutex_lock(&cpa_mutex);
+	rc = change_page_attr(addr, end, flags);
+	if (rc)
+		goto out;
+	rc = change_page_attr_alias(addr, end, flags);
+out:
+	mutex_unlock(&cpa_mutex);
+	return rc;
 }
 
 int set_direct_map_invalid_noflush(struct page *page)
...
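
For reference, the address translation at the heart of
change_page_attr_alias() can be isolated as follows. This is a minimal
sketch, not part of the patch; the helper name direct_map_alias() is made
up, and it assumes addr lies within a VM_ALLOC vm area while omitting the
locking done by __set_memory():

	/* Map a vmalloc address to the direct-mapping alias of its
	 * backing page. */
	static unsigned long direct_map_alias(struct vm_struct *area,
					      unsigned long addr)
	{
		unsigned long offset;

		/* index of the backing page within the vm area */
		offset = (addr - (unsigned long)area->addr) >> PAGE_SHIFT;
		/* page_address() returns the page's linear (direct
		 * mapping) address */
		return (unsigned long)page_address(area->pages[offset]);
	}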