Commit bc2391e7 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "Subsystems affected by this patch series: mm/hugetlb, samples, mm/cma,
  mm/vmalloc, mm/pagealloc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/page_alloc: fix documentation error
  vmalloc: fix the owner argument for the new __vmalloc_node_range callers
  mm/cma.c: use exact_nid true to fix possible per-numa cma leak
  samples/vfs: avoid warning in statx override
  mm/hugetlb.c: fix pages per hugetlb calculation
parents 0c7d7d1f 8beeae86
...@@ -122,7 +122,7 @@ void *alloc_insn_page(void) ...@@ -122,7 +122,7 @@ void *alloc_insn_page(void)
{ {
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __func__); NUMA_NO_NODE, __builtin_return_address(0));
} }
/* arm kprobe: install breakpoint in text */ /* arm kprobe: install breakpoint in text */
......
...@@ -377,7 +377,8 @@ void __init hyperv_init(void) ...@@ -377,7 +377,8 @@ void __init hyperv_init(void)
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX, VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__); VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
if (hv_hypercall_pg == NULL) { if (hv_hypercall_pg == NULL) {
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
goto remove_cpuhp_state; goto remove_cpuhp_state;
......
...@@ -2785,7 +2785,7 @@ void * __weak module_alloc(unsigned long size) ...@@ -2785,7 +2785,7 @@ void * __weak module_alloc(unsigned long size)
{ {
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __func__); NUMA_NO_NODE, __builtin_return_address(0));
} }
bool __weak module_init_section(const char *name) bool __weak module_init_section(const char *name)
......
...@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, ...@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
*/ */
if (base < highmem_start && limit > highmem_start) { if (base < highmem_start && limit > highmem_start) {
addr = memblock_alloc_range_nid(size, alignment, addr = memblock_alloc_range_nid(size, alignment,
highmem_start, limit, nid, false); highmem_start, limit, nid, true);
limit = highmem_start; limit = highmem_start;
} }
if (!addr) { if (!addr) {
addr = memblock_alloc_range_nid(size, alignment, base, addr = memblock_alloc_range_nid(size, alignment, base,
limit, nid, false); limit, nid, true);
if (!addr) { if (!addr) {
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
......
...@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage) ...@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
/* Use first found vma */ /* Use first found vma */
pgoff_start = page_to_pgoff(hpage); pgoff_start = page_to_pgoff(hpage);
pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1; pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) { pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma; struct vm_area_struct *vma = avc->vma;
......
...@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void) ...@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void)
* Initialise min_free_kbytes. * Initialise min_free_kbytes.
* *
* For small machines we want it small (128k min). For large machines * For small machines we want it small (128k min). For large machines
* we want it large (64MB max). But it is not linear, because network * we want it large (256MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use * bandwidth does not increase linearly with machine size. We use
* *
* min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include <linux/fcntl.h> #include <linux/fcntl.h>
#define statx foo #define statx foo
#define statx_timestamp foo_timestamp #define statx_timestamp foo_timestamp
struct statx;
struct statx_timestamp;
#include <sys/stat.h> #include <sys/stat.h>
#undef statx #undef statx
#undef statx_timestamp #undef statx_timestamp
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment