Commit 6531e115 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  scripts/spdxcheck.py: always open files in binary mode
  checkstack.pl: fix for aarch64
  userfaultfd: check VM_MAYWRITE was set after verifying the uffd is registered
  fs/iomap.c: get/put the page in iomap_page_create/release()
  hugetlbfs: call VM_BUG_ON_PAGE earlier in free_huge_page()
  memblock: annotate memblock_is_reserved() with __init_memblock
  psi: fix reference to kernel commandline enable
  arch/sh/include/asm/io.h: provide prototypes for PCI I/O mapping in asm/io.h
  mm/sparse: add common helper to mark all memblocks present
  mm: introduce common STRUCT_PAGE_MAX_SHIFT define
  alpha: fix hang caused by the bootmem removal
parents 380ef2c9 3a6ab5c7
@@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
/* Find our memory. */
setup_memory(kernel_end);
memblock_set_bottom_up(true);
/* First guess at cpu cache sizes. Do this before init_arch. */
determine_cpu_caches(cpu->type);
......
@@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
panic("kernel loaded out of ram");
memblock_add(PFN_PHYS(node_min_pfn),
(node_max_pfn - node_min_pfn) << PAGE_SHIFT);
/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
Note that we round this down, not up - node memory
has much larger alignment than 8Mb, so it's safe. */
node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
memblock_add(PFN_PHYS(node_min_pfn),
(node_max_pfn - node_min_pfn) << PAGE_SHIFT);
NODE_DATA(nid)->node_start_pfn = node_min_pfn;
NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
......
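The setup_memory_node() hunk above changes where memblock_add() runs relative to the rounding of node_min_pfn down to a 2^(MAX_ORDER-1) page boundary. With alpha's 8 KB pages and a MAX_ORDER of 11, that boundary works out to the 8 MB the comment mentions. A stand-alone sketch of the rounding itself, with illustrative values (MAX_ORDER and the sample pfn are not taken from a real alpha configuration):

/*
 * Stand-alone sketch of the rounding above: clear the low MAX_ORDER-1
 * bits of the starting pfn so the zone begins on a 2^(MAX_ORDER-1)
 * page boundary.  MAX_ORDER and the sample pfn are illustrative only.
 */
#include <stdio.h>

#define MAX_ORDER 11    /* 1 << (MAX_ORDER-1) == 1024 pages per block */

int main(void)
{
        unsigned long node_min_pfn = 0x12345;
        unsigned long rounded = node_min_pfn & ~((1UL << (MAX_ORDER - 1)) - 1);

        /* 0x12345 rounds down to the 1024-page boundary 0x12000 */
        printf("node_min_pfn %#lx -> %#lx\n", node_min_pfn, rounded);
        return 0;
}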
@@ -34,15 +34,6 @@
*/
#define PCI_IO_SIZE SZ_16M
/*
* Log2 of the upper bound of the size of a struct page. Used for sizing
* the vmemmap region only, does not affect actual memory footprint.
* We don't use sizeof(struct page) directly since taking its size here
* requires its definition to be available at this point in the inclusion
* chain, and it may not be a power of 2 in the first place.
*/
#define STRUCT_PAGE_MAX_SHIFT 6
/*
* VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array
......
@@ -610,14 +610,6 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
* correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
......
@@ -24,6 +24,7 @@
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
......
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
atomic_set(&iop->read_count, 0);
atomic_set(&iop->write_count, 0);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
/*
* migrate_page_move_mapping() assumes that pages with private data have
* their count elevated by 1.
*/
get_page(page);
set_page_private(page, (unsigned long)iop);
SetPagePrivate(page);
return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
WARN_ON_ONCE(atomic_read(&iop->write_count));
ClearPagePrivate(page);
set_page_private(page, 0);
put_page(page);
kfree(iop);
}
......
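The two iomap hunks above pair a get_page() in iomap_page_create() with a put_page() in iomap_page_release(): as the new comment says, page migration expects a page that carries private data to hold one extra reference. A toy userspace model of that bookkeeping (the baseline count of 2 and expected_refs() are illustrative, not the kernel's actual accounting):

#include <assert.h>
#include <stdbool.h>

/* one reference for the page cache, one for the caller doing the check */
static int expected_refs(bool has_private)
{
        return 2 + (has_private ? 1 : 0);
}

int main(void)
{
        int refcount = 2;               /* page with no private data */
        bool has_private = false;

        /* iomap_page_create(): attach private data and take a reference */
        has_private = true;
        refcount++;
        assert(refcount == expected_refs(has_private));

        /* iomap_page_release(): detach private data and drop the reference */
        has_private = false;
        refcount--;
        assert(refcount == expected_refs(has_private));
        return 0;
}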
@@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
cond_resched();
BUG_ON(!vma_can_userfault(vma));
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
/*
* Nothing to do: this vma is already registered into this
@@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (!vma->vm_userfaultfd_ctx.ctx)
goto skip;
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
if (vma->vm_start > start)
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
......
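The userfaultfd hunks above move the WARN_ON(!(vma->vm_flags & VM_MAYWRITE)) below the registration check: unregister walks every vma in the requested range, and the VM_MAYWRITE expectation only holds for vmas that actually have a uffd context attached. A simplified stand-alone model of that ordering (fake_vma and its fields are stand-ins, not kernel types):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_vma {
        bool maywrite;          /* stand-in for vma->vm_flags & VM_MAYWRITE */
        void *uffd_ctx;         /* stand-in for vma->vm_userfaultfd_ctx.ctx */
};

int main(void)
{
        int ctx;                /* dummy context object */
        struct fake_vma vmas[] = {
                { .maywrite = false, .uffd_ctx = NULL },        /* never registered */
                { .maywrite = true,  .uffd_ctx = &ctx },        /* registered */
        };

        for (size_t i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++) {
                if (!vmas[i].uffd_ctx)
                        continue;       /* the "goto skip" in the hunk above */
                /* only now is the VM_MAYWRITE expectation meaningful */
                assert(vmas[i].maywrite);
        }
        return 0;
}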
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_FIXMAP_H
#include <linux/bug.h>
#include <linux/mm_types.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
......
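The fixmap hunk above only adds an include, but the two conversion macros shown as context lend themselves to a worked example: fixmap slots are laid out downwards from FIXADDR_TOP, one page each. A stand-alone round-trip check with illustrative values for FIXADDR_TOP and PAGE_SHIFT (both are architecture specific):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))
#define FIXADDR_TOP     0xfffff000UL

#define __fix_to_virt(x)        (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)        ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
        unsigned long idx = 3;
        unsigned long virt = __fix_to_virt(idx);

        /* slot 3 sits three pages below FIXADDR_TOP */
        printf("fixmap slot %lu -> %#lx\n", idx, virt);
        assert(__virt_to_fix(virt) == idx);
        return 0;
}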
@@ -206,6 +206,11 @@ struct page {
#endif
} _struct_page_alignment;
/*
* Used for sizing the vmemmap region on some architectures
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
......
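The new STRUCT_PAGE_MAX_SHIFT above is computed from sizeof(struct page) with order_base_2() instead of being hard-coded to 6, which is presumably why the arm64 copy of the define and its BUILD_BUG_ON earlier in this diff could go away. A userspace sketch of the rounding order_base_2() performs (the helper and fake_page below are simplified stand-ins, not the kernel implementation):

#include <stdio.h>

struct fake_page { unsigned long words[8]; };   /* 64 bytes on LP64 */

/* round n up to the next power of two and return its log2 */
static unsigned int order_base_2(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        /* 64 bytes -> 6; a 72-byte struct page would round up to 7 */
        printf("STRUCT_PAGE_MAX_SHIFT would be %u\n",
               order_base_2(sizeof(struct fake_page)));
        return 0;
}

Deriving the shift from the struct itself means the vmemmap sizing can never fall behind if struct page grows.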
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
......
@@ -515,8 +515,8 @@ config PSI_DEFAULT_DISABLED
depends on PSI
help
If set, pressure stall information tracking will be disabled
per default but can be enabled through passing psi_enable=1
on the kernel commandline during boot.
per default but can be enabled through passing psi=1 on the
kernel commandline during boot.
endmenu # "CPU/Task time and stats accounting"
......
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
(struct hugepage_subpool *)page_private(page);
bool restore_reserve;
set_page_private(page, 0);
page->mapping = NULL;
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(page_mapcount(page), page);
set_page_private(page, 0);
page->mapping = NULL;
restore_reserve = PagePrivate(page);
ClearPagePrivate(page);
......
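The free_huge_page() hunk above runs the two VM_BUG_ON_PAGE() checks before page->private and page->mapping are cleared, so a check that fires can still report the original page state. A trivial stand-alone illustration of the ordering (fake_page and check_page() are stand-ins, not kernel code):

#include <stdio.h>

struct fake_page {
        void *mapping;
        int mapcount;
};

static void check_page(const struct fake_page *p)
{
        if (p->mapcount)        /* plays the role of VM_BUG_ON_PAGE() */
                fprintf(stderr, "bad page: mapcount=%d mapping=%p\n",
                        p->mapcount, p->mapping);
}

int main(void)
{
        struct fake_page page = { .mapping = (void *)0x1234, .mapcount = 0 };

        check_page(&page);      /* new order: check first ...    */
        page.mapping = NULL;    /* ... then tear the state down  */
        return 0;
}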
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
return -1;
}
bool __init memblock_is_reserved(phys_addr_t addr)
bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
return memblock_search(&memblock.reserved, addr) != -1;
}
......
@@ -239,6 +239,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
}
}
/*
* Mark all memblocks as present using memory_present(). This is a
* convenience function that is useful for a number of arches
* to mark all of the system's memory as present during initialization.
*/
void __init memblocks_present(void)
{
struct memblock_region *reg;
for_each_memblock(memory, reg) {
memory_present(memblock_get_region_node(reg),
memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
}
/*
* Subtle, we encode the real pfn into the mem_map such that
* the identity pfn - section_mem_map will return the actual
......
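memblocks_present() above centralizes a loop that several architectures open-coded: visit every registered memory region and report it to memory_present() with its node id. A userspace model of the same walk (the region table and both helpers are stand-ins, not the memblock API):

#include <stddef.h>
#include <stdio.h>

struct region { int nid; unsigned long base_pfn, end_pfn; };

/* stand-in for the regions memblock would have recorded at boot */
static const struct region memory[] = {
        { 0, 0x00000, 0x40000 },
        { 1, 0x80000, 0xc0000 },
};

static void memory_present(int nid, unsigned long start, unsigned long end)
{
        printf("node %d: pfns %#lx-%#lx marked present\n", nid, start, end);
}

int main(void)
{
        /* the equivalent of for_each_memblock(memory, reg) { ... } */
        for (size_t i = 0; i < sizeof(memory) / sizeof(memory[0]); i++)
                memory_present(memory[i].nid, memory[i].base_pfn,
                               memory[i].end_pfn);
        return 0;
}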
@@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
if ($arch eq 'aarch64') {
#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
$re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
......
@@ -168,6 +168,7 @@ class id_parser(object):
self.curline = 0
try:
for line in fd:
line = line.decode(locale.getpreferredencoding(False), errors='ignore')
self.curline += 1
if self.curline > maxlines:
break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
try:
if len(args.path) and args.path[0] == '-':
parser.parse_lines(sys.stdin, args.maxlines, '-')
stdin = os.fdopen(sys.stdin.fileno(), 'rb')
parser.parse_lines(stdin, args.maxlines, '-')
else:
if args.path:
for p in args.path:
if os.path.isfile(p):
parser.parse_lines(open(p), args.maxlines, p)
parser.parse_lines(open(p, 'rb'), args.maxlines, p)
elif os.path.isdir(p):
scan_git_subtree(repo.head.reference.commit.tree, p)
else:
......