Commit a7259df7 authored by Mike Rapoport, committed by Linus Torvalds

memblock: make memblock_find_in_range method private

There are many uses of memblock_find_in_range() followed by
memblock_reserve() that date from the time when the memblock allocation
APIs did not exist.

memblock_find_in_range() is the very core of memblock allocations, so any
future change to its internal behaviour would require updating all of its
users outside memblock.

Replace the calls to memblock_find_in_range() with equivalent calls to
memblock_phys_alloc() and memblock_phys_alloc_range(), and make
memblock_find_in_range() a private method of memblock.

This simplifies the callers, ensures that (unlikely) errors in
memblock_reserve() are handled, and improves the maintainability of
memblock_find_in_range().
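
The conversion follows one pattern throughout the series. A minimal
sketch of the before/after shape (illustrative, not a hunk from this
patch); note that the argument order flips, since the allocation APIs
take size and alignment first, then the search range:

        /* before: find a free range, then reserve it by hand */
        base = memblock_find_in_range(start, end, size, align);
        if (!base)
                return -ENOMEM;
        memblock_reserve(base, size);   /* result often unchecked */

        /* after: one call that both finds and reserves the range */
        base = memblock_phys_alloc_range(size, align, start, end);
        if (!base)
                return -ENOMEM;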

Link: https://lkml.kernel.org/r/20210816122622.30279-1-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>		[arm64]
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>	[ACPI]
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Acked-by: Nick Kossifidis <mick@ics.forth.gr>			[riscv]
Tested-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38b031dd
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1012,31 +1012,25 @@ static void __init reserve_crashkernel(void)
 		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
 		if (crash_max > lowmem_max)
 			crash_max = lowmem_max;
-		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
-						    crash_size, CRASH_ALIGN);
+
+		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+						       CRASH_ALIGN, crash_max);
 		if (!crash_base) {
 			pr_err("crashkernel reservation failed - No suitable area found.\n");
 			return;
 		}
 	} else {
+		unsigned long long crash_max = crash_base + crash_size;
 		unsigned long long start;
 
-		start = memblock_find_in_range(crash_base,
-					       crash_base + crash_size,
-					       crash_size, SECTION_SIZE);
-		if (start != crash_base) {
+		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
+						  crash_base, crash_max);
+		if (!start) {
 			pr_err("crashkernel reservation failed - memory is in use.\n");
 			return;
 		}
 	}
 
-	ret = memblock_reserve(crash_base, crash_size);
-	if (ret < 0) {
-		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
-			(unsigned long)crash_base);
-		return;
-	}
-
 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 		(unsigned long)(crash_size >> 20),
 		(unsigned long)(crash_base >> 20),
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -92,12 +92,10 @@ void __init kvm_hyp_reserve(void)
 	 * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
 	 */
 	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
-	hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-					      ALIGN(hyp_mem_size, PMD_SIZE),
-					      PMD_SIZE);
+	hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
+					   PMD_SIZE);
 	if (!hyp_mem_base)
-		hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-						      hyp_mem_size, PAGE_SIZE);
+		hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
 	else
 		hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
 
@@ -105,7 +103,6 @@ void __init kvm_hyp_reserve(void)
 		kvm_err("Failed to reserve hyp memory\n");
 		return;
 	}
-	memblock_reserve(hyp_mem_base, hyp_mem_size);
 
 	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
 		 hyp_mem_base);
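
In the kvm_hyp_reserve() hunk the explicit 0 .. memblock_end_of_DRAM()
window can be dropped because memblock_phys_alloc() already searches all
accessible memory. Its definition (paraphrased from
include/linux/memblock.h) is a thin wrapper over the ranged allocator:

        static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                                      phys_addr_t align)
        {
                return memblock_phys_alloc_range(size, align, 0,
                                                 MEMBLOCK_ALLOC_ACCESSIBLE);
        }

MEMBLOCK_ALLOC_ACCESSIBLE caps the search at memblock.current_limit,
which covers the DRAM range the old call spelled out by hand.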
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -74,6 +74,7 @@ phys_addr_t arm64_dma_phys_limit __ro_after_init;
 static void __init reserve_crashkernel(void)
 {
 	unsigned long long crash_base, crash_size;
+	unsigned long long crash_max = arm64_dma_phys_limit;
 	int ret;
 
 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
@@ -84,33 +85,18 @@ static void __init reserve_crashkernel(void)
 	crash_size = PAGE_ALIGN(crash_size);
 
-	if (crash_base == 0) {
-		/* Current arm64 boot protocol requires 2MB alignment */
-		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
-				crash_size, SZ_2M);
-		if (crash_base == 0) {
-			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
-				crash_size);
-			return;
-		}
-	} else {
-		/* User specifies base address explicitly. */
-		if (!memblock_is_region_memory(crash_base, crash_size)) {
-			pr_warn("cannot reserve crashkernel: region is not memory\n");
-			return;
-		}
+	/* User specifies base address explicitly. */
+	if (crash_base)
+		crash_max = crash_base + crash_size;
 
-		if (memblock_is_region_reserved(crash_base, crash_size)) {
-			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
-			return;
-		}
-
-		if (!IS_ALIGNED(crash_base, SZ_2M)) {
-			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
-			return;
-		}
+	/* Current arm64 boot protocol requires 2MB alignment */
+	crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+					       crash_base, crash_max);
+	if (!crash_base) {
+		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+			crash_size);
+		return;
 	}
-	memblock_reserve(crash_base, crash_size);
 
 	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
 		crash_base, crash_base + crash_size, crash_size >> 20);
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -452,8 +452,9 @@ static void __init mips_parse_crashkernel(void)
 		return;
 
 	if (crash_base <= 0) {
-		crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX,
-						    crash_size, CRASH_ALIGN);
+		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+						       CRASH_ALIGN,
+						       CRASH_ADDR_MAX);
 		if (!crash_base) {
 			pr_warn("crashkernel reservation failed - No suitable area found.\n");
 			return;
@@ -461,8 +462,9 @@ static void __init mips_parse_crashkernel(void)
 	} else {
 		unsigned long long start;
 
-		start = memblock_find_in_range(crash_base, crash_base + crash_size,
-					       crash_size, 1);
+		start = memblock_phys_alloc_range(crash_size, 1,
+						  crash_base,
+						  crash_base + crash_size);
 		if (start != crash_base) {
 			pr_warn("Invalid memory region reserved for crash kernel\n");
 			return;
@@ -656,10 +658,6 @@ static void __init arch_mem_init(char **cmdline_p)
 	mips_reserve_vmcore();
 
 	mips_parse_crashkernel();
-#ifdef CONFIG_KEXEC
-	if (crashk_res.start != crashk_res.end)
-		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
-#endif
 	device_tree_init();
 
 	/*
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -819,38 +819,22 @@ static void __init reserve_crashkernel(void)
 
 	crash_size = PAGE_ALIGN(crash_size);
 
-	if (crash_base == 0) {
-		/*
-		 * Current riscv boot protocol requires 2MB alignment for
-		 * RV64 and 4MB alignment for RV32 (hugepage size)
-		 */
-		crash_base = memblock_find_in_range(search_start, search_end,
-						    crash_size, PMD_SIZE);
-
-		if (crash_base == 0) {
-			pr_warn("crashkernel: couldn't allocate %lldKB\n",
-				crash_size >> 10);
-			return;
-		}
-	} else {
-		/* User specifies base address explicitly. */
-		if (!memblock_is_region_memory(crash_base, crash_size)) {
-			pr_warn("crashkernel: requested region is not memory\n");
-			return;
-		}
-
-		if (memblock_is_region_reserved(crash_base, crash_size)) {
-			pr_warn("crashkernel: requested region is reserved\n");
-			return;
-		}
-
-		if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
-			pr_warn("crashkernel: requested region is misaligned\n");
-			return;
-		}
+	if (crash_base) {
+		search_start = crash_base;
+		search_end = crash_base + crash_size;
 	}
-	memblock_reserve(crash_base, crash_size);
+
+	/*
+	 * Current riscv boot protocol requires 2MB alignment for
+	 * RV64 and 4MB alignment for RV32 (hugepage size)
+	 */
+	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+					       search_start, search_end);
+	if (crash_base == 0) {
+		pr_warn("crashkernel: couldn't allocate %lldKB\n",
+			crash_size >> 10);
+		return;
+	}
 
 	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
 		crash_base, crash_base + crash_size, crash_size >> 20);
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -626,8 +626,9 @@ static void __init reserve_crashkernel(void)
 			return;
 		}
 		low = crash_base ?: low;
-		crash_base = memblock_find_in_range(low, high, crash_size,
-						    KEXEC_CRASH_MEM_ALIGN);
+		crash_base = memblock_phys_alloc_range(crash_size,
+						       KEXEC_CRASH_MEM_ALIGN,
+						       low, high);
 	}
 
 	if (!crash_base) {
@@ -636,8 +637,10 @@ static void __init reserve_crashkernel(void)
 		return;
 	}
 
-	if (register_memory_notifier(&kdump_mem_nb))
+	if (register_memory_notifier(&kdump_mem_nb)) {
+		memblock_free(crash_base, crash_size);
 		return;
+	}
 
 	if (!OLDMEM_BASE && MACHINE_IS_VM)
 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -109,14 +109,13 @@ static u32 __init allocate_aperture(void)
 	 * memory. Unfortunately we cannot move it up because that would
 	 * make the IOMMU useless.
 	 */
-	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
-				      aper_size, aper_size);
+	addr = memblock_phys_alloc_range(aper_size, aper_size,
+					 GART_MIN_ADDR, GART_MAX_ADDR);
 	if (!addr) {
 		pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n",
 		       addr, addr + aper_size - 1, aper_size >> 10);
 		return 0;
 	}
-	memblock_reserve(addr, aper_size);
 
 	pr_info("Mapping aperture over RAM [mem %#010lx-%#010lx] (%uKB)\n",
 		addr, addr + aper_size - 1, aper_size >> 10);
 	register_nosave_region(addr >> PAGE_SHIFT,
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -127,14 +127,12 @@ __ref void *alloc_low_pages(unsigned int num)
 		unsigned long ret = 0;
 
 		if (min_pfn_mapped < max_pfn_mapped) {
-			ret = memblock_find_in_range(
+			ret = memblock_phys_alloc_range(
+					PAGE_SIZE * num, PAGE_SIZE,
 					min_pfn_mapped << PAGE_SHIFT,
-					max_pfn_mapped << PAGE_SHIFT,
-					PAGE_SIZE * num , PAGE_SIZE);
+					max_pfn_mapped << PAGE_SHIFT);
 		}
-		if (ret)
-			memblock_reserve(ret, PAGE_SIZE * num);
-		else if (can_use_brk_pgt)
+		if (!ret && can_use_brk_pgt)
 			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
 
 		if (!ret)
@@ -610,8 +608,17 @@ static void __init memory_map_top_down(unsigned long map_start,
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 
-	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
+	/*
+	 * Systems that have many reserved areas near top of the memory,
+	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
+	 * require lots of 4K mappings which may exhaust pgt_buf.
+	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
+	 * there is enough mapped memory that can be allocated from
+	 * memblock.
+	 */
+	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
+					 map_end);
+	memblock_free(addr, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
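
One subtlety in the memory_map_top_down() hunk above: memblock_find_in_range()
only located a range, whereas memblock_phys_alloc_range() also reserves it.
Since the code needs the address alone to seed the top-down mapping loop, it
releases the block immediately. A minimal sketch of the probe idiom, with
hypothetical start/end bounds:

        /* reserve a PMD-sized block just to learn where it lands ... */
        phys_addr_t probe = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE,
                                                      start, end);
        /* ... and free it right away; only the address is kept */
        memblock_free(probe, PMD_SIZE);

(memblock_free() took a physical address and size at the time of this
commit; it was later renamed memblock_phys_free().)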
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -376,15 +376,14 @@ static int __init numa_alloc_distance(void)
 		cnt++;
 	size = cnt * cnt * sizeof(numa_distance[0]);
 
-	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
-				      size, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
+					 PFN_PHYS(max_pfn_mapped));
 	if (!phys) {
 		pr_warn("Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_reserve(phys, size);
 
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -447,13 +447,12 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	if (numa_dist_cnt) {
 		u64 phys;
 
-		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
-					      phys_size, PAGE_SIZE);
+		phys = memblock_phys_alloc_range(phys_size, PAGE_SIZE, 0,
+						 PFN_PHYS(max_pfn_mapped));
 		if (!phys) {
 			pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 			goto no_emu;
 		}
-		memblock_reserve(phys, phys_size);
 		phys_dist = __va(phys);
 
 		for (i = 0; i < numa_dist_cnt; i++)
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -28,7 +28,7 @@ void __init reserve_real_mode(void)
 	WARN_ON(slab_is_available());
 
 	/* Has to be under 1M so we can execute real-mode AP code. */
-	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
 	if (!mem)
 		pr_info("No sub-1M memory is available for the trampoline\n");
 	else
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -583,8 +583,8 @@ void __init acpi_table_upgrade(void)
 	}
 
 	acpi_tables_addr =
-		memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
-				       all_tables_size, PAGE_SIZE);
+		memblock_phys_alloc_range(all_tables_size, PAGE_SIZE,
+					  0, ACPI_TABLE_UPGRADE_MAX_PHYS);
 	if (!acpi_tables_addr) {
 		WARN_ON(1);
 		return;
@@ -599,7 +599,6 @@ void __init acpi_table_upgrade(void)
 	 * Both memblock_reserve and e820__range_add (via arch_reserve_mem_area)
 	 * works fine.
 	 */
-	memblock_reserve(acpi_tables_addr, all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
 	/*
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -279,13 +279,10 @@ static int __init numa_alloc_distance(void)
 	int i, j;
 
 	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
-	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
-				      size, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0, PFN_PHYS(max_pfn));
 	if (WARN_ON(!phys))
 		return -ENOMEM;
 
-	memblock_reserve(phys, size);
-
 	numa_distance = __va(phys);
 	numa_distance_cnt = nr_node_ids;
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -33,18 +33,22 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 	phys_addr_t *res_base)
 {
 	phys_addr_t base;
+	int err = 0;
 
 	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
 	align = !align ? SMP_CACHE_BYTES : align;
-	base = memblock_find_in_range(start, end, size, align);
+	base = memblock_phys_alloc_range(size, align, start, end);
 	if (!base)
 		return -ENOMEM;
 
 	*res_base = base;
-	if (nomap)
-		return memblock_mark_nomap(base, size);
+	if (nomap) {
+		err = memblock_mark_nomap(base, size);
+		if (err)
+			memblock_free(base, size);
+	}
 
-	return memblock_reserve(base, size);
+	return err;
 }
 
 /*
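
The nomap branch in the reserved-memory helper above also gains a cleanup
path: the block is already reserved by memblock_phys_alloc_range(), so when
memblock_mark_nomap() fails the block is freed instead of leaking the
reservation, and the old trailing memblock_reserve() call disappears.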
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -99,8 +99,6 @@ void memblock_discard(void);
 static inline void memblock_discard(void) {}
 #endif
 
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
-				   phys_addr_t size, phys_addr_t align);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -315,7 +315,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
  * Return:
  * Found address on success, 0 on failure.
  */
-phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
+static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
 					phys_addr_t align)
 {