Commit fff875a1 authored by Linus Torvalds

Merge tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
 "memblock debug enhancements.

  Improve tracking of early memory allocations when memblock debug is
  enabled:

   - Add memblock_dbg() to memblock_phys_alloc_range() to get details
     about its usage

   - Make memblock allocator wrappers actually inline to track their
     callers in memblock debug messages"

* tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  mm: memblock: drop __init from memblock functions to make it inline
  mm: memblock: add more debug logs
parents 870d1675 5bdba520
...@@ -404,13 +404,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, ...@@ -404,13 +404,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, phys_addr_t min_addr, phys_addr_t max_addr,
int nid); int nid);
static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align) static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{ {
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
} }
static inline void * __init memblock_alloc_raw(phys_addr_t size, static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align) phys_addr_t align)
{ {
return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
...@@ -418,7 +418,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size, ...@@ -418,7 +418,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
NUMA_NO_NODE); NUMA_NO_NODE);
} }
static inline void * __init memblock_alloc_from(phys_addr_t size, static inline void *memblock_alloc_from(phys_addr_t size,
phys_addr_t align, phys_addr_t align,
phys_addr_t min_addr) phys_addr_t min_addr)
{ {
...@@ -426,33 +426,33 @@ static inline void * __init memblock_alloc_from(phys_addr_t size, ...@@ -426,33 +426,33 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
} }
static inline void * __init memblock_alloc_low(phys_addr_t size, static inline void *memblock_alloc_low(phys_addr_t size,
phys_addr_t align) phys_addr_t align)
{ {
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
} }
static inline void * __init memblock_alloc_node(phys_addr_t size, static inline void *memblock_alloc_node(phys_addr_t size,
phys_addr_t align, int nid) phys_addr_t align, int nid)
{ {
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid); MEMBLOCK_ALLOC_ACCESSIBLE, nid);
} }
static inline void __init memblock_free_early(phys_addr_t base, static inline void memblock_free_early(phys_addr_t base,
phys_addr_t size) phys_addr_t size)
{ {
memblock_free(base, size); memblock_free(base, size);
} }
static inline void __init memblock_free_early_nid(phys_addr_t base, static inline void memblock_free_early_nid(phys_addr_t base,
phys_addr_t size, int nid) phys_addr_t size, int nid)
{ {
memblock_free(base, size); memblock_free(base, size);
} }
static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{ {
__memblock_free_late(base, size); __memblock_free_late(base, size);
} }
...@@ -460,7 +460,7 @@ static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) ...@@ -460,7 +460,7 @@ static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable) static inline void memblock_set_bottom_up(bool enable)
{ {
memblock.bottom_up = enable; memblock.bottom_up = enable;
} }
......
...@@ -1419,6 +1419,9 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, ...@@ -1419,6 +1419,9 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
phys_addr_t start, phys_addr_t start,
phys_addr_t end) phys_addr_t end)
{ {
memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
__func__, (u64)size, (u64)align, &start, &end,
(void *)_RET_IP_);
return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
false); false);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment