Commit 12c0ab66 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "12 patches.

  Subsystems affected by this patch series: mm (memcg, zsmalloc, swap,
  mailmap, selftests, pagecache, hugetlb, pagemap), lib, and coredump"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/mmap.c: fix mmap return value when vma is merged after call_mmap()
  hugetlb_cgroup: fix offline of hugetlb cgroup with reservations
  mm/filemap: add static for function __add_to_page_cache_locked
  userfaultfd: selftests: fix SIGSEGV if huge mmap fails
  tools/testing/selftests/vm: fix build error
  mailmap: add two more addresses of Uwe Kleine-König
  mm/swapfile: do not sleep with a spin lock held
  mm/zsmalloc.c: drop ZSMALLOC_PGTABLE_MAPPING
  mm: list_lru: set shrinker map bit when child nr_items is not zero
  mm: memcg/slab: fix obj_cgroup_charge() return value handling
  coredump: fix core_pattern parse error
  zlib: export S390 symbols for zlib modules
parents 7059c2c0 309d08d9
@@ -322,6 +322,8 @@ TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
......
@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_BINFMT_MISC=y
 CONFIG_CMA=y
 CONFIG_ZSMALLOC=m
-CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
......
@@ -229,7 +229,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
-				was_space = true;
+				if (cn->used != 0)
+					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
......
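The fs/coredump.c hunk above stops a leading space in a piped core_pattern from being recorded as an argument separator: was_space is only set once something has already been copied into the name buffer (cn->used != 0). The stand-alone sketch below mimics that parsing rule in user space; it is only an illustration, and split_pipe_pattern() is an invented name, not a kernel function.

/*
 * Illustrative user-space sketch (not part of the commit): a space only
 * acts as an argument separator once at least one character of the
 * current name has been emitted, so a pattern with a leading space no
 * longer yields an empty first argument.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void split_pipe_pattern(const char *pat)
{
	char buf[256];
	size_t used = 0;
	int was_space = 0;

	for (; *pat; pat++) {
		if (isspace((unsigned char)*pat)) {
			if (used != 0)		/* ignore leading spaces */
				was_space = 1;
			continue;
		}
		if (was_space) {
			buf[used++] = '\0';	/* terminate previous argument */
			was_space = 0;
		}
		if (used < sizeof(buf) - 2)
			buf[used++] = *pat;
	}
	buf[used] = '\0';

	for (const char *arg = buf; arg < buf + used; arg += strlen(arg) + 1)
		printf("arg: \"%s\"\n", arg);
}

int main(void)
{
	/* leading space is ignored; "%p" becomes the second argument */
	split_pipe_pattern(" /usr/bin/core-helper %p");
	return 0;
}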
@@ -20,7 +20,6 @@
  * zsmalloc mapping modes
  *
  * NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
  */
 enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
......
@@ -4,6 +4,7 @@
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 #include <asm/setup.h>
+#include <linux/export.h>
 #include <linux/zutil.h>

 /*
@@ -29,6 +30,7 @@ int dfltcc_can_inflate(
	return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
		is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
 }
+EXPORT_SYMBOL(dfltcc_can_inflate);

 static int dfltcc_was_inflate_used(
	z_streamp strm
@@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate(
	return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
		DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
 }
+EXPORT_SYMBOL(dfltcc_inflate);
@@ -707,19 +707,6 @@ config ZSMALLOC
	  returned by an alloc(). This handle must be mapped in order to
	  access the allocated space.

-config ZSMALLOC_PGTABLE_MAPPING
-	bool "Use page table mapping to access object in zsmalloc"
-	depends on ZSMALLOC=y
-	help
-	  By default, zsmalloc uses a copy-based object mapping method to
-	  access allocations that span two pages. However, if a particular
-	  architecture (ex, ARM) performs VM mapping faster than copying,
-	  then you should select this. This causes zsmalloc to use page table
-	  mapping rather than copying for object mapping.
-
-	  You can check speed with zsmalloc benchmark:
-	  https://github.com/spartacus06/zsmapbench
-
 config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
......
@@ -827,7 +827,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);

-noinline int __add_to_page_cache_locked(struct page *page,
+static noinline int __add_to_page_cache_locked(struct page *page,
					struct address_space *mapping,
					pgoff_t offset, gfp_t gfp,
					void **shadowp)
......
@@ -82,11 +82,8 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(
-			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
-		    page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
-			    h_cg, idx))) {
+			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
			return true;
-		}
	}
	return false;
 }
@@ -202,9 +199,10 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
-	int idx = 0;
+	int idx;

	do {
+		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
......
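The second hugetlb_cgroup hunk above declares idx without an initializer and resets it to 0 inside the do/while loop, so every retry pass walks the hstates from index 0 again instead of continuing from wherever the previous pass left off. The toy program below is a loose user-space analogue of that "reset the index on every retry pass" pattern; drain_one() and the usage[] table are invented for the example.

#include <stdio.h>

#define NSTATES 3

static int usage[NSTATES] = { 2, 1, 3 };

/* Drain one unit from state i; returns how much is still left overall. */
static int drain_one(int i)
{
	int left = 0, j;

	if (usage[i] > 0)
		usage[i]--;
	for (j = 0; j < NSTATES; j++)
		left += usage[j];
	return left;
}

int main(void)
{
	int idx, left;

	do {
		idx = 0;	/* must be re-initialised on every retry pass */
		left = 0;
		for (; idx < NSTATES; idx++)
			left = drain_one(idx);
	} while (left > 0);	/* keep retrying while any state has usage */

	printf("all states drained\n");
	return 0;
}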
@@ -534,7 +534,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
-	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -546,11 +545,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
-	set = (!dst->nr_items && src->nr_items);
-	dst->nr_items += src->nr_items;
-	if (set)
-		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
-	src->nr_items = 0;
+
+	if (src->nr_items) {
+		dst->nr_items += src->nr_items;
+		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
+		src->nr_items = 0;
+	}

	spin_unlock_irq(&nlru->lock);
 }
......
@@ -1808,6 +1808,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
		if (error)
			goto unmap_and_free_vma;

+		/* Can addr have changed??
+		 *
+		 * Answer: Yes, several device drivers can do it in their
+		 * f_op->mmap method. -DaveM
+		 * Bug: If addr is changed, prev, rb_link, rb_parent should
+		 * be updated for vma_link()
+		 */
+		WARN_ON_ONCE(addr != vma->vm_start);
+
+		addr = vma->vm_start;
+
		/* If vm_flags changed after call_mmap(), we should try merge vma again
		 * as we may succeed this time.
		 */
@@ -1822,25 +1833,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
			fput(vma->vm_file);
			vm_area_free(vma);
			vma = merge;
-			/* Update vm_flags and possible addr to pick up the change. We don't
-			 * warn here if addr changed as the vma is not linked by vma_link().
-			 */
-			addr = vma->vm_start;
+			/* Update vm_flags to pick up the change. */
			vm_flags = vma->vm_flags;
			goto unmap_writable;
		}
	}

-		/* Can addr have changed??
-		 *
-		 * Answer: Yes, several device drivers can do it in their
-		 * f_op->mmap method. -DaveM
-		 * Bug: If addr is changed, prev, rb_link, rb_parent should
-		 * be updated for vma_link()
-		 */
-		WARN_ON_ONCE(addr != vma->vm_start);
-
-		addr = vma->vm_start;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
......
@@ -274,22 +274,32 @@ static inline size_t obj_full_size(struct kmem_cache *s)
	return s->size + sizeof(struct obj_cgroup *);
 }

-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-							    size_t objects,
-							    gfp_t flags)
+/*
+ * Returns false if the allocation should fail.
+ */
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
	struct obj_cgroup *objcg;

+	if (!memcg_kmem_enabled())
+		return true;
+
+	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
+		return true;
+
	objcg = get_obj_cgroup_from_current();
	if (!objcg)
-		return NULL;
+		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
-		return NULL;
+		return false;
	}

-	return objcg;
+	*objcgp = objcg;
+	return true;
 }

 static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -315,7 +325,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
	unsigned long off;
	size_t i;

-	if (!objcg)
+	if (!memcg_kmem_enabled() || !objcg)
		return;

	flags &= ~__GFP_ACCOUNT;
@@ -400,11 +410,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
 }

-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-							    size_t objects,
-							    gfp_t flags)
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
-	return NULL;
+	return true;
 }

 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -508,9 +518,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
	if (should_failslab(s, flags))
		return NULL;

-	if (memcg_kmem_enabled() &&
-	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
+	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+		return NULL;

	return s;
 }
@@ -529,8 +538,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
					s->flags, flags);
	}

-	if (memcg_kmem_enabled())
-		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }

 #ifndef CONFIG_SLOB
......
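The mm/slab.h hunk above changes memcg_slab_pre_alloc_hook() to return a bool and hand the obj_cgroup back through an out parameter, so a failed obj_cgroup_charge() makes the whole allocation fail, while "no accounting needed" still reports success with a NULL cookie. The sketch below shows that calling convention in a stand-alone form; charge(), pre_alloc_hook(), do_alloc() and the 128-byte limit are invented for the illustration and are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cookie { size_t charged; };

/* Pretend accounting backend: refuse charges past an arbitrary demo limit. */
static bool charge(struct cookie *c, size_t bytes)
{
	if (c->charged + bytes > 128)
		return false;
	c->charged += bytes;
	return true;
}

/* Returns false only when the charge fails; *out stays NULL if nothing
 * needed to be charged. */
static bool pre_alloc_hook(size_t bytes, bool accounted, struct cookie **out)
{
	static struct cookie global;

	*out = NULL;
	if (!accounted)
		return true;		/* nothing to charge, allocation may proceed */
	if (!charge(&global, bytes))
		return false;		/* charge failed: allocation must fail too */
	*out = &global;
	return true;
}

static void *do_alloc(size_t bytes, bool accounted)
{
	struct cookie *c;

	if (!pre_alloc_hook(bytes, accounted, &c))
		return NULL;		/* single, explicit failure check */
	return malloc(bytes);
}

int main(void)
{
	void *a = do_alloc(100, true);	/* fits the 128-byte demo limit */
	void *b = do_alloc(64, true);	/* would exceed it, so it fails */

	printf("a=%s b=%s\n", a ? "ok" : "failed", b ? "ok" : "failed");
	free(a);
	free(b);
	return 0;
}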
@@ -2867,6 +2867,7 @@ late_initcall(max_swapfiles_check);
 static struct swap_info_struct *alloc_swap_info(void)
 {
	struct swap_info_struct *p;
+	struct swap_info_struct *defer = NULL;
	unsigned int type;
	int i;

@@ -2895,7 +2896,7 @@ static struct swap_info_struct *alloc_swap_info(void)
		smp_wmb();
		WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
	} else {
-		kvfree(p);
+		defer = p;
		p = swap_info[type];
		/*
		 * Do not memset this entry: a racing procfs swap_next()
@@ -2908,6 +2909,7 @@ static struct swap_info_struct *alloc_swap_info(void)
		plist_node_init(&p->avail_lists[i], 0);
	p->flags = SWP_USED;
	spin_unlock(&swap_lock);
+	kvfree(defer);
	spin_lock_init(&p->lock);
	spin_lock_init(&p->cont_lock);
......
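The mm/swapfile.c hunk above avoids sleeping with swap_lock held: instead of calling kvfree() inside the locked region, the pointer is parked in "defer" and freed only after spin_unlock(). The user-space sketch below shows the same "remember under the lock, release after unlocking" pattern, with a pthread mutex and free() standing in for the kernel's spinlock and kvfree(); replace_entry() and table_entry are invented names.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static char *table_entry;	/* shared state protected by table_lock */

/* Replace the shared entry; free the old buffer only after unlocking. */
static void replace_entry(const char *value)
{
	char *fresh = strdup(value);
	char *stale;

	pthread_mutex_lock(&table_lock);
	stale = table_entry;		/* just remember it under the lock */
	table_entry = fresh;
	pthread_mutex_unlock(&table_lock);

	free(stale);			/* potentially slow work happens unlocked */
}

int main(void)
{
	replace_entry("first");
	replace_entry("second");

	free(table_entry);		/* final cleanup */
	return 0;
}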
@@ -293,11 +293,7 @@ struct zspage {
 };

 struct mapping_area {
-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
-	struct vm_struct *vm; /* vm area for mapping object that span pages */
-#else
	char *vm_buf; /* copy buffer for objects that span pages */
-#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
 };
@@ -1113,54 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
	return zspage;
 }

-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
-	/*
-	 * Make sure we don't leak memory if a cpu UP notification
-	 * and zs_init() race and both call zs_cpu_up() on the same cpu
-	 */
-	if (area->vm)
-		return 0;
-	area->vm = get_vm_area(PAGE_SIZE * 2, 0);
-	if (!area->vm)
-		return -ENOMEM;
-
-	/*
-	 * Populate ptes in advance to avoid pte allocation with GFP_KERNEL
-	 * in non-preemtible context of zs_map_object.
-	 */
-	return apply_to_page_range(&init_mm, (unsigned long)area->vm->addr,
-			PAGE_SIZE * 2, NULL, NULL);
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
-	if (area->vm)
-		free_vm_area(area->vm);
-	area->vm = NULL;
-}
-
-static inline void *__zs_map_object(struct mapping_area *area,
-				struct page *pages[2], int off, int size)
-{
-	unsigned long addr = (unsigned long)area->vm->addr;
-
-	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
-	area->vm_addr = area->vm->addr;
-	return area->vm_addr + off;
-}
-
-static inline void __zs_unmap_object(struct mapping_area *area,
-				struct page *pages[2], int off, int size)
-{
-	unsigned long addr = (unsigned long)area->vm_addr;
-
-	unmap_kernel_range(addr, PAGE_SIZE * 2);
-}
-
-#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
-
 static inline int __zs_cpu_up(struct mapping_area *area)
 {
	/*
@@ -1241,8 +1189,6 @@ static void __zs_unmap_object(struct mapping_area *area,
	pagefault_enable();
 }

-#endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
-
 static int zs_cpu_prepare(unsigned int cpu)
 {
	struct mapping_area *area;
......
@@ -60,9 +60,13 @@ ifeq ($(CAN_BUILD_X86_64),1)
 TEST_GEN_FILES += $(BINARIES_64)
 endif
 else
+
+ifneq (,$(findstring $(ARCH),powerpc))
 TEST_GEN_FILES += protection_keys
 endif
+
+endif

 ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
 TEST_GEN_FILES += va_128TBswitch
 TEST_GEN_FILES += virtual_address_range
......
@@ -206,19 +206,19 @@ static int hugetlb_release_pages(char *rel_area)
	return ret;
 }

 static void hugetlb_allocate_area(void **alloc_area)
 {
	void *area_alias = NULL;
	char **alloc_area_alias;

	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			   (map_shared ? MAP_SHARED : MAP_PRIVATE) |
			   MAP_HUGETLB,
			   huge_fd, *alloc_area == area_src ? 0 :
			   nr_pages * page_size);
	if (*alloc_area == MAP_FAILED) {
-		fprintf(stderr, "mmap of hugetlbfs file failed\n");
-		*alloc_area = NULL;
+		perror("mmap of hugetlbfs file failed");
+		goto fail;
	}

	if (map_shared) {
@@ -227,14 +227,11 @@ static void hugetlb_allocate_area(void **alloc_area)
				   huge_fd, *alloc_area == area_src ? 0 :
				   nr_pages * page_size);
		if (area_alias == MAP_FAILED) {
-			if (munmap(*alloc_area, nr_pages * page_size) < 0) {
-				perror("hugetlb munmap");
-				exit(1);
-			}
-			*alloc_area = NULL;
-			return;
+			perror("mmap of hugetlb file alias failed");
+			goto fail_munmap;
		}
	}

	if (*alloc_area == area_src) {
		huge_fd_off0 = *alloc_area;
		alloc_area_alias = &area_src_alias;
@@ -243,6 +240,16 @@ static void hugetlb_allocate_area(void **alloc_area)
	}
	if (area_alias)
		*alloc_area_alias = area_alias;
+
+	return;
+
+fail_munmap:
+	if (munmap(*alloc_area, nr_pages * page_size) < 0) {
+		perror("hugetlb munmap");
+		exit(1);
+	}
+fail:
+	*alloc_area = NULL;
 }

 static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
......
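The selftest hunk above converts the hugetlb allocation error handling to a single goto-based unwind path (fail_munmap/fail), so every failure leaves *alloc_area NULL instead of crashing later. Below is a small, self-contained example of the same pattern using anonymous mappings; map_pair() is an invented helper, not part of the selftest.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Map two related regions; on any failure, unwind what was already done. */
static void *map_pair(size_t len, void **alias_out)
{
	void *area, *alias;

	area = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap of primary area failed");
		goto fail;
	}

	alias = mmap(NULL, len, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (alias == MAP_FAILED) {
		perror("mmap of alias failed");
		goto fail_munmap;
	}

	*alias_out = alias;
	return area;

fail_munmap:
	if (munmap(area, len) < 0) {
		perror("munmap");
		exit(1);
	}
fail:
	*alias_out = NULL;
	return NULL;
}

int main(void)
{
	void *alias;
	void *area = map_pair(4096, &alias);

	if (area) {
		munmap(area, 4096);
		munmap(alias, 4096);
	}
	return 0;
}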