Commit 8712a005 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge misc fixes from Andrew Morton:
 "Nine fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  cris: convert ffs from an object-like macro to a function-like macro
  hfsplus: add HFSX subfolder count support
  tools/testing/selftests/ipc/msgque.c: handle msgget failure return correctly
  MAINTAINERS: blackfin: add git repository
  revert "kallsyms: fix absolute addresses for kASLR"
  mm/Kconfig: fix URL for zsmalloc benchmark
  fs/proc/base.c: fix GPF in /proc/$PID/map_files
  mm/compaction: break out of loop on !PageBuddy in isolate_freepages_block
  mm: fix GFP_THISNODE callers and clarify
parents e6a4b6f5 0eb808eb
@@ -1738,6 +1738,7 @@ F:	include/uapi/linux/bfs_fs.h
 BLACKFIN ARCHITECTURE
 M:	Steven Miao <realmz6@gmail.com>
 L:	adi-buildroot-devel@lists.sourceforge.net
+T:	git git://git.code.sf.net/p/adi-linux/code
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	arch/blackfin/
...
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
  * definition, which doesn't have the same semantics.  We don't want to
  * use -fno-builtin, so just hide the name ffs.
  */
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
...
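Why the function-like form fixes things: an object-like macro rewrites every token named ffs, including declarations and structure members elsewhere in the tree, whereas a function-like macro expands only where ffs is followed by an argument list. A minimal userspace sketch of the difference (hypothetical code, not from this commit):

    #include <stdio.h>

    static int kernel_ffs(int x) { return __builtin_ffs(x); }

    /* The old object-like form, "#define ffs kernel_ffs", would also
     * rewrite the struct member below into kernel_ffs and break the
     * build.  The function-like form only expands actual calls. */
    #define ffs(x) kernel_ffs(x)

    struct flags { int ffs; };    /* untouched: no '(' follows ffs */

    int main(void)
    {
        struct flags f = { .ffs = 1 };
        printf("%d %d\n", ffs(8), f.ffs);    /* prints "4 1" */
        return 0;
    }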
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	/* attempt to allocate a granule's worth of cached memory pages */
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
...
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+	area->pages = alloc_pages_exact_node(area->nid,
+						GFP_KERNEL|__GFP_THISNODE,
 						area->order);
 	if (!area->pages) {
...
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 	nid = cpu_to_node(cpu);
 	page = alloc_pages_exact_node(nid,
-				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				      pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
...
@@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
 		folder = &entry->folder;
 		memset(folder, 0, sizeof(*folder));
 		folder->type = cpu_to_be16(HFSPLUS_FOLDER);
+		if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
+			folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
 		folder->id = cpu_to_be32(inode->i_ino);
 		HFSPLUS_I(inode)->create_date =
 			folder->create_date =
@@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
 	return hfs_brec_find(fd, hfs_find_rec_by_key);
 }
 
+static void hfsplus_subfolders_inc(struct inode *dir)
+{
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+		/*
+		 * Increment subfolder count. Note, the value is only meaningful
+		 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+		 */
+		HFSPLUS_I(dir)->subfolders++;
+	}
+}
+
+static void hfsplus_subfolders_dec(struct inode *dir)
+{
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+		/*
+		 * Decrement subfolder count. Note, the value is only meaningful
+		 * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+		 *
+		 * Check for zero. Some subfolders may have been created
+		 * by an implementation ignorant of this counter.
+		 */
+		if (HFSPLUS_I(dir)->subfolders)
+			HFSPLUS_I(dir)->subfolders--;
+	}
+}
+
 int hfsplus_create_cat(u32 cnid, struct inode *dir,
 		struct qstr *str, struct inode *inode)
 {
@@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
 		goto err1;
 
 	dir->i_size++;
+	if (S_ISDIR(inode->i_mode))
+		hfsplus_subfolders_inc(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 		goto out;
 
 	dir->i_size--;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_dec(dir);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid,
 	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
 				src_fd.entrylength);
+	type = be16_to_cpu(entry.type);
 
 	/* create new dir entry with the data from the old entry */
 	hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
@@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid,
 	if (err)
 		goto out;
 	dst_dir->i_size++;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_inc(dst_dir);
 	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
 
 	/* finally remove the old entry */
@@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid,
 	if (err)
 		goto out;
 	src_dir->i_size--;
+	if (type == HFSPLUS_FOLDER)
+		hfsplus_subfolders_dec(src_dir);
 	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
 
 	/* remove old thread entry */
...
@@ -242,6 +242,7 @@ struct hfsplus_inode_info {
 	 */
 	sector_t fs_blocks;
 	u8 userflags;		/* BSD user file flags */
+	u32 subfolders;		/* Subfolder count (HFSX only) */
 	struct list_head open_dir_list;
 	loff_t phys_size;
...
@@ -261,7 +261,7 @@ struct hfsplus_cat_folder {
 	struct DInfo user_info;
 	struct DXInfo finder_info;
 	__be32 text_encoding;
-	u32 reserved;
+	__be32 subfolders;	/* Subfolder count in HFSX. Reserved in HFS+. */
 } __packed;
 
 /* HFS file info (stolen from hfs.h) */
@@ -301,11 +301,13 @@ struct hfsplus_cat_file {
 	struct hfsplus_fork_raw rsrc_fork;
 } __packed;
 
-/* File attribute bits */
+/* File and folder flag bits */
 #define HFSPLUS_FILE_LOCKED		0x0001
 #define HFSPLUS_FILE_THREAD_EXISTS	0x0002
 #define HFSPLUS_XATTR_EXISTS		0x0004
 #define HFSPLUS_ACL_EXISTS		0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT	0x0010	/* Folder has subfolder count
+						 * (HFSX only) */
 
 /* HFS+ catalog thread (part of a cat_entry) */
 struct hfsplus_cat_thread {
...
@@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
 	hip->extent_state = 0;
 	hip->flags = 0;
 	hip->userflags = 0;
+	hip->subfolders = 0;
 	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
 	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
 	hip->alloc_blocks = 0;
@@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
 		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
 		HFSPLUS_I(inode)->create_date = folder->create_date;
 		HFSPLUS_I(inode)->fs_blocks = 0;
+		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+			HFSPLUS_I(inode)->subfolders =
+				be32_to_cpu(folder->subfolders);
+		}
 		inode->i_op = &hfsplus_dir_inode_operations;
 		inode->i_fop = &hfsplus_dir_operations;
 	} else if (type == HFSPLUS_FILE) {
@@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode)
 		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
 		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
 		folder->valence = cpu_to_be32(inode->i_size - 2);
+		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+			folder->subfolders =
+				cpu_to_be32(HFSPLUS_I(inode)->subfolders);
+		}
 		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
 					sizeof(struct hfsplus_cat_folder));
 	} else if (HFSPLUS_IS_RSRC(inode)) {
...
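The persistence rule above is flag-gated and big-endian: the on-disk subfolders field is loaded or stored only when the folder record advertises HFSPLUS_HAS_FOLDER_COUNT, so plain HFS+ volumes keep treating the field as reserved. A hypothetical userspace sketch of the same pattern, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>    /* htons/ntohs/htonl/ntohl */

    #define HAS_FOLDER_COUNT 0x0010

    struct disk_folder { uint16_t flags; uint32_t subfolders_be; };
    struct mem_inode  { uint32_t subfolders; };

    static void read_inode(struct mem_inode *ip, const struct disk_folder *f)
    {
        if (ntohs(f->flags) & HAS_FOLDER_COUNT)   /* HFSX only */
            ip->subfolders = ntohl(f->subfolders_be);
    }

    static void write_inode(struct disk_folder *f, const struct mem_inode *ip)
    {
        if (ntohs(f->flags) & HAS_FOLDER_COUNT)   /* else leave reserved */
            f->subfolders_be = htonl(ip->subfolders);
    }

    int main(void)
    {
        struct disk_folder f = { htons(HAS_FOLDER_COUNT), htonl(3) };
        struct mem_inode ip = { 0 };

        read_inode(&ip, &f);
        ip.subfolders++;                  /* e.g. a subfolder was created */
        write_inode(&f, &ip);
        printf("%u\n", ntohl(f.subfolders_be));   /* prints 4 */
        return 0;
    }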
@@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
 	if (rc)
 		goto out_mmput;
 
+	rc = -ENOENT;
 	down_read(&mm->mmap_sem);
 	vma = find_exact_vma(mm, vm_start, vm_end);
 	if (vma && vma->vm_file) {
...
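The bug pattern: rc still held 0 from the earlier check, so when find_exact_vma() found nothing the function returned success with *path unset, and the caller dereferenced garbage (the reported GPF). Presetting rc to -ENOENT makes the not-found path fail cleanly. A self-contained sketch of the idiom, with stand-in types in place of the kernel's:

    #include <errno.h>
    #include <stdio.h>

    struct vma { const char *file; };

    /* hypothetical stand-in for the kernel's find_exact_vma() */
    static struct vma *find_exact_vma(struct vma *table, size_t n, int key)
    {
        return (key >= 0 && (size_t)key < n) ? &table[key] : NULL;
    }

    static int get_link(struct vma *table, size_t n, int key, const char **out)
    {
        int rc = -ENOENT;                 /* assume failure up front */
        struct vma *vma = find_exact_vma(table, n, key);

        if (vma && vma->file) {           /* only a real hit clears it */
            *out = vma->file;
            rc = 0;
        }
        return rc;                        /* never stale 0 with *out unset */
    }

    int main(void)
    {
        struct vma t[] = { { "/bin/true" }, { NULL } };
        const char *path = NULL;

        printf("%d\n", get_link(t, 2, 0, &path));   /* 0, path set */
        printf("%d\n", get_link(t, 2, 5, &path));   /* -ENOENT */
        return 0;
    }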
@@ -123,6 +123,10 @@ struct vm_area_struct;
 			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
 			 __GFP_NO_KSWAPD)
 
+/*
+ * GFP_THISNODE does not perform any reclaim, you most likely want to
+ * use __GFP_THISNODE to allocate from a given node without fallback!
+ */
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
 #else
...
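The distinction the new comment documents: GFP_THISNODE bundles __GFP_THISNODE with __GFP_NORETRY and __GFP_NOWARN, so it fails fast without reclaim and without a warning; callers that want a node-local allocation with normal reclaim behavior (most of the callers fixed in this series) need the bare __GFP_THISNODE bit. A minimal sketch with stand-in bit values, not the kernel's real ones:

    #include <stdio.h>

    /* stand-in bit values for illustration only */
    #define __GFP_THISNODE  0x01u   /* restrict to one node, no fallback */
    #define __GFP_NOWARN    0x02u   /* no allocation-failure warnings */
    #define __GFP_NORETRY   0x04u   /* give up quickly, no reclaim */

    /* the opportunistic bundle the comment warns about */
    #define GFP_THISNODE    (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)

    int main(void)
    {
        /* node-local but still allowed to reclaim: just the bare bit */
        unsigned want = __GFP_THISNODE;
        /* node-local, no reclaim, silent failure: the bundle */
        unsigned opportunistic = GFP_THISNODE;

        printf("%#x vs %#x\n", want, opportunistic);   /* 0x1 vs 0x7 */
        return 0;
    }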
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone)
 
 /*
  * The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for GFP_THISNODE.
+ * allocations to a single node for __GFP_THISNODE.
  *
  * [0]	: Zonelist with fallback
- * [1]	: No fallback (GFP_THISNODE)
+ * [1]	: No fallback (__GFP_THISNODE)
  */
 #define MAX_ZONELISTS 2
...
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
-* %GFP_THISNODE - Allocate node-local memory only.
+* %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
...
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
 		struct page *page;
 
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
...
@@ -575,5 +575,5 @@ config PGTABLE_MAPPING
 	  then you should select this. This causes zsmalloc to use page table
 	  mapping rather than copying for object mapping.
 
-	  You can check speed with zsmalloc benchmark[1].
-	  [1] https://github.com/spartacus06/zsmalloc
+	  You can check speed with zsmalloc benchmark:
+	  https://github.com/spartacus06/zsmapbench
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor, *valid_page = NULL;
-	unsigned long nr_strict_required = end_pfn - blockpfn;
 	unsigned long flags;
 	bool locked = false;
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		nr_scanned++;
 		if (!pfn_valid_within(blockpfn))
-			continue;
+			goto isolate_fail;
+
 		if (!valid_page)
 			valid_page = page;
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/*
 		 * The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
 		/* Recheck this is a buddy page under lock */
 		if (!PageBuddy(page))
-			continue;
+			goto isolate_fail;
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
-		if (!isolated && strict)
-			break;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (isolated) {
 			blockpfn += isolated - 1;
 			cursor += isolated - 1;
+			continue;
 		}
+
+isolate_fail:
+		if (strict)
+			break;
+		else
+			continue;
+
 	}
 
 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	 * pages requested were isolated. If there were any failures, 0 is
 	 * returned and CMA will fail.
 	 */
-	if (strict && nr_strict_required > total_isolated)
+	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
 	if (locked)
...
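The net effect of the new isolate_fail path: in strict (CMA) mode any per-page failure breaks out of the loop immediately, so "did we isolate everything?" collapses to a single blockpfn < end_pfn range check instead of the removed nr_strict_required counter. A toy model of that control flow, with stand-in values rather than kernel code:

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned long scan(unsigned long pfn, unsigned long end, bool strict)
    {
        unsigned long isolated = 0;

        for (; pfn < end; pfn++) {
            if (pfn == 5)            /* simulated !PageBuddy failure */
                goto isolate_fail;
            isolated++;
            continue;
    isolate_fail:
            if (strict)
                break;               /* abort: leaves pfn < end */
        }

        if (strict && pfn < end)     /* aborted => all-or-nothing */
            isolated = 0;
        return isolated;
    }

    int main(void)
    {
        printf("%lu %lu\n", scan(0, 10, true), scan(0, 10, false));
        /* prints "0 9": strict aborts, non-strict skips the bad page */
        return 0;
    }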
@@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 					pm->node);
 	else
 		return alloc_pages_exact_node(pm->node,
-				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }
 
 /*
@@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	struct page *newpage;
 
 	newpage = alloc_pages_exact_node(nid,
-					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
-					  __GFP_NOMEMALLOC | __GFP_NORETRY |
-					  __GFP_NOWARN) &
+					 (GFP_HIGHUSER_MOVABLE |
+					  __GFP_THISNODE | __GFP_NOMEMALLOC |
+					  __GFP_NORETRY | __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
 
 	return newpage;
@@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_dropref;
 
 	new_page = alloc_pages_node(node,
-		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+		(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+		HPAGE_PMD_ORDER);
 	if (!new_page)
 		goto out_fail;
...
@@ -330,7 +330,8 @@ static void write_src(void)
 				printf("\tPTR\t_text + %#llx\n",
 					table[i].addr - _text);
 			else
-				printf("\tPTR\t%#llx\n", table[i].addr);
+				printf("\tPTR\t_text - %#llx\n",
+					_text - table[i].addr);
 		} else {
 			printf("\tPTR\t%#llx\n", table[i].addr);
 		}
...
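The revert restores relative emission for symbols that sit below _text: writing the entry as the expression "_text - offset" keeps it something the linker relocates together with _text under kASLR, whereas a raw absolute value would stay fixed while the kernel image moves. A userspace sketch of the emission logic; the _text value and the absolute_ok flag are stand-ins (the real tool derives both from the symbol table):

    #include <stdio.h>

    static unsigned long long _text = 0xffffffff81000000ULL; /* stand-in */

    static void emit(unsigned long long addr, int absolute_ok)
    {
        if (!absolute_ok) {
            /* both branches stay relative to _text, so they are
             * relocated together with it under kASLR */
            if (addr >= _text)
                printf("\tPTR\t_text + %#llx\n", addr - _text);
            else
                printf("\tPTR\t_text - %#llx\n", _text - addr);
        } else {
            printf("\tPTR\t%#llx\n", addr);  /* truly absolute symbol */
        }
    }

    int main(void)
    {
        emit(0xffffffff81000040ULL, 0);  /* _text + 0x40 */
        emit(0xffffffff80fffff0ULL, 0);  /* _text - 0x10 */
        emit(0x1000ULL, 1);              /* kept absolute */
        return 0;
    }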
@@ -201,6 +201,7 @@ int main(int argc, char **argv)
 	msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
 	if (msgque.msq_id == -1) {
+		err = -errno;
 		printf("Can't create queue\n");
 		goto err_out;
 	}
...
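Without the added line, err kept whatever value the previous step left in it (often 0), so a failing msgget() could make the selftest report success. Capturing -errno immediately, before printf() gets a chance to clobber errno, is the usual idiom. A standalone sketch of the pattern:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    int main(void)
    {
        int err = 0;
        key_t key = 0x12345;    /* arbitrary example key */
        int id = msgget(key, IPC_CREAT | IPC_EXCL | 0666);

        if (id == -1) {
            err = -errno;       /* capture before printf can clobber errno */
            printf("Can't create queue\n");
            goto err_out;
        }
        msgctl(id, IPC_RMID, NULL);    /* clean up the test queue */
        return 0;

    err_out:
        return err;
    }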