Commit 5170a3b2 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  shmem: fix init_page_accessed use to stop !PageLRU bug
  kernel/printk/printk.c: revert "printk: enable interrupts before calling console_trylock_for_printk()"
  tools/testing/selftests/ipc/msgque.c: improve error handling when not running as root
  fs/seq_file: fallback to vmalloc allocation
  /proc/stat: convert to single_open_size()
  hwpoison: fix the handling path of the victimized page frame that belong to non-LRU
  mm:vmscan: update the trace-vmscan-postprocess.pl for event vmscan/mm_vmscan_lru_isolate
  msync: fix incorrect fstart calculation
  zram: revalidate disk after capacity change
  tools: memory-hotplug fix unexpected operator error
  tools: cpu-hotplug fix unexpected operator error
  autofs4: fix false positive compile error
  slub: fix off by one in number of slab tests
  mm: page_alloc: fix CMA area initialisation when pageblock > MAX_ORDER
parents e55a3798 66d2f4d2

diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -47,7 +47,6 @@ use constant HIGH_KSWAPD_REWAKEUP => 21;
 use constant HIGH_NR_SCANNED => 22;
 use constant HIGH_NR_TAKEN => 23;
 use constant HIGH_NR_RECLAIMED => 24;
-use constant HIGH_NR_CONTIG_DIRTY => 25;
 my %perprocesspid;
 my %perprocess;
@@ -105,7 +104,7 @@ my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
-my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
+my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) file=([0-9]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -200,7 +199,7 @@ $regex_lru_isolate = generate_traceevent_regex(
             $regex_lru_isolate_default,
             "isolate_mode", "order",
             "nr_requested", "nr_scanned", "nr_taken",
-            "contig_taken", "contig_dirty", "contig_failed");
+            "file");
 $regex_lru_shrink_inactive = generate_traceevent_regex(
             "vmscan/mm_vmscan_lru_shrink_inactive",
             $regex_lru_shrink_inactive_default,
@@ -375,7 +374,6 @@ EVENT_PROCESS:
         }
         my $isolate_mode = $1;
         my $nr_scanned = $4;
-        my $nr_contig_dirty = $7;
         # To closer match vmstat scanning statistics, only count isolate_both
         # and isolate_inactive as scanning. isolate_active is rotation
@@ -385,7 +383,6 @@ EVENT_PROCESS:
         if ($isolate_mode != 2) {
             $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
         }
-        $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
     } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
         $details = $6;
         if ($details !~ /$regex_lru_shrink_inactive/o) {
@@ -539,13 +536,6 @@ sub dump_stats {
             }
         }
     }
-    if ($stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY}) {
-        print " ";
-        my $count = $stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY};
-        if ($count != 0) {
-            print "contig-dirty=$count ";
-        }
-    }
     print "\n";
 }
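
For reference, the payload the updated $regex_lru_isolate_default expects from the vmscan/mm_vmscan_lru_isolate tracepoint now looks like the line below (the field values are purely illustrative):

    isolate_mode=2 order=0 nr_requested=32 nr_scanned=32 nr_taken=32 file=1

The three contig_* fields the old regex captured no longer exist in the event, which is why the HIGH_NR_CONTIG_DIRTY accounting is dropped from the script rather than updated.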

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
@@ -622,8 +622,10 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
     memset(&zram->stats, 0, sizeof(zram->stats));
     zram->disksize = 0;
-    if (reset_capacity)
+    if (reset_capacity) {
         set_capacity(zram->disk, 0);
+        revalidate_disk(zram->disk);
+    }
     up_write(&zram->init_lock);
 }
@@ -664,6 +666,7 @@ static ssize_t disksize_store(struct device *dev,
     zram->comp = comp;
     zram->disksize = disksize;
     set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+    revalidate_disk(zram->disk);
     up_write(&zram->init_lock);
     return len;

diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
@@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
     int pipefd;
     struct autofs_sb_info *sbi;
     struct autofs_info *ino;
-    int pgrp;
+    int pgrp = 0;
     bool pgrp_set = false;
     int ret = -EINVAL;

diff --git a/fs/proc/stat.c b/fs/proc/stat.c
@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
 static int stat_open(struct inode *inode, struct file *file)
 {
-    size_t size = 1024 + 128 * num_possible_cpus();
-    char *buf;
-    struct seq_file *m;
-    int res;
+    size_t size = 1024 + 128 * num_online_cpus();
     /* minimum size to display an interrupt count : 2 bytes */
     size += 2 * nr_irqs;
-    /* don't ask for more than the kmalloc() max size */
-    if (size > KMALLOC_MAX_SIZE)
-        size = KMALLOC_MAX_SIZE;
-    buf = kmalloc(size, GFP_KERNEL);
-    if (!buf)
-        return -ENOMEM;
-    res = single_open(file, show_stat, NULL);
-    if (!res) {
-        m = file->private_data;
-        m->buf = buf;
-        m->size = ksize(buf);
-    } else
-        kfree(buf);
-    return res;
+    return single_open_size(file, show_stat, NULL, size);
 }
 static const struct file_operations proc_stat_operations = {

diff --git a/fs/seq_file.c b/fs/seq_file.c
@@ -8,8 +8,10 @@
 #include <linux/fs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
     m->count = m->size;
 }
+static void *seq_buf_alloc(unsigned long size)
+{
+    void *buf;
+    buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+    if (!buf && size > PAGE_SIZE)
+        buf = vmalloc(size);
+    return buf;
+}
 /**
  * seq_open - initialize sequential file
  * @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
         return 0;
     }
     if (!m->buf) {
-        m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+        m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
         if (!m->buf)
             return -ENOMEM;
     }
@@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
 Eoverflow:
     m->op->stop(m, p);
-    kfree(m->buf);
+    kvfree(m->buf);
     m->count = 0;
-    m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+    m->buf = seq_buf_alloc(m->size <<= 1);
     return !m->buf ? -ENOMEM : -EAGAIN;
 }
@@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
     /* grab buffer if we didn't have one */
     if (!m->buf) {
-        m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+        m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
         if (!m->buf)
             goto Enomem;
     }
@@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
     if (m->count < m->size)
         goto Fill;
     m->op->stop(m, p);
-    kfree(m->buf);
+    kvfree(m->buf);
     m->count = 0;
-    m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+    m->buf = seq_buf_alloc(m->size <<= 1);
     if (!m->buf)
         goto Enomem;
     m->version = 0;
@@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
 int seq_release(struct inode *inode, struct file *file)
 {
     struct seq_file *m = file->private_data;
-    kfree(m->buf);
+    kvfree(m->buf);
     kfree(m);
     return 0;
 }
@@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open);
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
         void *data, size_t size)
 {
-    char *buf = kmalloc(size, GFP_KERNEL);
+    char *buf = seq_buf_alloc(size);
     int ret;
     if (!buf)
         return -ENOMEM;
     ret = single_open(file, show, data);
     if (ret) {
-        kfree(buf);
+        kvfree(buf);
        return ret;
     }
     ((struct seq_file *)file->private_data)->buf = buf;
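
As a usage note, the sketch below shows how a pseudo-file can pick up the new allocation path; it mirrors the /proc/stat conversion above and is only an illustration, with foo_show(), foo_open() and foo_fops being hypothetical names rather than part of the patch. single_open_size() sizes the buffer up front, and seq_buf_alloc() transparently falls back to vmalloc() when the estimate is larger than kmalloc() will satisfy.

    #include <linux/cpumask.h>
    #include <linux/fs.h>
    #include <linux/seq_file.h>

    /* Hypothetical example, not part of the patch. */
    static int foo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "example line\n");    /* placeholder output */
        return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
        /* A rough size hint is enough; an oversized hint no longer risks a
         * high-order kmalloc() failure because seq_buf_alloc() falls back
         * to vmalloc() for buffers larger than PAGE_SIZE. */
        size_t size = 1024 + 128 * num_online_cpus();

        return single_open_size(file, foo_show, NULL, size);
    }

    static const struct file_operations foo_fops = {
        .open    = foo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,    /* seq_release() now kvfree()s the buffer */
    };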

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
@@ -1416,9 +1416,10 @@ static int have_callable_console(void)
 /*
  * Can we actually use the console at this time on this cpu?
  *
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
+ * Console drivers may assume that per-cpu resources have
+ * been allocated. So unless they're explicitly marked as
+ * being able to cope (CON_ANYTIME) don't call them until
+ * this CPU is officially up.
  */
 static inline int can_use_console(unsigned int cpu)
 {
@@ -1431,10 +1432,8 @@ static inline int can_use_console(unsigned int cpu)
  * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  */
-static int console_trylock_for_printk(void)
+static int console_trylock_for_printk(unsigned int cpu)
 {
-    unsigned int cpu = smp_processor_id();
     if (!console_trylock())
         return 0;
     /*
@@ -1609,8 +1608,7 @@ asmlinkage int vprintk_emit(int facility, int level,
      */
     if (!oops_in_progress && !lockdep_recursing(current)) {
         recursion_bug = 1;
-        local_irq_restore(flags);
-        return 0;
+        goto out_restore_irqs;
     }
     zap_locks();
 }
@@ -1718,27 +1716,21 @@ asmlinkage int vprintk_emit(int facility, int level,
     logbuf_cpu = UINT_MAX;
     raw_spin_unlock(&logbuf_lock);
-    lockdep_on();
-    local_irq_restore(flags);
     /* If called from the scheduler, we can not call up(). */
-    if (in_sched)
-        return printed_len;
-    /*
-     * Disable preemption to avoid being preempted while holding
-     * console_sem which would prevent anyone from printing to console
-     */
-    preempt_disable();
-    /*
-     * Try to acquire and then immediately release the console semaphore.
-     * The release will print out buffers and wake up /dev/kmsg and syslog()
-     * users.
-     */
-    if (console_trylock_for_printk())
-        console_unlock();
-    preempt_enable();
+    if (!in_sched) {
+        /*
+         * Try to acquire and then immediately release the console
+         * semaphore. The release will print out buffers and wake up
+         * /dev/kmsg and syslog() users.
+         */
+        if (console_trylock_for_printk(this_cpu))
+            console_unlock();
+    }
+    lockdep_on();
+out_restore_irqs:
+    local_irq_restore(flags);
     return printed_len;
 }
 EXPORT_SYMBOL(vprintk_emit);

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
@@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
     struct page *hpage = *hpagep;
     struct page *ppage;
-    if (PageReserved(p) || PageSlab(p))
+    if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
         return SWAP_SUCCESS;
     /*
@@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
             action_result(pfn, "free buddy, 2nd try", DELAYED);
             return 0;
         }
-        action_result(pfn, "non LRU", IGNORED);
-        put_page(p);
-        return -EBUSY;
     }
 }
@@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         return 0;
     }
+    if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
+        goto identify_page_state;
     /*
      * For error on the tail page, we should set PG_hwpoison
      * on the head page to show that the hugepage is hwpoisoned
@@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         goto out;
     }
+identify_page_state:
     res = -EBUSY;
     /*
      * The first check uses the current page flags which may not have any

diff --git a/mm/msync.c b/mm/msync.c
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
             goto out_unlock;
         }
         file = vma->vm_file;
-        fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+        fstart = (start - vma->vm_start) +
+            ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
         fend = fstart + (min(end, vma->vm_end) - start) - 1;
         start = vma->vm_end;
         if ((flags & MS_SYNC) && file &&
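
A worked example of the corrected offset arithmetic (the addresses are hypothetical, chosen only to make the difference visible):

    /*
     * Assume a file mapped with offset 0 at vma->vm_start == 0x700000000000
     * (so vma->vm_pgoff == 0), and msync() called with
     * start == vma->vm_start + 0x2000.
     *
     * old:  fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
     *              = 0x700000002000   (a nonsense file offset)
     * new:  fstart = (start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
     *              = 0x2000           (the file offset actually being synced)
     */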

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
@@ -816,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
         set_page_count(p, 0);
     } while (++p, --i);
-    set_page_refcounted(page);
     set_pageblock_migratetype(page, MIGRATE_CMA);
-    __free_pages(page, pageblock_order);
+    if (pageblock_order >= MAX_ORDER) {
+        i = pageblock_nr_pages;
+        p = page;
+        do {
+            set_page_refcounted(p);
+            __free_pages(p, MAX_ORDER - 1);
+            p += MAX_ORDER_NR_PAGES;
+        } while (i -= MAX_ORDER_NR_PAGES);
+    } else {
+        set_page_refcounted(page);
+        __free_pages(page, pageblock_order);
+    }
     adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
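
To see why the pageblock has to be split, here is a worked example under an assumed configuration where pageblocks are bigger than the largest buddy order (the numbers are illustrative, not tied to any particular architecture):

    /*
     * Assume MAX_ORDER == 11, so the largest buddy chunk is order
     * MAX_ORDER - 1 == 10, i.e. MAX_ORDER_NR_PAGES == 1024 pages,
     * while pageblock_order == 13, i.e. pageblock_nr_pages == 8192 pages.
     *
     * __free_pages(page, pageblock_order) would then ask the buddy
     * allocator for an order-13 free it cannot represent.  The loop in
     * the hunk above instead hands the block back as 8192 / 1024 == 8
     * pieces of order MAX_ORDER - 1, giving each piece its own refcount
     * via set_page_refcounted() before freeing it.
     */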

diff --git a/mm/shmem.c b/mm/shmem.c
@@ -1029,6 +1029,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
             goto failed;
     }
+    if (page && sgp == SGP_WRITE)
+        mark_page_accessed(page);
     /* fallocated page? */
     if (page && !PageUptodate(page)) {
         if (sgp != SGP_READ)
@@ -1110,6 +1113,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
         shmem_recalc_inode(inode);
         spin_unlock(&info->lock);
+        if (sgp == SGP_WRITE)
+            mark_page_accessed(page);
         delete_from_swap_cache(page);
         set_page_dirty(page);
         swap_free(swap);
@@ -1136,6 +1142,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
         __SetPageSwapBacked(page);
         __set_page_locked(page);
+        if (sgp == SGP_WRITE)
+            init_page_accessed(page);
         error = mem_cgroup_charge_file(page, current->mm,
                 gfp & GFP_RECLAIM_MASK);
         if (error)
@@ -1412,13 +1421,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
             loff_t pos, unsigned len, unsigned flags,
             struct page **pagep, void **fsdata)
 {
-    int ret;
     struct inode *inode = mapping->host;
     pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-    ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-    if (ret == 0 && *pagep)
-        init_page_accessed(*pagep);
-    return ret;
+    return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 static int

diff --git a/mm/slub.c b/mm/slub.c
@@ -1881,7 +1881,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
     new.frozen = 0;
-    if (!new.inuse && n->nr_partial > s->min_partial)
+    if (!new.inuse && n->nr_partial >= s->min_partial)
         m = M_FREE;
     else if (new.freelist) {
         m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
         new.freelist, new.counters,
         "unfreezing slab"));
-    if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+    if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
         page->next = discard_page;
         discard_page = page;
     } else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         return;
     }
-    if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+    if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
         goto slab_empty;
     /*
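
The off-by-one in the three hunks above is easiest to see with concrete (hypothetical) numbers:

    /*
     * Say s->min_partial == 5, i.e. the node wants to keep 5 partial
     * slabs cached for reuse, and a slab becomes completely empty while
     * n->nr_partial is already 5:
     *
     *   old test:  nr_partial >  min_partial  ->  5 >  5 is false, so the
     *              empty slab is parked on the partial list (6 slabs kept)
     *   new test:  nr_partial >= min_partial  ->  5 >= 5 is true, so the
     *              empty slab is discarded and exactly min_partial remain
     */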

diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
 all:
 run_tests:
-	@/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
+	@/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
 clean:

diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
@@ -193,6 +193,11 @@ int main(int argc, char **argv)
     int msg, pid, err;
     struct msgque_data msgque;
+    if (getuid() != 0) {
+        printf("Please run the test as root - Exiting.\n");
+        exit(1);
+    }
     msgque.key = ftok(argv[0], 822155650);
     if (msgque.key == -1) {
         printf("Can't make key\n");

diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
 all:
 run_tests:
-	@/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
+	@/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
 clean: