Commit 3e85fb9c authored by Linus Torvalds

Merge branch 'akpm' (Andrew's patch bomb)

Merge the emailed series of 19 patches from Andrew Morton

* akpm:
  rapidio/tsi721: fix queue wrapping bug in inbound doorbell handler
  memcg: fix mapcount check in move charge code for anonymous page
  mm: thp: fix BUG on mm->nr_ptes
  alpha: fix 32/64-bit bug in futex support
  memcg: fix GPF when cgroup removal races with last exit
  debugobjects: Fix selftest for static warnings
  floppy/scsi: fix setting of BIO flags
  memcg: fix deadlock by inverting lrucare nesting
  drivers/rtc/rtc-r9701.c: fix crash in r9701_remove()
  c2port: class_create() returns an ERR_PTR
  pps: class_create() returns an ERR_PTR, not NULL
  hung_task: fix the broken rcu_lock_break() logic
  vfork: kill PF_STARTING
  coredump_wait: don't call complete_vfork_done()
  vfork: make it killable
  vfork: introduce complete_vfork_done()
  aio: wake up waiters when freeing unused kiocbs
  kprobes: return proper error code from register_kprobe()
  kmsg_dump: don't run on non-error paths by default
parents 055bf38d b24823e6
@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			default: off.
 
+	printk.always_kmsg_dump=
+			Trigger kmsg_dump for cases other than kernel oops or
+			panics
+			Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
+			default: disabled
+
 	printk.time=	Show timing data prefixed to each printk message line
 			Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
...
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	"	lda	$31,3b-2b(%0)\n"
 	"	.previous\n"
 	:	"+r"(ret), "=&r"(prev), "=&r"(cmp)
-	:	"r"(uaddr), "r"((long)oldval), "r"(newval)
+	:	"r"(uaddr), "r"((long)(int)oldval), "r"(newval)
 	:	"memory");
 
 	*uval = prev;
...
@@ -3832,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_size = size;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = 0;
-	bio.bi_flags = BIO_QUIET;
+	bio.bi_flags = (1 << BIO_QUIET);
 	init_completion(&complete);
 	bio.bi_private = &complete;
 	bio.bi_end_io = floppy_rb0_complete;
...
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
 		" - (C) 2007 Rodolfo Giometti\n");
 
 	c2port_class = class_create(THIS_MODULE, "c2port");
-	if (!c2port_class) {
+	if (IS_ERR(c2port_class)) {
 		printk(KERN_ERR "c2port: failed to allocate class\n");
-		return -ENOMEM;
+		return PTR_ERR(c2port_class);
 	}
 	c2port_class->dev_attrs = c2port_attrs;
...
@@ -369,9 +369,9 @@ static int __init pps_init(void)
 	int err;
 
 	pps_class = class_create(THIS_MODULE, "pps");
-	if (!pps_class) {
+	if (IS_ERR(pps_class)) {
 		pr_err("failed to allocate class\n");
-		return -ENOMEM;
+		return PTR_ERR(pps_class);
 	}
 	pps_class->dev_attrs = pps_attrs;
...
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
 	 */
 	mport = priv->mport;
 
-	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
-	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
 
 	while (wr_ptr != rd_ptr) {
 		idb_entry = (u64 *)(priv->idb_base +
 				(TSI721_IDB_ENTRY_SIZE * rd_ptr));
 		rd_ptr++;
+		rd_ptr %= IDB_QSIZE;
 		idb.msg = *idb_entry;
 		*idb_entry = 0;
...
@@ -125,6 +125,13 @@ static int __devinit r9701_probe(struct spi_device *spi)
 	unsigned char tmp;
 	int res;
 
+	tmp = R100CNT;
+	res = read_regs(&spi->dev, &tmp, 1);
+	if (res || tmp != 0x20) {
+		dev_err(&spi->dev, "cannot read RTC register\n");
+		return -ENODEV;
+	}
+
 	rtc = rtc_device_register("r9701",
 				&spi->dev, &r9701_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
@@ -132,13 +139,6 @@ static int __devinit r9701_probe(struct spi_device *spi)
 
 	dev_set_drvdata(&spi->dev, rtc);
 
-	tmp = R100CNT;
-	res = read_regs(&spi->dev, &tmp, 1);
-	if (res || tmp != 0x20) {
-		rtc_device_unregister(rtc);
-		return res;
-	}
-
 	return 0;
 }
...
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 			kunmap_atomic(sdt, KM_USER0);
 		}
 
-		bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
 	}
 
 	return 0;
...
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 		kmem_cache_free(kiocb_cachep, req);
 		ctx->reqs_active--;
 	}
+	if (unlikely(!ctx->reqs_active && ctx->dead))
+		wake_up_all(&ctx->wait);
 	spin_unlock_irq(&ctx->ctx_lock);
 }
...
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	struct completion *vfork_done;
 	int core_waiters = -EBUSY;
 
 	init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
 	up_write(&mm->mmap_sem);
 
-	if (unlikely(core_waiters < 0))
-		goto fail;
-
-	/*
-	 * Make sure nobody is waiting for us to release the VM,
-	 * otherwise we can deadlock when we wait on each other
-	 */
-	vfork_done = tsk->vfork_done;
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
-
-	if (core_waiters)
+	if (core_waiters > 0)
 		wait_for_completion(&core_state->startup);
-fail:
+
 	return core_waiters;
 }
...
@@ -15,13 +15,18 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
 enum kmsg_dump_reason {
-	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
+	KMSG_DUMP_OOPS,
+	KMSG_DUMP_EMERG,
 	KMSG_DUMP_RESTART,
 	KMSG_DUMP_HALT,
 	KMSG_DUMP_POWEROFF,
-	KMSG_DUMP_EMERG,
 };
 
 /**
...
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 				struct page *newpage)
 {
 }
 
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
...
@@ -1777,7 +1777,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2371,7 +2370,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
...
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 	return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+	struct completion *vfork;
+
+	task_lock(tsk);
+	vfork = tsk->vfork_done;
+	if (likely(vfork)) {
+		tsk->vfork_done = NULL;
+		complete(vfork);
+	}
+	task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+				struct completion *vfork)
+{
+	int killed;
+
+	freezer_do_not_count();
+	killed = wait_for_completion_killable(vfork);
+	freezer_count();
+
+	if (killed) {
+		task_lock(child);
+		child->vfork_done = NULL;
+		task_unlock(child);
+	}
+
+	put_task_struct(child);
+	return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-	struct completion *vfork_done = tsk->vfork_done;
-
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
 	if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
-	/* notify parent sleeping on vfork() */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
+	if (tsk->vfork_done)
+		complete_vfork_done(tsk);
 
 	/*
 	 * If we're exiting normally, clear a user-space tid field if
 	 * requested.  We leave this alone when dying by signal, to leave
 	 * the value intact in a core dump, and to save the unnecessary
-	 * trouble otherwise.  Userland only wants this done for a sys_exit.
+	 * trouble, say, a killed vfork parent shouldn't touch this mm.
+	 * Userland only wants this done for a sys_exit.
 	 */
 	if (tsk->clear_child_tid) {
 		if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
-	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
+			get_task_struct(p);
 		}
 
-		/*
-		 * We set PF_STARTING at creation in case tracing wants to
-		 * use this to distinguish a fully live task from one that
-		 * hasn't finished SIGSTOP raising yet.  Now we clear it
-		 * and set the child going.
-		 */
-		p->flags &= ~PF_STARTING;
-
 		wake_up_new_task(p);
 
 		/* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
 			ptrace_event(trace, nr);
 
 		if (clone_flags & CLONE_VFORK) {
-			freezer_do_not_count();
-			wait_for_completion(&vfork);
-			freezer_count();
-			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+			if (!wait_for_vfork_done(p, &vfork))
+				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
...
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
...
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
 	    ftrace_text_reserved(p->addr, p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr))
-		goto fail_with_jump_label;
+	    jump_label_text_reserved(p->addr, p->addr)) {
+		ret = -EINVAL;
+		goto cannot_probe;
+	}
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		 * its code to prohibit unexpected unloading.
 		 */
 		if (unlikely(!try_module_get(probed_mod)))
-			goto fail_with_jump_label;
+			goto cannot_probe;
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			goto fail_with_jump_label;
+			goto cannot_probe;
 		}
 		/* ret will be updated by following code */
 	}
@@ -1409,7 +1411,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 
 	return ret;
 
-fail_with_jump_label:
+cannot_probe:
 	preempt_enable();
 	jump_label_unlock();
 	return ret;
...
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	unsigned long l1, l2;
 	unsigned long flags;
 
+	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+		return;
+
 	/* Theoretically, the log could move on after we do this, but
 	   there's not a lot we can do about that. The new messages
 	   will overwrite the start of what we dump. */
...
@@ -818,17 +818,9 @@ static int __init fixup_activate(void *addr, enum debug_obj_state state)
 		if (obj->static_init == 1) {
 			debug_object_init(obj, &descr_type_test);
 			debug_object_activate(obj, &descr_type_test);
-			/*
-			 * Real code should return 0 here ! This is
-			 * not a fixup of some bad behaviour. We
-			 * merily call the debug_init function to keep
-			 * track of the object.
-			 */
-			return 1;
-		} else {
-			/* Real code needs to emit a warning here */
+			return 0;
 		}
-		return 0;
+		return 1;
 
 	case ODEBUG_STATE_ACTIVE:
 		debug_object_deactivate(obj, &descr_type_test);
@@ -967,7 +959,7 @@ static void __init debug_objects_selftest(void)
 	obj.static_init = 1;
 	debug_object_activate(&obj, &descr_type_test);
-	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
 		goto out;
 	debug_object_init(&obj, &descr_type_test);
 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
...
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 
 	ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			VM_BUG_ON(page_mapcount(page) < 0);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 			VM_BUG_ON(!PageHead(page));
+			tlb->mm->nr_ptes--;
 			spin_unlock(&tlb->mm->page_table_lock);
 			tlb_remove_page(tlb, page);
 			pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
 		pte_unmap(pte);
 	}
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	/*
 	 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
...
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid.  Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 		copy_user_highpage(new_page, page, address, vma);
 
 		SetPageDirty(new_page);
...
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU. Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -5077,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
 		return NULL;
 	if (PageAnon(page)) {
 		/* we don't move shared anon */
-		if (!move_anon() || page_mapcount(page) > 2)
+		if (!move_anon() || page_mapcount(page) > 1)
 			return NULL;
 	} else if (!move_file())
 		/* we ignore mapcount for file pages */
...
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (!newpage)
 		return -ENOMEM;
 
-	mem_cgroup_reset_owner(newpage);
-
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto out;
...
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
 		       struct page *page, struct page *page_tail)
 {
-	int active;
+	int uninitialized_var(active);
 	enum lru_list lru;
 	const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
 			active = 0;
 			lru = LRU_INACTIVE_ANON;
 		}
-		update_page_reclaim_stat(zone, page_tail, file, active);
 	} else {
 		SetPageUnevictable(page_tail);
 		lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
+
+	if (!PageUnevictable(page))
+		update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
 	add_page_to_lru_list(zone, page, lru);
+	update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
...
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		new_page = alloc_page_vma(gfp_mask, vma, addr);
 		if (!new_page)
 			break;		/* Out of memory */
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid.  Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 	}
 
 	/*
...