Commit fd19bda4 authored by Ingo Molnar

Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull additional commits for locktorture, from Paul E. McKenney.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 62731433 3e28e377
@@ -45,6 +45,11 @@ torture_type Type of lock to torture. By default, only spinlocks will
o "spin_lock_irq": spin_lock_irq() and spin_unlock_irq()
pairs.
o "rw_lock": read/write lock() and unlock() rwlock pairs.
o "rw_lock_irq": read/write lock_irq() and unlock_irq()
rwlock pairs.
o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
o "rwsem_lock": read/write down() and up() semaphore pairs.
...
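For illustration only (not part of this patch, and with any companion parameters left at their defaults), the new lock types are selected the same way as the existing ones, by passing torture_type when the module is loaded:

modprobe locktorture torture_type=rw_lock
modprobe locktorture torture_type=rw_lock_irq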
@@ -20,31 +20,20 @@
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
@@ -204,7 +193,7 @@ static struct lock_torture_ops spin_lock_ops = {
};
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
unsigned long flags;
@@ -229,6 +218,110 @@ static struct lock_torture_ops spin_lock_irq_ops = {
.name = "spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
write_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 2;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
write_unlock(&torture_rwlock);
}
static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
read_lock(&torture_rwlock);
return 0;
}
static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
const unsigned long shortdelay_us = 10;
const unsigned long longdelay_ms = 100;
/* We want a short delay mostly to emulate likely code, and
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
(cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
else
udelay(shortdelay_us);
}
static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
read_unlock(&torture_rwlock);
}
static struct lock_torture_ops rw_lock_ops = {
.writelock = torture_rwlock_write_lock,
.write_delay = torture_rwlock_write_delay,
.writeunlock = torture_rwlock_write_unlock,
.readlock = torture_rwlock_read_lock,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock,
.name = "rw_lock"
};
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
unsigned long flags;
write_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
unsigned long flags;
read_lock_irqsave(&torture_rwlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops rw_lock_irq_ops = {
.writelock = torture_rwlock_write_lock_irq,
.write_delay = torture_rwlock_write_delay,
.writeunlock = torture_rwlock_write_unlock_irq,
.readlock = torture_rwlock_read_lock_irq,
.read_delay = torture_rwlock_read_delay,
.readunlock = torture_rwlock_read_unlock_irq,
.name = "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);
static int torture_mutex_lock(void) __acquires(torture_mutex)
@@ -348,14 +441,19 @@ static int lock_torture_writer(void *arg)
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->writelock();
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
lock_is_write_held = 1;
if (WARN_ON_ONCE(lock_is_read_held))
lwsp->n_lock_fail++; /* rare, but... */
lwsp->n_lock_acquired++;
cxt.cur_ops->write_delay(&rand);
lock_is_write_held = 0;
cxt.cur_ops->writeunlock();
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_writer");
@@ -377,12 +475,17 @@ static int lock_torture_reader(void *arg)
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->readlock();
lock_is_read_held = 1;
if (WARN_ON_ONCE(lock_is_write_held))
lrsp->n_lock_fail++; /* rare, but... */
lrsp->n_lock_acquired++;
cxt.cur_ops->read_delay(&rand);
lock_is_read_held = 0;
cxt.cur_ops->readunlock();
stutter_wait("lock_torture_reader");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_reader");
@@ -535,8 +638,11 @@ static int __init lock_torture_init(void)
int i, j;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops,
&spin_lock_ops, &spin_lock_irq_ops,
&rw_lock_ops, &rw_lock_irq_ops,
&mutex_lock_ops,
&rwsem_lock_ops,
};
if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -571,7 +677,8 @@ static int __init lock_torture_init(void)
cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
if ((strncmp(torture_type, "spin", 4) == 0) ||
(strncmp(torture_type, "rw_lock", 7) == 0))
cxt.debug_lock = true;
#endif
...
@@ -2043,9 +2043,10 @@ __acquires(&pool->lock)
* kernels, where a requeueing work item waiting for something to
* happen could deadlock with stop_machine as such work item could
* indefinitely requeue itself while all other CPUs are trapped in
* stop_machine. At the same time, report a quiescent RCU state so
* the same condition doesn't freeze RCU.
*/
cond_resched_rcu_qs();
spin_lock_irq(&pool->lock);
...
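For context (a paraphrased sketch, not part of this diff): cond_resched_rcu_qs() behaves like cond_resched() but additionally reports a voluntary quiescent state to RCU, so the requeue loop above can no longer stall grace periods even when cond_resched() itself does nothing. Roughly, the helper in include/linux/rcupdate.h of this era amounts to:

/*
 * Paraphrased sketch of cond_resched_rcu_qs(); see the real definition
 * in include/linux/rcupdate.h.  Note a voluntary context switch for
 * RCU (including RCU-tasks), then offer to reschedule.
 */
#define cond_resched_rcu_qs() \
do { \
	rcu_note_voluntary_context_switch(current); \
	cond_resched(); \
} while (0)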
LOCK01
LOCK02
LOCK03
LOCK04
\ No newline at end of file
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y