Commit 6edda04c authored by Waiman Long, committed by akpm

mm/kmemleak: prevent soft lockup in first object iteration loop of kmemleak_scan()

The first RCU-based object iteration loop has to modify the object
count, so we cannot skip taking the object lock.

One way to avoid a soft lockup is to insert an occasional cond_resched()
call into the loop.  This cannot be done while holding the RCU read
lock, which protects the objects from being freed.  However, taking a
reference to an object prevents it from being freed, so we can then
safely do a cond_resched() call after every 64k objects.
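
For context, here is a condensed, non-buildable sketch of the "pin the
object, drop the RCU read lock, cond_resched(), re-take the lock, unpin"
pattern the patch applies below.  The function name scan_sketch() is made
up for illustration; get_object()/put_object() and object_list are the
existing kmemleak helpers, and the per-object scanning work is elided
(the real loop also reuses the reference already taken for gray objects
instead of taking a second one):

	/* Illustrative sketch only, not part of the patch. */
	static void scan_sketch(void)
	{
		struct kmemleak_object *object;
		int cnt = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list) {
			/* ... whiten the object under object->lock ... */

			if (!(++cnt & 0xffff)) {	/* every 64k objects */
				if (!get_object(object))
					continue;	/* object is being freed */
				rcu_read_unlock();	/* safe: reference held */
				cond_resched();
				rcu_read_lock();
				put_object(object);	/* drop the extra reference */
			}
		}
		rcu_read_unlock();
	}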

Link: https://lkml.kernel.org/r/20220614220359.59282-4-longman@redhat.com
Signed-off-by: Waiman Long <longman@redhat.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 64977918
@@ -1474,12 +1474,16 @@ static void kmemleak_scan(void)
 	struct zone *zone;
 	int __maybe_unused i;
 	int new_leaks = 0;
+	int loop1_cnt = 0;
 
 	jiffies_last_scan = jiffies;
 
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
+		bool obj_pinned = false;
+
+		loop1_cnt++;
 		raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
 		/*
@@ -1505,10 +1509,32 @@ static void kmemleak_scan(void)
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
 
-		if (color_gray(object) && get_object(object))
+		if (color_gray(object) && get_object(object)) {
 			list_add_tail(&object->gray_list, &gray_list);
+			obj_pinned = true;
+		}
 
 		raw_spin_unlock_irq(&object->lock);
+
+		/*
+		 * Do a cond_resched() to avoid soft lockup every 64k objects.
+		 * Make sure a reference has been taken so that the object
+		 * won't go away without RCU read lock.
+		 */
+		if (!(loop1_cnt & 0xffff)) {
+			if (!obj_pinned && !get_object(object)) {
+				/* Try the next object instead */
+				loop1_cnt--;
+				continue;
+			}
+
+			rcu_read_unlock();
+			cond_resched();
+			rcu_read_lock();
+
+			if (!obj_pinned)
+				put_object(object);
+		}
 	}
 	rcu_read_unlock();