Commit 135f97fd authored by Vitaly Wool, committed by Linus Torvalds

z3fold: remove preempt disabled sections for RT

Replace get_cpu_ptr() with migrate_disable()+this_cpu_ptr() so RT can take
spinlocks that become sleeping locks.
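
For reference, a minimal before/after sketch of the pattern (condensed from
the hunks below; do_work() is a placeholder for the locked section, not a
real z3fold helper):

	/* Before: get_cpu_ptr() implies preempt_disable(), so on
	 * PREEMPT_RT the spin_lock() below -- a sleeping rt_mutex
	 * there -- would be taken in atomic context.
	 */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	spin_lock(&pool->lock);		/* may sleep on RT: bug */
	do_work(unbuddied);
	spin_unlock(&pool->lock);
	put_cpu_ptr(pool->unbuddied);

	/* After: migrate_disable() only pins the task to its current
	 * CPU, which keeps the per-CPU pointer stable while preemption
	 * stays enabled, so taking the sleeping lock is legal.
	 */
	migrate_disable();
	unbuddied = this_cpu_ptr(pool->unbuddied);
	spin_lock(&pool->lock);		/* fine on RT */
	do_work(unbuddied);
	spin_unlock(&pool->lock);
	migrate_enable();

Only migration, not preemption, is what the per-CPU pointer actually
requires; the lists themselves remain serialized by pool->lock.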

Signed-off-by: Mike Galbraith <efault@gmx.de>

Link: https://lkml.kernel.org/r/20201209145151.18994-3-vitaly.wool@konsulko.com
Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dcf5aedb
mm/z3fold.c
@@ -623,14 +623,16 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool,
 				    struct z3fold_header *zhdr)
 {
 	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
 			zhdr->middle_chunks == 0) {
-		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
-
+		struct list_head *unbuddied;
 		int freechunks = num_free_chunks(zhdr);
+
+		migrate_disable();
+		unbuddied = this_cpu_ptr(pool->unbuddied);
 		spin_lock(&pool->lock);
 		list_add(&zhdr->buddy, &unbuddied[freechunks]);
 		spin_unlock(&pool->lock);
 		zhdr->cpu = smp_processor_id();
-		put_cpu_ptr(pool->unbuddied);
+		migrate_enable();
 	}
 }
@@ -880,8 +882,9 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 	int chunks = size_to_chunks(size), i;

 lookup:
+	migrate_disable();
 	/* First, try to find an unbuddied z3fold page. */
-	unbuddied = get_cpu_ptr(pool->unbuddied);
+	unbuddied = this_cpu_ptr(pool->unbuddied);
 	for_each_unbuddied_list(i, chunks) {
 		struct list_head *l = &unbuddied[i];
@@ -899,7 +902,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		    !z3fold_page_trylock(zhdr)) {
 			spin_unlock(&pool->lock);
 			zhdr = NULL;
-			put_cpu_ptr(pool->unbuddied);
+			migrate_enable();
 			if (can_sleep)
 				cond_resched();
 			goto lookup;
@@ -913,7 +916,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		    test_bit(PAGE_CLAIMED, &page->private)) {
 			z3fold_page_unlock(zhdr);
 			zhdr = NULL;
-			put_cpu_ptr(pool->unbuddied);
+			migrate_enable();
 			if (can_sleep)
 				cond_resched();
 			goto lookup;
@@ -928,7 +931,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		kref_get(&zhdr->refcount);
 		break;
 	}
-	put_cpu_ptr(pool->unbuddied);
+	migrate_enable();

 	if (!zhdr) {
 		int cpu;