Commit 9518e5bf authored by Mike Galbraith, committed by Jens Axboe

zram: Replace bit spinlocks with a spinlock_t.

The bit spinlock disables preemption. On PREEMPT_RT, spinlock_t becomes a
sleeping lock, and a sleeping lock cannot be acquired while preemption is
disabled. Inside the bit-spinlock-protected section, zs_free() acquires the
zs_pool::lock, and zram::wb_limit_lock is also accessed; both are spinlock_t.

Add a spinlock_t for locking. The ZRAM_LOCK bit is still set after the lock
has been acquired and cleared before it is dropped. The size of struct
zram_table_entry increases by 4 bytes for the lock, plus an additional
4 bytes of padding when CONFIG_ZRAM_TRACK_ENTRY_ACTIME is enabled.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20240906141520.730009-2-bigeasy@linutronix.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 296dbc72
drivers/block/zram/zram_drv.c

@@ -59,17 +59,24 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
 
 static int zram_slot_trylock(struct zram *zram, u32 index)
 {
-	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+	int ret;
+
+	ret = spin_trylock(&zram->table[index].lock);
+	if (ret)
+		__set_bit(ZRAM_LOCK, &zram->table[index].flags);
+	return ret;
 }
 
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+	spin_lock(&zram->table[index].lock);
+	__set_bit(ZRAM_LOCK, &zram->table[index].flags);
 }
 
 static void zram_slot_unlock(struct zram *zram, u32 index)
 {
-	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+	__clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+	spin_unlock(&zram->table[index].lock);
 }
 
 static inline bool init_done(struct zram *zram)
@@ -1211,7 +1218,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
 
 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 {
-	size_t num_pages;
+	size_t num_pages, index;
 
 	num_pages = disksize >> PAGE_SHIFT;
 	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
@@ -1226,6 +1233,9 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 	if (!huge_class_size)
 		huge_class_size = zs_huge_class_size(zram->mem_pool);
 
+	for (index = 0; index < num_pages; index++)
+		spin_lock_init(&zram->table[index].lock);
+
 	return true;
 }

drivers/block/zram/zram_drv.h

@@ -69,6 +69,7 @@ struct zram_table_entry {
 		unsigned long element;
 	};
 	unsigned long flags;
+	spinlock_t lock;
 #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
 	ktime_t ac_time;
 #endif
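For context, the minimal sketch below (not part of the patch; struct slot, SLOT_LOCK_BIT and the slot_lock_*() helpers are invented for illustration) contrasts the old bit-spinlock scheme with the new spinlock_t scheme and spells out why the non-atomic __set_bit()/__clear_bit() are sufficient:

#include <linux/bit_spinlock.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#define SLOT_LOCK_BIT	0	/* stand-in for ZRAM_LOCK */

struct slot {
	unsigned long flags;	/* flag bits, including SLOT_LOCK_BIT */
	spinlock_t lock;	/* the lock this patch adds */
};

/*
 * Old scheme: the flag bit *is* the lock.  bit_spin_lock() spins with
 * preemption disabled, even on PREEMPT_RT, so nothing that might sleep
 * (such as an RT-converted spinlock_t like zs_pool::lock) may be taken
 * inside the critical section.
 */
static void slot_lock_old(struct slot *s)
{
	bit_spin_lock(SLOT_LOCK_BIT, &s->flags);
}

static void slot_unlock_old(struct slot *s)
{
	bit_spin_unlock(SLOT_LOCK_BIT, &s->flags);
}

/*
 * New scheme: a real spinlock_t provides the mutual exclusion.  On
 * PREEMPT_RT it becomes a sleeping rtmutex, so the critical section
 * stays preemptible and may acquire other sleeping locks.  The flag
 * bit is kept in sync so the flags word still shows the slot as
 * locked; the non-atomic __set_bit()/__clear_bit() are safe because
 * the flags word is only ever modified while the lock is held.
 */
static void slot_lock_new(struct slot *s)
{
	spin_lock(&s->lock);
	__set_bit(SLOT_LOCK_BIT, &s->flags);
}

static void slot_unlock_new(struct slot *s)
{
	__clear_bit(SLOT_LOCK_BIT, &s->flags);
	spin_unlock(&s->lock);
}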