Commit 7f48121e authored by Thomas Gleixner, committed by Andrew Morton

mm/vmalloc: add missing READ/WRITE_ONCE() annotations

purge_fragmented_blocks() accesses vmap_block::free and vmap_block::dirty
lockless for a quick check.

Add the missing READ/WRITE_ONCE() annotations.
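
For readers unfamiliar with the pattern, here is a minimal, illustrative sketch (not part of this patch) of what the annotations buy: the writer still updates the counters under vb->lock, but marks the stores with WRITE_ONCE(), while the lockless quick check snapshots each field exactly once with READ_ONCE(). The struct vb_sketch type and both helper names are invented for illustration; READ_ONCE()/WRITE_ONCE() come from <linux/compiler.h>, spinlock_t from <linux/spinlock.h>, and VMAP_BBMAP_BITS is the existing constant in mm/vmalloc.c.

/*
 * Illustrative sketch only, simplified from the real vmap_block handling:
 * fields that are read locklessly on a fast path get READ_ONCE()/WRITE_ONCE()
 * so the compiler cannot tear, fuse or re-read the accesses, even though the
 * writer still serializes against other writers with vb->lock.
 */
struct vb_sketch {
        spinlock_t lock;
        unsigned long free;
        unsigned long dirty;
};

/* Writer side: locked against other writers, stores annotated for readers. */
static void vb_sketch_mark_purged(struct vb_sketch *vb)
{
        spin_lock(&vb->lock);
        WRITE_ONCE(vb->free, 0);                /* prevent further allocs */
        WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); /* prevent purging it again */
        spin_unlock(&vb->lock);
}

/* Reader side: lockless quick check, mirrors the purge_fragmented_blocks() test. */
static bool vb_sketch_worth_purging(struct vb_sketch *vb)
{
        unsigned long free = READ_ONCE(vb->free);
        unsigned long dirty = READ_ONCE(vb->dirty);

        /* Fully accounted for and not already fully dirty? Worth a closer look. */
        return free + dirty == VMAP_BBMAP_BITS && dirty != VMAP_BBMAP_BITS;
}

The annotations add no ordering or locking by themselves; they keep the compiler from tearing, fusing or re-reading the racy accesses and mark them as intentional for data-race checkers such as KCSAN.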

Link: https://lkml.kernel.org/r/20230525124504.807356682@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 43d76502
@@ -2094,9 +2094,9 @@ static bool purge_fragmented_block(struct vmap_block *vb,
                 return false;
 
         /* prevent further allocs after releasing lock */
-        vb->free = 0;
+        WRITE_ONCE(vb->free, 0);
         /* prevent purging it again */
-        vb->dirty = VMAP_BBMAP_BITS;
+        WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
         vb->dirty_min = 0;
         vb->dirty_max = VMAP_BBMAP_BITS;
         spin_lock(&vbq->lock);
@@ -2124,8 +2124,11 @@ static void purge_fragmented_blocks(int cpu)
         rcu_read_lock();
         list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-                if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
-                    vb->dirty == VMAP_BBMAP_BITS)
+                unsigned long free = READ_ONCE(vb->free);
+                unsigned long dirty = READ_ONCE(vb->dirty);
+
+                if (free + dirty != VMAP_BBMAP_BITS ||
+                    dirty == VMAP_BBMAP_BITS)
                         continue;
 
                 spin_lock(&vb->lock);
@@ -2233,7 +2236,7 @@ static void vb_free(unsigned long addr, unsigned long size)
         vb->dirty_min = min(vb->dirty_min, offset);
         vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
 
-        vb->dirty += 1UL << order;
+        WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
         if (vb->dirty == VMAP_BBMAP_BITS) {
                 BUG_ON(vb->free);
                 spin_unlock(&vb->lock);