Commit 3ae92ac2 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: introduce obj_allocated

The usage pattern for obj_to_head() is to check whether the object in the
zspage is allocated and, if so, to strip OBJ_ALLOCATED_TAG from the returned
value.  Thus, introduce obj_allocated(), which performs the check and returns
the untagged handle in one step.
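For illustration, here is a minimal, self-contained userspace sketch of the
caller pattern this change removes (the tag value and the object
representation are simplified stand-ins, not the kernel's real zsmalloc
definitions): with obj_to_head() every caller repeats the tag test and the
mask, while obj_allocated() folds both into one helper.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the allocation tag stored in an object's header. */
    #define OBJ_ALLOCATED_TAG 1UL

    /* Old-style helper: return the raw header word, tag still included. */
    static unsigned long obj_to_head(const unsigned long *obj)
    {
            return *obj;
    }

    /*
     * New-style helper: report whether the object is allocated and, if so,
     * hand back the handle with the tag already stripped.
     */
    static bool obj_allocated(const unsigned long *obj, unsigned long *phandle)
    {
            unsigned long handle = *obj;

            if (!(handle & OBJ_ALLOCATED_TAG))
                    return false;

            *phandle = handle & ~OBJ_ALLOCATED_TAG;
            return true;
    }

    int main(void)
    {
            unsigned long obj = 0x1000 | OBJ_ALLOCATED_TAG;
            unsigned long handle, head;

            /* Before: every caller repeats the tag test and the mask. */
            head = obj_to_head(&obj);
            if (head & OBJ_ALLOCATED_TAG) {
                    handle = head & ~OBJ_ALLOCATED_TAG;
                    printf("old pattern: handle = %#lx\n", handle);
            }

            /* After: the test and the mask are folded into obj_allocated(). */
            if (obj_allocated(&obj, &handle))
                    printf("new pattern: handle = %#lx\n", handle);

            return 0;
    }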

Link: https://lkml.kernel.org/r/20211115185909.3949505-5-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0a5f079b
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -877,13 +877,21 @@ static unsigned long handle_to_obj(unsigned long handle)
 	return *(unsigned long *)handle;
 }
 
-static unsigned long obj_to_head(struct page *page, void *obj)
+static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
 {
+	unsigned long handle;
+
 	if (unlikely(PageHugeObject(page))) {
 		VM_BUG_ON_PAGE(!is_first_page(page), page);
-		return page->index;
+		handle = page->index;
 	} else
-		return *(unsigned long *)obj;
+		handle = *(unsigned long *)obj;
+
+	if (!(handle & OBJ_ALLOCATED_TAG))
+		return false;
+
+	*phandle = handle & ~OBJ_ALLOCATED_TAG;
+	return true;
 }
 
 static inline int testpin_tag(unsigned long handle)
@@ -1606,7 +1614,6 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 static unsigned long find_alloced_obj(struct size_class *class,
 					struct page *page, int *obj_idx)
 {
-	unsigned long head;
 	int offset = 0;
 	int index = *obj_idx;
 	unsigned long handle = 0;
@@ -1616,9 +1623,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
-		head = obj_to_head(page, addr + offset);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
+		if (obj_allocated(page, addr + offset, &handle)) {
 			if (trypin_tag(handle))
 				break;
 			handle = 0;
@@ -1928,7 +1933,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	struct page *dummy;
 	void *s_addr, *d_addr, *addr;
 	int offset, pos;
-	unsigned long handle, head;
+	unsigned long handle;
 	unsigned long old_obj, new_obj;
 	unsigned int obj_idx;
 	int ret = -EAGAIN;
@@ -1964,9 +1969,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	pos = offset;
 	s_addr = kmap_atomic(page);
 	while (pos < PAGE_SIZE) {
-		head = obj_to_head(page, s_addr + pos);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
+		if (obj_allocated(page, s_addr + pos, &handle)) {
 			if (!trypin_tag(handle))
 				goto unpin_objects;
 		}
@@ -1982,9 +1985,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
 	for (addr = s_addr + offset; addr < s_addr + pos;
 						addr += class->size) {
-		head = obj_to_head(page, addr);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
+		if (obj_allocated(page, addr, &handle)) {
 			BUG_ON(!testpin_tag(handle));
 
 			old_obj = handle_to_obj(handle);
@@ -2029,9 +2030,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 unpin_objects:
 	for (addr = s_addr + offset; addr < s_addr + pos;
 						addr += class->size) {
-		head = obj_to_head(page, addr);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
+		if (obj_allocated(page, addr, &handle)) {
 			BUG_ON(!testpin_tag(handle));
 			unpin_tag(handle);
 		}