Commit ca54f6d8 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

zswap: make zswap_load() take a folio

Only convert a few easy parts of this function to use the folio passed in;
convert back to struct page for the majority of it.  Removes three hidden
calls to compound_head().

Link: https://lkml.kernel.org/r/20230715042343.434588-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fbcec6a3
...@@ -11,7 +11,7 @@ extern atomic_t zswap_stored_pages; ...@@ -11,7 +11,7 @@ extern atomic_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP #ifdef CONFIG_ZSWAP
bool zswap_store(struct folio *folio); bool zswap_store(struct folio *folio);
bool zswap_load(struct page *page); bool zswap_load(struct folio *folio);
void zswap_invalidate(int type, pgoff_t offset); void zswap_invalidate(int type, pgoff_t offset);
void zswap_swapon(int type); void zswap_swapon(int type);
void zswap_swapoff(int type); void zswap_swapoff(int type);
...@@ -23,7 +23,7 @@ static inline bool zswap_store(struct folio *folio) ...@@ -23,7 +23,7 @@ static inline bool zswap_store(struct folio *folio)
return false; return false;
} }
static inline bool zswap_load(struct page *page) static inline bool zswap_load(struct folio *folio)
{ {
return false; return false;
} }
......
...@@ -513,7 +513,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) ...@@ -513,7 +513,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
} }
delayacct_swapin_start(); delayacct_swapin_start();
if (zswap_load(page)) { if (zswap_load(folio)) {
folio_mark_uptodate(folio); folio_mark_uptodate(folio);
folio_unlock(folio); folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) { } else if (data_race(sis->flags & SWP_FS_OPS)) {
......
...@@ -1405,11 +1405,12 @@ bool zswap_store(struct folio *folio) ...@@ -1405,11 +1405,12 @@ bool zswap_store(struct folio *folio)
goto reject; goto reject;
} }
bool zswap_load(struct page *page) bool zswap_load(struct folio *folio)
{ {
swp_entry_t swp = { .val = page_private(page), }; swp_entry_t swp = folio_swap_entry(folio);
int type = swp_type(swp); int type = swp_type(swp);
pgoff_t offset = swp_offset(swp); pgoff_t offset = swp_offset(swp);
struct page *page = &folio->page;
struct zswap_tree *tree = zswap_trees[type]; struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry; struct zswap_entry *entry;
struct scatterlist input, output; struct scatterlist input, output;
...@@ -1419,7 +1420,7 @@ bool zswap_load(struct page *page) ...@@ -1419,7 +1420,7 @@ bool zswap_load(struct page *page)
unsigned int dlen; unsigned int dlen;
bool ret; bool ret;
VM_WARN_ON_ONCE(!PageLocked(page)); VM_WARN_ON_ONCE(!folio_test_locked(folio));
/* find */ /* find */
spin_lock(&tree->lock); spin_lock(&tree->lock);
...@@ -1481,7 +1482,7 @@ bool zswap_load(struct page *page) ...@@ -1481,7 +1482,7 @@ bool zswap_load(struct page *page)
spin_lock(&tree->lock); spin_lock(&tree->lock);
if (ret && zswap_exclusive_loads_enabled) { if (ret && zswap_exclusive_loads_enabled) {
zswap_invalidate_entry(tree, entry); zswap_invalidate_entry(tree, entry);
SetPageDirty(page); folio_mark_dirty(folio);
} else if (entry->length) { } else if (entry->length) {
spin_lock(&entry->pool->lru_lock); spin_lock(&entry->pool->lru_lock);
list_move(&entry->lru, &entry->pool->lru); list_move(&entry->lru, &entry->pool->lru);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment