Commit 16594e60 authored by Kefeng Wang, committed by Christian Brauner

fs: aio: convert to ring_folios and internal_folios

Since aio already uses folios in most functions, convert ring/internal_pages
to ring/internal_folios and use folios instead of pages throughout aio,
removing the hidden calls to compound_head() in page-based helpers such as
flush_dcache_page().
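
To illustrate the compound_head() point (an editorial sketch, assuming an
architecture that implements deferred dcache flushing; not code from this
patch): a page-based helper must first resolve the page's folio, while a
caller that already holds a folio can use the folio helpers directly:

        /* Page-based helper, as implemented on several architectures:
         * page_folio() goes through compound_head() to find the folio. */
        void flush_dcache_page(struct page *page)
        {
                flush_dcache_folio(page_folio(page));
        }

        /* With struct folio pointers stored in the ring, the hidden
         * lookup disappears: */
        ring = folio_address(ctx->ring_folios[0]);   /* was page_address() */
        flush_dcache_folio(ctx->ring_folios[0]);     /* was flush_dcache_page() */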
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20240321131640.948634-4-wangkefeng.wang@huawei.com
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 5ea0a355
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -122,7 +122,7 @@ struct kioctx {
         unsigned long   mmap_base;
         unsigned long   mmap_size;
 
-        struct page     **ring_pages;
+        struct folio    **ring_folios;
         long            nr_pages;
 
         struct rcu_work free_rwork;     /* see free_ioctx() */
@@ -160,7 +160,7 @@ struct kioctx {
                 spinlock_t      completion_lock;
         } ____cacheline_aligned_in_smp;
 
-        struct page     *internal_pages[AIO_RING_PAGES];
+        struct folio    *internal_folios[AIO_RING_PAGES];
         struct file     *aio_ring_file;
 
         unsigned        id;
@@ -334,20 +334,20 @@ static void aio_free_ring(struct kioctx *ctx)
         put_aio_ring_file(ctx);
 
         for (i = 0; i < ctx->nr_pages; i++) {
-                struct folio *folio = page_folio(ctx->ring_pages[i]);
+                struct folio *folio = ctx->ring_folios[i];
 
                 if (!folio)
                         continue;
 
                 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
                          folio_ref_count(folio));
-                ctx->ring_pages[i] = NULL;
+                ctx->ring_folios[i] = NULL;
                 folio_put(folio);
         }
 
-        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
-                kfree(ctx->ring_pages);
-                ctx->ring_pages = NULL;
+        if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
+                kfree(ctx->ring_folios);
+                ctx->ring_folios = NULL;
         }
 }
@@ -442,7 +442,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
         idx = src->index;
         if (idx < (pgoff_t)ctx->nr_pages) {
                 /* Make sure the old folio hasn't already been changed */
-                if (ctx->ring_pages[idx] != &src->page)
+                if (ctx->ring_folios[idx] != src)
                         rc = -EAGAIN;
         } else
                 rc = -EINVAL;
@@ -466,8 +466,8 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
          */
         spin_lock_irqsave(&ctx->completion_lock, flags);
         folio_migrate_copy(dst, src);
-        BUG_ON(ctx->ring_pages[idx] != &src->page);
-        ctx->ring_pages[idx] = &dst->page;
+        BUG_ON(ctx->ring_folios[idx] != src);
+        ctx->ring_folios[idx] = dst;
         spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
         /* The old folio is no longer accessible. */
@@ -517,11 +517,11 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
         nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
                         / sizeof(struct io_event);
 
-        ctx->ring_pages = ctx->internal_pages;
+        ctx->ring_folios = ctx->internal_folios;
         if (nr_pages > AIO_RING_PAGES) {
-                ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+                ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
                                           GFP_KERNEL);
-                if (!ctx->ring_pages) {
+                if (!ctx->ring_folios) {
                         put_aio_ring_file(ctx);
                         return -ENOMEM;
                 }
@@ -540,7 +540,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
                          folio_ref_count(folio));
 
                 folio_end_read(folio, true);
-                ctx->ring_pages[i] = &folio->page;
+                ctx->ring_folios[i] = folio;
         }
         ctx->nr_pages = i;
@@ -573,7 +573,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
         ctx->user_id = ctx->mmap_base;
         ctx->nr_events = nr_events; /* trusted copy */
 
-        ring = page_address(ctx->ring_pages[0]);
+        ring = folio_address(ctx->ring_folios[0]);
         ring->nr = nr_events;   /* user copy */
         ring->id = ~0U;
         ring->head = ring->tail = 0;
@@ -581,7 +581,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
         ring->compat_features = AIO_RING_COMPAT_FEATURES;
         ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
         ring->header_length = sizeof(struct aio_ring);
-        flush_dcache_page(ctx->ring_pages[0]);
+        flush_dcache_folio(ctx->ring_folios[0]);
 
         return 0;
 }
@@ -692,9 +692,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
                                         /* While kioctx setup is in progress,
                                          * we are protected from page migration
-                                         * changes ring_pages by ->ring_lock.
+                                         * changes ring_folios by ->ring_lock.
                                          */
-                                        ring = page_address(ctx->ring_pages[0]);
+                                        ring = folio_address(ctx->ring_folios[0]);
                                         ring->id = ctx->id;
                                         return 0;
                                 }
@@ -1036,7 +1036,7 @@ static void user_refill_reqs_available(struct kioctx *ctx)
          * against ctx->completed_events below will make sure we do the
          * safe/right thing.
          */
-        ring = page_address(ctx->ring_pages[0]);
+        ring = folio_address(ctx->ring_folios[0]);
         head = ring->head;
 
         refill_reqs_available(ctx, head, ctx->tail);
@@ -1148,12 +1148,12 @@ static void aio_complete(struct aio_kiocb *iocb)
         if (++tail >= ctx->nr_events)
                 tail = 0;
 
-        ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+        ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
         event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
         *event = iocb->ki_res;
 
-        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+        flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
 
         pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
                  (void __user *)(unsigned long)iocb->ki_res.obj,
@@ -1166,10 +1166,10 @@ static void aio_complete(struct aio_kiocb *iocb)
         ctx->tail = tail;
 
-        ring = page_address(ctx->ring_pages[0]);
+        ring = folio_address(ctx->ring_folios[0]);
         head = ring->head;
         ring->tail = tail;
-        flush_dcache_page(ctx->ring_pages[0]);
+        flush_dcache_folio(ctx->ring_folios[0]);
 
         ctx->completed_events++;
         if (ctx->completed_events > 1)
@@ -1241,8 +1241,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
         sched_annotate_sleep();
         mutex_lock(&ctx->ring_lock);
 
-        /* Access to ->ring_pages here is protected by ctx->ring_lock. */
-        ring = page_address(ctx->ring_pages[0]);
+        /* Access to ->ring_folios here is protected by ctx->ring_lock. */
+        ring = folio_address(ctx->ring_folios[0]);
         head = ring->head;
         tail = ring->tail;
@@ -1263,20 +1263,20 @@ static long aio_read_events_ring(struct kioctx *ctx,
         while (ret < nr) {
                 long avail;
                 struct io_event *ev;
-                struct page *page;
+                struct folio *folio;
 
                 avail = (head <= tail ? tail : ctx->nr_events) - head;
                 if (head == tail)
                         break;
 
                 pos = head + AIO_EVENTS_OFFSET;
-                page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+                folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
                 pos %= AIO_EVENTS_PER_PAGE;
 
                 avail = min(avail, nr - ret);
                 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
 
-                ev = page_address(page);
+                ev = folio_address(folio);
                 copy_ret = copy_to_user(event + ret, ev + pos,
                                         sizeof(*ev) * avail);
@@ -1290,9 +1290,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
                 head %= ctx->nr_events;
         }
 
-        ring = page_address(ctx->ring_pages[0]);
+        ring = folio_address(ctx->ring_folios[0]);
         ring->head = head;
-        flush_dcache_page(ctx->ring_pages[0]);
+        flush_dcache_folio(ctx->ring_folios[0]);
 
         pr_debug("%li h%u t%u\n", ret, head, tail);
 out: