Commit c9cc7772 authored by Kirill Smelkov's avatar Kirill Smelkov

bigfile/virtmem: Factor-out mmapping Page into VMA into vma_mmap_page

We are soon going to use this functionality from several places.

The place where the mmap is performed changes slightly, because
vma_mmap_page deduces prot from page->state, so page->state has to be
fully prepared before the page is mmapped.
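
To illustrate the ordering constraint, here is a minimal standalone
sketch (the PageState enum and the page_state_to_prot helper are
simplified stand-ins for this illustration only, not the real virtmem
code): the mmap protection is a pure function of the page state, so the
state has to reach its final value before the page is mapped.

    #include <stdio.h>
    #include <sys/mman.h>

    /* simplified stand-in for the real PageState */
    typedef enum { PAGE_LOADED, PAGE_DIRTY } PageState;

    /* hypothetical helper: derive mmap protection from page state alone,
     * the way vma_mmap_page derives prot internally */
    static int page_state_to_prot(PageState state)
    {
        return (state == PAGE_DIRTY) ? (PROT_READ | PROT_WRITE) : PROT_READ;
    }

    int main(void)
    {
        int write_fault = 1;            /* pretend this is a write fault  */
        PageState state = PAGE_LOADED;  /* state right after loading data */

        /* mapping here would use stale protection (PROT_READ only) */
        printf("prot before state is finalized: %d\n", page_state_to_prot(state));

        /* finish preparing the state first ... */
        if (write_fault)
            state = PAGE_DIRTY;

        /* ... then derive prot and map: now PROT_READ|PROT_WRITE */
        printf("prot after state is finalized:  %d\n", page_state_to_prot(state));
        return 0;
    }

In the diff below this is why vma_mmap_page is called only after
page->state = max(page->state, newstate) is done.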
parent 29268616
@@ -43,6 +43,7 @@ static void page_drop_memory(Page *page);
static void page_del(Page *page);
static void *vma_page_addr(VMA *vma, Page *page);
static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr);
static void vma_mmap_page(VMA *vma, Page *page);
static int vma_page_ismapped(VMA *vma, Page *page);
static void vma_page_ensure_unmapped(VMA *vma, Page *page);
static void vma_page_ensure_notmappedrw(VMA *vma, Page *page);
@@ -732,24 +733,11 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
    /* (6) page data ready. Mmap it atomically into vma address space, or mprotect
     * appropriately if it was already mmaped. */
    int prot = PROT_READ;
    PageState newstate = PAGE_LOADED;
    if (write || page->state == PAGE_DIRTY) {
        prot |= PROT_WRITE;
        newstate = PAGE_DIRTY;
    }

    if (!bitmap_test_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset)) {
        // XXX err
        page_mmap(page, vma_page_addr(vma, page), prot);
        bitmap_set_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset);
        page_incref(page);
    }
    else {
        /* just changing protection bits should not fail, if parameters ok */
        xmprotect(vma_page_addr(vma, page), page_size(page), prot);
    }

    // XXX also call page->markdirty() ?
    if (newstate == PAGE_DIRTY && newstate != page->state) {
        /* it is not allowed to modify pages while writeout is in progress */
@@ -759,6 +747,8 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
    }
    page->state = max(page->state, newstate);
    vma_mmap_page(vma, page);

    /* mark page as used recently */
    // XXX = list_move_tail()
    list_del(&page->lru);
@@ -930,6 +920,34 @@ static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr)
}
/* vma_mmap_page mmaps page into vma.
 *
 * the page must belong to the covered file.
 * mmap protection is PROT_READ if the page is PAGE_LOADED, or
 * PROT_READ|PROT_WRITE if it is PAGE_DIRTY.
 *
 * must be called under virtmem lock.
 */
static void vma_mmap_page(VMA *vma, Page *page) {
    pgoff_t pgoff_invma;
    int prot = (page->state == PAGE_DIRTY ? PROT_READ|PROT_WRITE : PROT_READ);

    ASSERT(page->state == PAGE_LOADED || page->state == PAGE_DIRTY);
    ASSERT(vma->f_pgoffset <= page->f_pgoffset &&
           page->f_pgoffset < vma_addr_fpgoffset(vma, vma->addr_stop));

    pgoff_invma = page->f_pgoffset - vma->f_pgoffset;
    if (!bitmap_test_bit(vma->page_ismappedv, pgoff_invma)) {
        // XXX err
        page_mmap(page, vma_page_addr(vma, page), prot);
        bitmap_set_bit(vma->page_ismappedv, pgoff_invma);
        page_incref(page);
    }
    else {
        /* just changing protection bits should not fail, if parameters ok */
        xmprotect(vma_page_addr(vma, page), page_size(page), prot);
    }
}
/* is `page` mapped to `vma` */
static int vma_page_ismapped(VMA *vma, Page *page)