From 0d10ef28b039d680f13a9575749131a18e31bb79 Mon Sep 17 00:00:00 2001
From: Kirill Smelkov <kirr@nexedi.com>
Date: Wed, 10 Jul 2019 19:00:37 +0300
Subject: [PATCH] .

---
 bigfile/virtmem.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/bigfile/virtmem.c b/bigfile/virtmem.c
index 8d056d8..32dc49c 100644
--- a/bigfile/virtmem.c
+++ b/bigfile/virtmem.c
@@ -41,6 +41,7 @@ static size_t page_size(const Page *page);
 static void page_drop_memory(Page *page);
 static void *vma_page_addr(VMA *vma, Page *page);
 static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr);
+static void vma_mmap_page(VMA *vma, Page *page);
 static int vma_page_ismapped(VMA *vma, Page *page);
 static void vma_page_ensure_unmapped(VMA *vma, Page *page);
 static void vma_page_ensure_notmappedrw(VMA *vma, Page *page);
@@ -270,11 +271,8 @@ int fileh_mmap(VMA *vma, BigFileH *fileh, pgoff_t pgoffset, pgoff_t pglen)
             if (!(pgoffset <= page->f_pgoffset && page->f_pgoffset < pgoffset + pglen))
                 continue;   /* page is out of requested mmap coverage */

-            // XXX err
-            // XXX notify watcher that we mmaped RAM page in its range?
-            page_mmap(page, vma_page_addr(vma, page), PROT_READ | PROT_WRITE);
-            bitmap_set_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset);
-            page_incref(page);
+            // XXX notify watcher that we mmap RAM page in its range?
+            vma_mmap_page(vma, page);
         }
     }

@@ -818,6 +816,8 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
     // XXX overlay: assert !vma->page_ismappedv[blk] XXX not ok? (retrying after virt unlock/lock)
     // XXX mmap page to all vma with .mmap_overlay=1 of this fileh.

+    vma_mmap_page(vma, page);
+#if 0
     if (!bitmap_test_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset)) {
         // XXX err
         page_mmap(page, vma_page_addr(vma, page), prot);
@@ -828,8 +828,10 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
         /* just changing protection bits should not fail, if parameters ok */
         xmprotect(vma_page_addr(vma, page), page_size(page), prot);
     }
+#endif

     // XXX also call page->markdirty() ?
+    // XXX move ^^^ before vma_mmap_page
     if (newstate == PAGE_DIRTY && newstate != page->state) {
         /* it is not allowed to modify pages while writeout is in progress */
         BUG_ON(fileh->writeout_inprogress);
@@ -993,6 +995,34 @@ static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr)
 }


+/* vma_mmap_page mmaps page into vma.
+ *
+ * the page must belong to the file covered by vma.
+ * mmap protection is PROT_READ if the page is PAGE_LOADED, or
+ * PROT_READ|PROT_WRITE if it is PAGE_DIRTY.
+ *
+ * must be called under virtmem lock.
+ */
+static void vma_mmap_page(VMA *vma, Page *page) {
+    pgoff_t pgoff_invma;
+    int prot = (page->state == PAGE_DIRTY ? PROT_READ|PROT_WRITE : PROT_READ);
+
+    ASSERT(page->state == PAGE_LOADED || page->state == PAGE_DIRTY);
+    ASSERT(vma->f_pgoffset <= page->f_pgoffset &&
+           page->f_pgoffset < vma_addr_fpgoffset(vma, vma->addr_stop));
+
+    pgoff_invma = page->f_pgoffset - vma->f_pgoffset;
+    if (!bitmap_test_bit(vma->page_ismappedv, pgoff_invma)) {
+        // XXX err
+        page_mmap(page, vma_page_addr(vma, page), prot);
+        bitmap_set_bit(vma->page_ismappedv, pgoff_invma);
+        page_incref(page);
+    }
+    else {
+        /* just changing protection bits should not fail, if parameters ok */
+        xmprotect(vma_page_addr(vma, page), page_size(page), prot);
+    }
+}

 /* is `page` mapped to `vma` */
 static int vma_page_ismapped(VMA *vma, Page *page)
-- 
2.30.9
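
Side note (not part of the patch): the map-once / adjust-protection-later pattern that vma_mmap_page consolidates can be exercised in isolation with plain POSIX calls. The sketch below maps one anonymous page read-only and later upgrades it to read-write, mirroring the page_mmap and xmprotect branches of vma_mmap_page. It is a minimal illustration only: it does not use wendelin.core's types or bitmap bookkeeping, and the names demo_map_page and page_is_dirty are hypothetical.

/* Minimal standalone sketch -- not wendelin.core code. */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* demo_map_page (hypothetical): map one anonymous page with protection
 * chosen the same way vma_mmap_page chooses it -- read-only for a clean
 * page, read-write for a dirty one. */
static void *demo_map_page(size_t pagesize, int page_is_dirty)
{
    int prot = page_is_dirty ? (PROT_READ | PROT_WRITE) : PROT_READ;
    void *addr = mmap(NULL, pagesize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(addr != MAP_FAILED);
    return addr;
}

int main(void)
{
    size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);

    /* first access: page is clean -> mapped read-only
     * (corresponds to the if-branch of vma_mmap_page: page_mmap + bookkeeping) */
    char *p = demo_map_page(pagesize, /*page_is_dirty=*/0);

    /* later write access: page is already mapped, so only the protection
     * bits change (corresponds to the else-branch: xmprotect) */
    int err = mprotect(p, pagesize, PROT_READ | PROT_WRITE);
    assert(err == 0);

    strcpy(p, "page is now writable");
    printf("%s\n", p);

    munmap(p, pagesize);
    return 0;
}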