Commit 71a3f4ed authored by Rusty Russell

lguest: use get_user_pages_fast() instead of get_user_pages()

Using a simple page-table thrashing program, I measure a slight
improvement.  The program creates five processes.  Each touches 1000
pages, then schedules the next process.  We repeat this 1000 times.  As
lguest only caches 4 cr3 values, this rebuilds a lot of shadow page
tables, requiring virt->phys mappings.

	Before: 5.93 seconds
	After: 5.40 seconds

(Counts of the slow path vs. the fast path in this usage are 6092 and 2852462, respectively.)
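
The thrashing program itself isn't part of this commit; the following is
a minimal sketch matching the description above (five processes, 1000
pages each, 1000 rounds).  The ring-of-pipes hand-off, PAGE_SZ and all
identifiers are assumptions, not the program actually used.

	/* Each process blocks on its pipe, touches NPAGES pages (each
	 * write faults in a pte, so the hypervisor must rebuild shadow
	 * page table entries once its cr3 cache has been thrashed),
	 * then pokes the next process in the ring. */
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/wait.h>

	#define NPROCS  5
	#define NPAGES  1000
	#define NROUNDS 1000
	#define PAGE_SZ 4096

	int main(void)
	{
		int in[NPROCS][2], i, round, j;
		char token = 'x';

		/* One pipe per process: process i waits on in[i], then
		 * wakes process (i + 1) % NPROCS. */
		for (i = 0; i < NPROCS; i++)
			if (pipe(in[i]) < 0)
				exit(1);

		for (i = 0; i < NPROCS; i++) {
			if (fork() == 0) {
				char *mem = malloc(NPAGES * PAGE_SZ);

				for (round = 0; round < NROUNDS; round++) {
					/* Wait for our turn. */
					read(in[i][0], &token, 1);
					/* Touch every page. */
					for (j = 0; j < NPAGES; j++)
						mem[j * PAGE_SZ] = round;
					/* Schedule the next process. */
					write(in[(i + 1) % NPROCS][1], &token, 1);
				}
				exit(0);
			}
		}

		/* Start the ring, then reap the children. */
		write(in[0][1], &token, 1);
		for (i = 0; i < NPROCS; i++)
			wait(NULL);
		return 0;
	}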

And more importantly for lguest, the code is simpler.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 912985dc
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -108,9 +108,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 }
 /*:*/
 
-/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages.  We
- * could probably try to grab batches of pages here as an optimization
- * (ie. pre-faulting). :*/
+/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
+ * an optimization (ie. pre-faulting). :*/
 
 /*H:350 This routine takes a page number given by the Guest and converts it to
  * an actual, physical page number.  It can fail for several reasons: the
@@ -123,19 +122,13 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 static unsigned long get_pfn(unsigned long virtpfn, int write)
 {
 	struct page *page;
-	/* This value indicates failure. */
-	unsigned long ret = -1UL;
 
-	/* get_user_pages() is a complex interface: it gets the "struct
-	 * vm_area_struct" and "struct page" assocated with a range of pages.
-	 * It also needs the task's mmap_sem held, and is not very quick.
-	 * It returns the number of pages it got. */
-	down_read(&current->mm->mmap_sem);
-	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
-			   1, write, 1, &page, NULL) == 1)
-		ret = page_to_pfn(page);
-	up_read(&current->mm->mmap_sem);
-	return ret;
+	/* gup me one page at this address please! */
+	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
+		return page_to_pfn(page);
+
+	/* This value indicates failure. */
+	return -1UL;
 }
 
 /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
@@ -174,7 +167,7 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
 /*H:460 And to complete the chain, release_pte() looks like this: */
 static void release_pte(pte_t pte)
 {
-	/* Remember that get_user_pages() took a reference to the page, in
+	/* Remember that get_user_pages_fast() took a reference to the page, in
 	 * get_pfn()?  We have to put it back now. */
 	if (pte_flags(pte) & _PAGE_PRESENT)
 		put_page(pfn_to_page(pte_pfn(pte)));